Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)

Compare commits: fix-bid-ve...getblobv2- (117 commits)
Commits in this range, by SHA1:

bf0ccf05b2, 9aeb89cedd, 81d413ca73, 1933adedbf, 278b796e43, a55d61de9c, 4faeac57e1, 16bed0a93f, 0e9d5f4513, 65d6be0406,
115e20de62, fb4028f736, af7470e835, 618fe1d1b2, 71fe640eef, 350fead19b, 23645e549a, 2abe1adb29, c9f3e111e6, f8410908e9,
4c328977d9, b98a875194, ee3a7b980a, 62dfe6bde5, 3a671abaa0, ae6ca74c24, bdd423e100, b9e31f258b, 7c1eef5780, 88620a1a63,
3c9a0b9511, a8c5cfd871, d3b2122542, a12b00c71c, 358b9acc53, aa31a16a9c, 86bbb6ee3b, 139c2b887c, 88b03089d2, bce0960422,
58f196c38c, de63e24815, c851987e49, 141f9adff3, 47bf2bd985, fb28a17fd5, 338d4c29d2, 35f6d1277d, 77b3d23c86, 32e81a33dd,
54ce47a839, 208febf884, b3c9cc2459, d8895ec49c, 5e593d50eb, 4878468047, e10045cf8c, b18c4b99c7, 272a95934f, e4582ed1e2,
f2cc2e7128, 84b1bc7635, c51a8f5965, fdb858473e, e2356bb7ad, 8e52d0c3c6, d339e09509, 8ec460223c, 349d9d2fd0, e0aecb9c32,
4a1ab70929, 240cd1d058, 26d8b6b786, 92c359456e, d48ed44c4c, dbab624f3d, 83e9cc3dbb, ee03c7cce2, c5135f6995, 29aedac113,
08fb3812b7, 07738dd9a4, 53df29b07f, 00cf1f2507, 6528fb9cea, 5021131811, 26cec9d9c7, 4ed90a02ef, 7d528c75bb, e7b2953d5a,
acf35e849e, c826d334a1, eace128ee9, 9161b80a32, 009a1507a1, fa71a6e117, 3da40ecd9c, f7f992c256, 09565e0c3a, 05a3736310,
84c8653a52, 921ff23c6b, 87235fb384, 2ec5914b4a, fe000e5629, 447a3d8add, b00aaef202, 0f6070a866, 2a09c9f681, 9c6ccd67c1,
36e5d4926b, 17b7d3ff12, fb2bceece8, d012ab653c, 46e7555c42, 181df3995e, 149e220b61
.github/ISSUE_TEMPLATE/bug_report.yml (vendored): 2 changes
@@ -1,6 +1,6 @@
name: 🐞 Bug report
description: Report a bug or problem with running Prysm
labels: ["Bug"]
type: "Bug"
body:
  - type: markdown
    attributes:
.github/workflows/check-specrefs.yml (vendored, new file): 43 additions
@@ -0,0 +1,43 @@
name: Check Spec References
on: [push, pull_request]

jobs:
  check-specrefs:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Check version consistency
        run: |
          WORKSPACE_VERSION=$(grep 'consensus_spec_version = ' WORKSPACE | sed 's/.*"\(.*\)"/\1/')
          ETHSPECIFY_VERSION=$(grep '^version:' specrefs/.ethspecify.yml | sed 's/version: //')
          if [ "$WORKSPACE_VERSION" != "$ETHSPECIFY_VERSION" ]; then
            echo "Version mismatch between WORKSPACE and ethspecify"
            echo "  WORKSPACE: $WORKSPACE_VERSION"
            echo "  specrefs/.ethspecify.yml: $ETHSPECIFY_VERSION"
            exit 1
          else
            echo "Versions match: $WORKSPACE_VERSION"
          fi

      - name: Install ethspecify
        run: python3 -mpip install ethspecify

      - name: Update spec references
        run: ethspecify process --path=specrefs

      - name: Check for differences
        run: |
          if ! git diff --exit-code specrefs >/dev/null; then
            echo "Spec references are out-of-date!"
            echo ""
            git --no-pager diff specrefs
            exit 1
          else
            echo "Spec references are up-to-date!"
          fi

      - name: Check spec references
        run: ethspecify check --path=specrefs
WORKSPACE: 14 changes
@@ -190,7 +190,7 @@ load("@rules_oci//oci:pull.bzl", "oci_pull")
# A multi-arch base image
oci_pull(
    name = "linux_debian11_multiarch_base", # Debian bullseye
    digest = "sha256:b82f113425c5b5c714151aaacd8039bc141821cdcd3c65202d42bdf9c43ae60b", # 2023-12-12
    digest = "sha256:55a5e011b2c4246b4c51e01fcc2b452d151e03df052e357465f0392fcd59fddf",
    image = "gcr.io/prysmaticlabs/distroless/cc-debian11",
    platforms = [
        "linux/amd64",
@@ -208,7 +208,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()

go_register_toolchains(
    go_version = "1.24.0",
    go_version = "1.24.6",
    nogo = "@//:nogo",
)

@@ -253,16 +253,16 @@ filegroup(
    url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)

consensus_spec_version = "v1.6.0-alpha.1"
consensus_spec_version = "v1.6.0-alpha.6"

load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")

consensus_spec_tests(
    name = "consensus_spec_tests",
    flavors = {
        "general": "sha256-o4t9p3R+fQHF4KOykGmwlG3zDw5wUdVWprkzId8aIsk=",
        "minimal": "sha256-sU7ToI8t3MR8x0vVjC8ERmAHZDWpEmnAC9FWIpHi5x4=",
        "mainnet": "sha256-YKS4wngg0LgI9Upp4MYJ77aG+8+e/G4YeqEIlp06LZw=",
        "general": "sha256-7wkWuahuCO37uVYnxq8Badvi+jY907pBj68ixL8XDOI=",
        "minimal": "sha256-Qy/f27N0LffS/ej7VhIubwDejD6LMK0VdenKkqtZVt4=",
        "mainnet": "sha256-3H7mu5yE+FGz2Wr/nc8Nd9aEu93YoEpsYtn0zBSoeDE=",
    },
    version = consensus_spec_version,
)

@@ -278,7 +278,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    integrity = "sha256-Nv4TEuEJPQIM4E6T9J0FOITsmappmXZjGtlhe1HEXnU=",
    integrity = "sha256-uvz3XfMTGfy3/BtQQoEp5XQOgrWgcH/5Zo/gR0iiP+k=",
    strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
    url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -16,7 +16,6 @@ go_library(
        "//api/server/structs:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//network/forks:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_pkg_errors//:go_default_library",

@@ -9,7 +9,6 @@ import (
    "net/url"
    "path"
    "regexp"
    "sort"
    "strconv"

    "github.com/OffchainLabs/prysm/v6/api/client"
@@ -17,7 +16,6 @@ import (
    "github.com/OffchainLabs/prysm/v6/api/server/structs"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
    "github.com/OffchainLabs/prysm/v6/network/forks"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/pkg/errors"
@@ -137,24 +135,6 @@ func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fo
    return fr.ToConsensus()
}

// GetForkSchedule retrieve all forks, past present and future, of which this node is aware.
func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, error) {
    body, err := c.Get(ctx, getForkSchedulePath)
    if err != nil {
        return nil, errors.Wrap(err, "error requesting fork schedule")
    }
    fsr := &forkScheduleResponse{}
    err = json.Unmarshal(body, fsr)
    if err != nil {
        return nil, err
    }
    ofs, err := fsr.OrderedForkSchedule()
    if err != nil {
        return nil, errors.Wrap(err, fmt.Sprintf("problem unmarshaling %s response", getForkSchedulePath))
    }
    return ofs, nil
}

// GetConfigSpec retrieve the current configs of the network used by the beacon node.
func (c *Client) GetConfigSpec(ctx context.Context) (*structs.GetSpecResponse, error) {
    body, err := c.Get(ctx, getConfigSpecPath)
@@ -334,31 +314,3 @@ func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*structs.BLSToEx
    }
    return poolResponse, nil
}

type forkScheduleResponse struct {
    Data []structs.Fork
}

func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
    ofs := make(forks.OrderedSchedule, 0)
    for _, d := range fsr.Data {
        epoch, err := strconv.ParseUint(d.Epoch, 10, 64)
        if err != nil {
            return nil, errors.Wrapf(err, "error parsing epoch %s", d.Epoch)
        }
        vSlice, err := hexutil.Decode(d.CurrentVersion)
        if err != nil {
            return nil, err
        }
        if len(vSlice) != 4 {
            return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(vSlice), d.CurrentVersion)
        }
        version := bytesutil.ToBytes4(vSlice)
        ofs = append(ofs, forks.ForkScheduleEntry{
            Version: version,
            Epoch:   primitives.Epoch(epoch),
        })
    }
    sort.Sort(ofs)
    return ofs, nil
}
@@ -102,6 +102,7 @@ type BuilderClient interface {
    GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubkey [48]byte) (SignedBid, error)
    RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error
    SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error)
    SubmitBlindedBlockPostFulu(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) error
    Status(ctx context.Context) error
}

@@ -152,7 +153,8 @@ func (c *Client) NodeURL() string {
type reqOption func(*http.Request)

// do is a generic, opinionated request function to reduce boilerplate amongst the methods in this package api/client/builder.
func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, opts ...reqOption) (res []byte, header http.Header, err error) {
// It validates that the HTTP response status matches the expectedStatus parameter.
func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, expectedStatus int, opts ...reqOption) (res []byte, header http.Header, err error) {
    ctx, span := trace.StartSpan(ctx, "builder.client.do")
    defer func() {
        tracing.AnnotateError(span, err)
@@ -187,8 +189,8 @@ func (c *Client) do(ctx context.Context, method string, path string, body io.Rea
            log.WithError(closeErr).Error("Failed to close response body")
        }
    }()
    if r.StatusCode != http.StatusOK {
        err = non200Err(r)
    if r.StatusCode != expectedStatus {
        err = unexpectedStatusErr(r, expectedStatus)
        return
    }
    res, err = io.ReadAll(io.LimitReader(r.Body, client.MaxBodySize))
@@ -236,7 +238,7 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
            r.Header.Set("Accept", api.JsonMediaType)
        }
    }
    data, header, err := c.do(ctx, http.MethodGet, path, nil, getOpts)
    data, header, err := c.do(ctx, http.MethodGet, path, nil, http.StatusOK, getOpts)
    if err != nil {
        return nil, errors.Wrap(err, "error getting header from builder server")
    }
@@ -409,7 +411,7 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
        }
    }

    if _, _, err = c.do(ctx, http.MethodPost, postRegisterValidatorPath, bytes.NewBuffer(body), postOpts); err != nil {
    if _, _, err = c.do(ctx, http.MethodPost, postRegisterValidatorPath, bytes.NewBuffer(body), http.StatusOK, postOpts); err != nil {
        return errors.Wrap(err, "do")
    }
    log.WithField("registrationCount", len(svr)).Debug("Successfully registered validator(s) on builder")
@@ -471,7 +473,7 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS

    // post the blinded block - the execution payload response should contain the unblinded payload, along with the
    // blobs bundle if it is post deneb.
    data, header, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), postOpts)
    data, header, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), http.StatusOK, postOpts)
    if err != nil {
        return nil, nil, errors.Wrap(err, "error posting the blinded block to the builder api")
    }
@@ -501,6 +503,24 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
    return ed, blobs, nil
}

// SubmitBlindedBlockPostFulu calls the builder API endpoint post-Fulu where relays only return status codes.
// This method is used after the Fulu fork when MEV-boost relays no longer return execution payloads.
func (c *Client) SubmitBlindedBlockPostFulu(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) error {
    body, postOpts, err := c.buildBlindedBlockRequest(sb)
    if err != nil {
        return err
    }

    // Post the blinded block - the response should only contain a status code (no payload)
    _, _, err = c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), http.StatusAccepted, postOpts)
    if err != nil {
        return errors.Wrap(err, "error posting the blinded block to the builder api post-Fulu")
    }

    // Success is indicated by no error (status 202)
    return nil
}

func (c *Client) checkBlockVersion(respBytes []byte, header http.Header) (int, error) {
    var versionHeader string
    if c.sszEnabled {
@@ -657,11 +677,11 @@ func (c *Client) Status(ctx context.Context) error {
    getOpts := func(r *http.Request) {
        r.Header.Set("Accept", api.JsonMediaType)
    }
    _, _, err := c.do(ctx, http.MethodGet, getStatus, nil, getOpts)
    _, _, err := c.do(ctx, http.MethodGet, getStatus, nil, http.StatusOK, getOpts)
    return err
}

func non200Err(response *http.Response) error {
func unexpectedStatusErr(response *http.Response, expected int) error {
    bodyBytes, err := io.ReadAll(io.LimitReader(response.Body, client.MaxErrBodySize))
    var errMessage ErrorMessage
    var body string
@@ -670,7 +690,7 @@ func non200Err(response *http.Response) error {
    } else {
        body = "response body:\n" + string(bodyBytes)
    }
    msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
    msg := fmt.Sprintf("expected=%d, got=%d, url=%s, body=%s", expected, response.StatusCode, response.Request.URL, body)
    switch response.StatusCode {
    case http.StatusUnsupportedMediaType:
        log.WithError(ErrUnsupportedMediaType).Debug(msg)
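The interface change above adds SubmitBlindedBlockPostFulu alongside the existing SubmitBlindedBlock. The sketch below illustrates how a caller might choose between the two submission paths based on the block's fork version; it is not part of this change set, and the helper name and the runtime/version import path are assumptions.

```go
package example

import (
	"context"

	builder "github.com/OffchainLabs/prysm/v6/api/client/builder"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
)

// submitToBuilder is an illustrative helper only.
func submitToBuilder(ctx context.Context, c builder.BuilderClient, blk interfaces.ReadOnlySignedBeaconBlock) error {
	if blk.Version() >= version.Fulu {
		// Post-Fulu relays acknowledge with a bare status code (202 Accepted) and no payload.
		return c.SubmitBlindedBlockPostFulu(ctx, blk)
	}
	// Pre-Fulu relays return the unblinded execution payload (and a blobs bundle post-Deneb).
	_, _, err := c.SubmitBlindedBlock(ctx, blk)
	return err
}
```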
@@ -1555,6 +1555,89 @@ func testSignedBlindedBeaconBlockElectra(t *testing.T) *eth.SignedBlindedBeaconB
    }
}

func TestSubmitBlindedBlockPostFulu(t *testing.T) {
    ctx := t.Context()

    t.Run("success", func(t *testing.T) {
        hc := &http.Client{
            Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
                require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
                require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
                require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
                require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
                // Post-Fulu: only return status code, no payload
                return &http.Response{
                    StatusCode: http.StatusAccepted,
                    Body:       io.NopCloser(bytes.NewBufferString("")),
                    Request:    r.Clone(ctx),
                }, nil
            }),
        }
        c := &Client{
            hc:      hc,
            baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
        }
        sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
        require.NoError(t, err)
        err = c.SubmitBlindedBlockPostFulu(ctx, sbbb)
        require.NoError(t, err)
    })

    t.Run("success_ssz", func(t *testing.T) {
        hc := &http.Client{
            Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
                require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
                require.Equal(t, "bellatrix", r.Header.Get(api.VersionHeader))
                require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Content-Type"))
                require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
                // Post-Fulu: only return status code, no payload
                return &http.Response{
                    StatusCode: http.StatusAccepted,
                    Body:       io.NopCloser(bytes.NewBufferString("")),
                    Request:    r.Clone(ctx),
                }, nil
            }),
        }
        c := &Client{
            hc:         hc,
            baseURL:    &url.URL{Host: "localhost:3500", Scheme: "http"},
            sszEnabled: true,
        }
        sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
        require.NoError(t, err)
        err = c.SubmitBlindedBlockPostFulu(ctx, sbbb)
        require.NoError(t, err)
    })

    t.Run("error_response", func(t *testing.T) {
        hc := &http.Client{
            Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
                require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
                require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
                message := ErrorMessage{
                    Code:    400,
                    Message: "Bad Request",
                }
                resp, err := json.Marshal(message)
                require.NoError(t, err)
                return &http.Response{
                    StatusCode: http.StatusBadRequest,
                    Body:       io.NopCloser(bytes.NewBuffer(resp)),
                    Request:    r.Clone(ctx),
                }, nil
            }),
        }
        c := &Client{
            hc:      hc,
            baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
        }
        sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
        require.NoError(t, err)
        err = c.SubmitBlindedBlockPostFulu(ctx, sbbb)
        require.ErrorIs(t, err, ErrNotOK)
    })
}

func TestRequestLogger(t *testing.T) {
    wo := WithObserver(&requestLogger{})
    c, err := NewClient("localhost:3500", wo)
@@ -1727,7 +1810,7 @@ func TestSubmitBlindedBlock_BlobsBundlerInterface(t *testing.T) {
    t.Run("Interface signature verification", func(t *testing.T) {
        // This test verifies that the SubmitBlindedBlock method signature
        // has been updated to return BlobsBundler interface

        client := &Client{}

        // Verify the method exists with the correct signature
@@ -45,6 +45,11 @@ func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySig
    return nil, nil, nil
}

// SubmitBlindedBlockPostFulu --
func (MockClient) SubmitBlindedBlockPostFulu(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) error {
    return nil
}

// Status --
func (MockClient) Status(_ context.Context) error {
    return nil
@@ -1699,7 +1699,7 @@ func TestExecutionPayloadHeaderCapellaRoundtrip(t *testing.T) {
    require.DeepEqual(t, string(expected[0:len(expected)-1]), string(m))
}

func TestErrorMessage_non200Err(t *testing.T) {
func TestErrorMessage_unexpectedStatusErr(t *testing.T) {
    mockRequest := &http.Request{
        URL: &url.URL{Path: "example.com"},
    }
@@ -1779,7 +1779,7 @@ func TestErrorMessage_non200Err(t *testing.T) {
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            err := non200Err(tt.args)
            err := unexpectedStatusErr(tt.args, http.StatusOK)
            if err != nil && tt.wantMessage != "" {
                require.ErrorContains(t, tt.wantMessage, err)
            }
@@ -73,6 +73,7 @@ go_library(
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",
        "//beacon-chain/verification:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/features:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
@@ -181,6 +182,7 @@ go_test(
        "//container/trie:go_default_library",
        "//crypto/bls:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//genesis:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/eth/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
@@ -194,6 +196,7 @@ go_test(
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_ethereum_go_ethereum//core/types:go_default_library",
        "@com_github_holiman_uint256//:go_default_library",
        "@com_github_libp2p_go_libp2p//core/peer:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
@@ -14,6 +14,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
    "github.com/OffchainLabs/prysm/v6/genesis"
    enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -624,6 +625,7 @@ func Test_hashForGenesisRoot(t *testing.T) {
    ctx := t.Context()
    c := setupBeaconChain(t, beaconDB)
    st, _ := util.DeterministicGenesisStateElectra(t, 10)
    genesis.StoreDuringTest(t, genesis.GenesisData{State: st})
    require.NoError(t, c.cfg.BeaconDB.SaveGenesisData(ctx, st))
    root, err := beaconDB.GenesisBlockRoot(ctx)
    require.NoError(t, err)
@@ -7,10 +7,15 @@ type currentlySyncingBlock struct {
    roots map[[32]byte]struct{}
}

func (b *currentlySyncingBlock) set(root [32]byte) {
func (b *currentlySyncingBlock) set(root [32]byte) error {
    b.Lock()
    defer b.Unlock()
    _, ok := b.roots[root]
    if ok {
        return errBlockBeingSynced
    }
    b.roots[root] = struct{}{}
    return nil
}

func (b *currentlySyncingBlock) unset(root [32]byte) {

@@ -44,6 +44,8 @@ var (
    errMaxBlobsExceeded = verification.AsVerificationFailure(errors.New("expected commitments in block exceeds MAX_BLOBS_PER_BLOCK"))
    // errMaxDataColumnsExceeded is returned when the number of data columns exceeds the maximum allowed.
    errMaxDataColumnsExceeded = verification.AsVerificationFailure(errors.New("expected data columns for node exceeds NUMBER_OF_COLUMNS"))
    // errBlockBeingSynced is returned when a block is being synced.
    errBlockBeingSynced = errors.New("block is being synced")
)

// An invalid block is the block that fails state transition based on the core protocol rules.
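With set now returning errBlockBeingSynced when a root is already tracked, it can double as a de-duplication guard. A minimal sketch of that usage pattern follows; the helper name and callback shape are illustrative assumptions, not code from this change.

```go
package blockchain

// processOnce shows how the new error return from set lets only the first caller
// for a given root proceed; concurrent callers get errBlockBeingSynced and back off.
func processOnce(b *currentlySyncingBlock, root [32]byte, process func() error) error {
	if err := b.set(root); err != nil {
		return err // block is already being synced by another goroutine
	}
	defer b.unset(root)
	return process()
}
```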
@@ -19,6 +19,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
    "github.com/OffchainLabs/prysm/v6/genesis"
    v1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -309,6 +310,7 @@ func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
        block: wba,
    }

    genesis.StoreStateDuringTest(t, st)
    require.NoError(t, beaconDB.SaveState(ctx, st, bra))
    require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bra))
    a := &fcuConfig{
@@ -403,6 +405,7 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
    require.NoError(t, err)

    bState, _ := util.DeterministicGenesisState(t, 10)
    genesis.StoreStateDuringTest(t, bState)
    require.NoError(t, beaconDB.SaveState(ctx, bState, bra))
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
    state, blkRoot, err = prepareForkchoiceState(ctx, 2, brb, bra, [32]byte{'B'}, ojc, ofc)
@@ -14,7 +14,10 @@ const BytesPerBlob = ckzg4844.BytesPerBlob
type Blob [BytesPerBlob]byte

// BytesPerCell is the number of bytes in a single cell.
const BytesPerCell = ckzg4844.BytesPerCell
const (
    BytesPerCell  = ckzg4844.BytesPerCell
    BytesPerProof = ckzg4844.BytesPerProof
)

// Cell represents a chunk of an encoded Blob.
type Cell [BytesPerCell]byte
@@ -23,7 +26,7 @@ type Cell [BytesPerCell]byte
type Commitment [48]byte

// Proof represents a KZG proof that attests to the validity of a Blob or parts of it.
type Proof [48]byte
type Proof [BytesPerProof]byte

// Bytes48 is a 48-byte array.
type Bytes48 = ckzg4844.Bytes48
@@ -102,7 +105,6 @@ func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, c
    for i := range cells {
        ckzgCells[i] = ckzg4844.Cell(cells[i])
    }

    return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
}
@@ -1,32 +1,14 @@
package kzg

import (
    "fmt"

    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    GoKZG "github.com/crate-crypto/go-kzg-4844"
    ckzg4844 "github.com/ethereum/c-kzg-4844/v2/bindings/go"
    "github.com/pkg/errors"
)

// Verify performs single or batch verification of commitments depending on the number of given BlobSidecars.
func Verify(sidecars ...blocks.ROBlob) error {
    if len(sidecars) == 0 {
        return nil
    }
    if len(sidecars) == 1 {
        return kzgContext.VerifyBlobKZGProof(
            bytesToBlob(sidecars[0].Blob),
            bytesToCommitment(sidecars[0].KzgCommitment),
            bytesToKZGProof(sidecars[0].KzgProof))
    }
    blobs := make([]GoKZG.Blob, len(sidecars))
    cmts := make([]GoKZG.KZGCommitment, len(sidecars))
    proofs := make([]GoKZG.KZGProof, len(sidecars))
    for i, sidecar := range sidecars {
        blobs[i] = *bytesToBlob(sidecar.Blob)
        cmts[i] = bytesToCommitment(sidecar.KzgCommitment)
        proofs[i] = bytesToKZGProof(sidecar.KzgProof)
    }
    return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs)
}

func bytesToBlob(blob []byte) *GoKZG.Blob {
    var ret GoKZG.Blob
    copy(ret[:], blob)
@@ -42,3 +24,144 @@ func bytesToKZGProof(proof []byte) (ret GoKZG.KZGProof) {
    copy(ret[:], proof)
    return
}

// Verify performs single or batch verification of commitments depending on the number of given BlobSidecars.
func Verify(blobSidecars ...blocks.ROBlob) error {
    if len(blobSidecars) == 0 {
        return nil
    }
    if len(blobSidecars) == 1 {
        return kzgContext.VerifyBlobKZGProof(
            bytesToBlob(blobSidecars[0].Blob),
            bytesToCommitment(blobSidecars[0].KzgCommitment),
            bytesToKZGProof(blobSidecars[0].KzgProof))
    }
    blobs := make([]GoKZG.Blob, len(blobSidecars))
    cmts := make([]GoKZG.KZGCommitment, len(blobSidecars))
    proofs := make([]GoKZG.KZGProof, len(blobSidecars))
    for i, sidecar := range blobSidecars {
        blobs[i] = *bytesToBlob(sidecar.Blob)
        cmts[i] = bytesToCommitment(sidecar.KzgCommitment)
        proofs[i] = bytesToKZGProof(sidecar.KzgProof)
    }
    return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs)
}

// VerifyBlobKZGProofBatch verifies KZG proofs for multiple blobs using batch verification.
// This is more efficient than verifying each blob individually when len(blobs) > 1.
// For single blob verification, it uses the optimized single verification path.
func VerifyBlobKZGProofBatch(blobs [][]byte, commitments [][]byte, proofs [][]byte) error {
    if len(blobs) != len(commitments) || len(blobs) != len(proofs) {
        return errors.Errorf("number of blobs (%d), commitments (%d), and proofs (%d) must match", len(blobs), len(commitments), len(proofs))
    }

    if len(blobs) == 0 {
        return nil
    }

    // Optimize for single blob case - use single verification to avoid batch overhead
    if len(blobs) == 1 {
        return kzgContext.VerifyBlobKZGProof(
            bytesToBlob(blobs[0]),
            bytesToCommitment(commitments[0]),
            bytesToKZGProof(proofs[0]))
    }

    // Use batch verification for multiple blobs
    ckzgBlobs := make([]ckzg4844.Blob, len(blobs))
    ckzgCommitments := make([]ckzg4844.Bytes48, len(commitments))
    ckzgProofs := make([]ckzg4844.Bytes48, len(proofs))

    for i := range blobs {
        if len(blobs[i]) != len(ckzg4844.Blob{}) {
            return fmt.Errorf("blobs len (%d) differs from expected (%d)", len(blobs[i]), len(ckzg4844.Blob{}))
        }
        if len(commitments[i]) != len(ckzg4844.Bytes48{}) {
            return fmt.Errorf("commitments len (%d) differs from expected (%d)", len(commitments[i]), len(ckzg4844.Blob{}))
        }
        if len(proofs[i]) != len(ckzg4844.Bytes48{}) {
            return fmt.Errorf("proofs len (%d) differs from expected (%d)", len(proofs[i]), len(ckzg4844.Blob{}))
        }
        ckzgBlobs[i] = ckzg4844.Blob(blobs[i])
        ckzgCommitments[i] = ckzg4844.Bytes48(commitments[i])
        ckzgProofs[i] = ckzg4844.Bytes48(proofs[i])
    }

    valid, err := ckzg4844.VerifyBlobKZGProofBatch(ckzgBlobs, ckzgCommitments, ckzgProofs)
    if err != nil {
        return errors.Wrap(err, "batch verification")
    }
    if !valid {
        return errors.New("batch KZG proof verification failed")
    }

    return nil
}

// VerifyCellKZGProofBatchFromBlobData verifies cell KZG proofs in batch format directly from blob data.
// This is more efficient than reconstructing data column sidecars when you have the raw blob data and cell proofs.
// For PeerDAS/Fulu, the execution client provides cell proofs in flattened format via BlobsBundleV2.
// For single blob verification, it optimizes by computing cells once and verifying efficiently.
func VerifyCellKZGProofBatchFromBlobData(blobs [][]byte, commitments [][]byte, cellProofs [][]byte, numberOfColumns uint64) error {
    blobCount := uint64(len(blobs))
    expectedCellProofs := blobCount * numberOfColumns

    if uint64(len(cellProofs)) != expectedCellProofs {
        return errors.Errorf("expected %d cell proofs, got %d", expectedCellProofs, len(cellProofs))
    }

    if len(commitments) != len(blobs) {
        return errors.Errorf("number of commitments (%d) must match number of blobs (%d)", len(commitments), len(blobs))
    }

    if blobCount == 0 {
        return nil
    }

    // Handle multiple blobs - compute cells for all blobs
    allCells := make([]Cell, 0, expectedCellProofs)
    allCommitments := make([]Bytes48, 0, expectedCellProofs)
    allIndices := make([]uint64, 0, expectedCellProofs)
    allProofs := make([]Bytes48, 0, expectedCellProofs)

    for blobIndex := range blobs {
        if len(blobs[blobIndex]) != len(Blob{}) {
            return fmt.Errorf("blobs len (%d) differs from expected (%d)", len(blobs[blobIndex]), len(Blob{}))
        }
        // Convert blob to kzg.Blob type
        blob := Blob(blobs[blobIndex])

        // Compute cells for this blob
        cells, err := ComputeCells(&blob)
        if err != nil {
            return errors.Wrapf(err, "failed to compute cells for blob %d", blobIndex)
        }

        // Add cells and corresponding data for each column
        for columnIndex := range numberOfColumns {
            cellProofIndex := uint64(blobIndex)*numberOfColumns + columnIndex
            if len(commitments[blobIndex]) != len(Bytes48{}) {
                return fmt.Errorf("commitments len (%d) differs from expected (%d)", len(commitments[blobIndex]), len(Bytes48{}))
            }
            if len(cellProofs[cellProofIndex]) != len(Bytes48{}) {
                return fmt.Errorf("proofs len (%d) differs from expected (%d)", len(cellProofs[cellProofIndex]), len(Bytes48{}))
            }
            allCells = append(allCells, cells[columnIndex])
            allCommitments = append(allCommitments, Bytes48(commitments[blobIndex]))
            allIndices = append(allIndices, columnIndex)

            allProofs = append(allProofs, Bytes48(cellProofs[cellProofIndex]))
        }
    }

    // Batch verify all cells
    valid, err := VerifyCellKZGProofBatch(allCommitments, allIndices, allCells, allProofs)
    if err != nil {
        return errors.Wrap(err, "cell batch verification")
    }
    if !valid {
        return errors.New("cell KZG proof batch verification failed")
    }

    return nil
}
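As a rough illustration of how the new batch entry point might be called with a Fulu-style flattened cell-proof bundle (a sketch only; the package import path, the NumberOfColumns config field, and the wrapper name are assumptions, not part of this diff):

```go
package example

import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
	"github.com/OffchainLabs/prysm/v6/config/params"
)

// verifyBundle assumes cellProofs is the flattened layout an execution client
// returns via BlobsBundleV2: len(cellProofs) == len(blobs) * numberOfColumns.
func verifyBundle(blobs, commitments, cellProofs [][]byte) error {
	numberOfColumns := params.BeaconConfig().NumberOfColumns // assumed config field
	return kzg.VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
}
```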
@@ -22,8 +22,8 @@ func GenerateCommitmentAndProof(blob GoKZG.Blob) (GoKZG.KZGCommitment, GoKZG.KZG
}

func TestVerify(t *testing.T) {
    sidecars := make([]blocks.ROBlob, 0)
    require.NoError(t, Verify(sidecars...))
    blobSidecars := make([]blocks.ROBlob, 0)
    require.NoError(t, Verify(blobSidecars...))
}

func TestBytesToAny(t *testing.T) {
@@ -37,6 +37,7 @@ func TestBytesToAny(t *testing.T) {
}

func TestGenerateCommitmentAndProof(t *testing.T) {
    require.NoError(t, Start())
    blob := random.GetRandBlob(123)
    commitment, proof, err := GenerateCommitmentAndProof(blob)
    require.NoError(t, err)
@@ -45,3 +46,432 @@ func TestGenerateCommitmentAndProof(t *testing.T) {
|
||||
require.Equal(t, expectedCommitment, commitment)
|
||||
require.Equal(t, expectedProof, proof)
|
||||
}
|
||||
|
||||
func TestVerifyBlobKZGProofBatch(t *testing.T) {
|
||||
// Initialize KZG for testing
|
||||
require.NoError(t, Start())
|
||||
|
||||
t.Run("valid single blob batch", func(t *testing.T) {
|
||||
blob := random.GetRandBlob(123)
|
||||
commitment, proof, err := GenerateCommitmentAndProof(blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
blobs := [][]byte{blob[:]}
|
||||
commitments := [][]byte{commitment[:]}
|
||||
proofs := [][]byte{proof[:]}
|
||||
|
||||
err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("valid multiple blob batch", func(t *testing.T) {
|
||||
blobCount := 3
|
||||
blobs := make([][]byte, blobCount)
|
||||
commitments := make([][]byte, blobCount)
|
||||
proofs := make([][]byte, blobCount)
|
||||
|
||||
for i := 0; i < blobCount; i++ {
|
||||
blob := random.GetRandBlob(int64(i))
|
||||
commitment, proof, err := GenerateCommitmentAndProof(blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
blobs[i] = blob[:]
|
||||
commitments[i] = commitment[:]
|
||||
proofs[i] = proof[:]
|
||||
}
|
||||
|
||||
err := VerifyBlobKZGProofBatch(blobs, commitments, proofs)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("empty inputs should pass", func(t *testing.T) {
|
||||
err := VerifyBlobKZGProofBatch([][]byte{}, [][]byte{}, [][]byte{})
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("mismatched input lengths", func(t *testing.T) {
|
||||
blob := random.GetRandBlob(123)
|
||||
commitment, proof, err := GenerateCommitmentAndProof(blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test different mismatch scenarios
|
||||
err = VerifyBlobKZGProofBatch(
|
||||
[][]byte{blob[:]},
|
||||
[][]byte{},
|
||||
[][]byte{proof[:]},
|
||||
)
|
||||
require.ErrorContains(t, "number of blobs (1), commitments (0), and proofs (1) must match", err)
|
||||
|
||||
err = VerifyBlobKZGProofBatch(
|
||||
[][]byte{blob[:], blob[:]},
|
||||
[][]byte{commitment[:]},
|
||||
[][]byte{proof[:], proof[:]},
|
||||
)
|
||||
require.ErrorContains(t, "number of blobs (2), commitments (1), and proofs (2) must match", err)
|
||||
})
|
||||
|
||||
t.Run("invalid commitment should fail", func(t *testing.T) {
|
||||
blob := random.GetRandBlob(123)
|
||||
_, proof, err := GenerateCommitmentAndProof(blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Use a different blob's commitment (mismatch)
|
||||
differentBlob := random.GetRandBlob(456)
|
||||
wrongCommitment, _, err := GenerateCommitmentAndProof(differentBlob)
|
||||
require.NoError(t, err)
|
||||
|
||||
blobs := [][]byte{blob[:]}
|
||||
commitments := [][]byte{wrongCommitment[:]}
|
||||
proofs := [][]byte{proof[:]}
|
||||
|
||||
err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
|
||||
// Single blob optimization uses different error message
|
||||
require.ErrorContains(t, "can't verify opening proof", err)
|
||||
})
|
||||
|
||||
t.Run("invalid proof should fail", func(t *testing.T) {
|
||||
blob := random.GetRandBlob(123)
|
||||
commitment, _, err := GenerateCommitmentAndProof(blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Use wrong proof
|
||||
invalidProof := make([]byte, 48) // All zeros
|
||||
|
||||
blobs := [][]byte{blob[:]}
|
||||
commitments := [][]byte{commitment[:]}
|
||||
proofs := [][]byte{invalidProof}
|
||||
|
||||
err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
|
||||
require.ErrorContains(t, "short buffer", err)
|
||||
})
|
||||
|
||||
t.Run("mixed valid and invalid proofs should fail", func(t *testing.T) {
|
||||
// First blob - valid
|
||||
blob1 := random.GetRandBlob(123)
|
||||
commitment1, proof1, err := GenerateCommitmentAndProof(blob1)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Second blob - invalid proof
|
||||
blob2 := random.GetRandBlob(456)
|
||||
commitment2, _, err := GenerateCommitmentAndProof(blob2)
|
||||
require.NoError(t, err)
|
||||
invalidProof := make([]byte, 48) // All zeros
|
||||
|
||||
blobs := [][]byte{blob1[:], blob2[:]}
|
||||
commitments := [][]byte{commitment1[:], commitment2[:]}
|
||||
proofs := [][]byte{proof1[:], invalidProof}
|
||||
|
||||
err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
|
||||
require.ErrorContains(t, "batch verification", err)
|
||||
})
|
||||
|
||||
t.Run("batch KZG proof verification failed", func(t *testing.T) {
|
||||
// Create multiple blobs with mismatched commitments and proofs to trigger batch verification failure
|
||||
blob1 := random.GetRandBlob(123)
|
||||
blob2 := random.GetRandBlob(456)
|
||||
|
||||
// Generate valid proof for blob1
|
||||
commitment1, proof1, err := GenerateCommitmentAndProof(blob1)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Generate valid proof for blob2 but use wrong commitment (from blob1)
|
||||
_, proof2, err := GenerateCommitmentAndProof(blob2)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Use blob2 data with blob1's commitment and blob2's proof - this should cause batch verification to fail
|
||||
blobs := [][]byte{blob1[:], blob2[:]}
|
||||
commitments := [][]byte{commitment1[:], commitment1[:]} // Wrong commitment for blob2
|
||||
proofs := [][]byte{proof1[:], proof2[:]}
|
||||
|
||||
err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
|
||||
require.ErrorContains(t, "batch KZG proof verification failed", err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
|
||||
// Initialize KZG for testing
|
||||
require.NoError(t, Start())
|
||||
|
||||
t.Run("valid single blob cell verification", func(t *testing.T) {
|
||||
numberOfColumns := uint64(128)
|
||||
|
||||
// Generate blob and commitment
|
||||
randBlob := random.GetRandBlob(123)
|
||||
var blob Blob
|
||||
copy(blob[:], randBlob[:])
|
||||
commitment, err := BlobToKZGCommitment(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Compute cells and proofs
|
||||
cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create flattened cell proofs (like execution client format)
|
||||
cellProofs := make([][]byte, numberOfColumns)
|
||||
for i := range numberOfColumns {
|
||||
cellProofs[i] = cellsAndProofs.Proofs[i][:]
|
||||
}
|
||||
|
||||
blobs := [][]byte{blob[:]}
|
||||
commitments := [][]byte{commitment[:]}
|
||||
|
||||
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("valid multiple blob cell verification", func(t *testing.T) {
|
||||
numberOfColumns := uint64(128)
|
||||
blobCount := 2
|
||||
|
||||
blobs := make([][]byte, blobCount)
|
||||
commitments := make([][]byte, blobCount)
|
||||
var allCellProofs [][]byte
|
||||
|
||||
for i := range blobCount {
|
||||
// Generate blob and commitment
|
||||
randBlob := random.GetRandBlob(int64(i))
|
||||
var blob Blob
|
||||
copy(blob[:], randBlob[:])
|
||||
commitment, err := BlobToKZGCommitment(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Compute cells and proofs
|
||||
cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
blobs[i] = blob[:]
|
||||
commitments[i] = commitment[:]
|
||||
|
||||
// Add cell proofs for this blob
|
||||
for j := range numberOfColumns {
|
||||
allCellProofs = append(allCellProofs, cellsAndProofs.Proofs[j][:])
|
||||
}
|
||||
}
|
||||
|
||||
err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("empty inputs should pass", func(t *testing.T) {
|
||||
err := VerifyCellKZGProofBatchFromBlobData([][]byte{}, [][]byte{}, [][]byte{}, 128)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("mismatched blob and commitment count", func(t *testing.T) {
|
||||
randBlob := random.GetRandBlob(123)
|
||||
var blob Blob
|
||||
copy(blob[:], randBlob[:])
|
||||
|
||||
err := VerifyCellKZGProofBatchFromBlobData(
|
||||
[][]byte{blob[:]},
|
||||
[][]byte{}, // Empty commitments
|
||||
[][]byte{},
|
||||
128,
|
||||
)
|
||||
require.ErrorContains(t, "expected 128 cell proofs", err)
|
||||
})
|
||||
|
||||
t.Run("wrong cell proof count", func(t *testing.T) {
|
||||
numberOfColumns := uint64(128)
|
||||
|
||||
randBlob := random.GetRandBlob(123)
|
||||
var blob Blob
|
||||
copy(blob[:], randBlob[:])
|
||||
commitment, err := BlobToKZGCommitment(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
blobs := [][]byte{blob[:]}
|
||||
commitments := [][]byte{commitment[:]}
|
||||
|
||||
// Wrong number of cell proofs - should be 128 for 1 blob, but provide 10
|
||||
wrongCellProofs := make([][]byte, 10)
|
||||
|
||||
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, wrongCellProofs, numberOfColumns)
|
||||
require.ErrorContains(t, "expected 128 cell proofs, got 10", err)
|
||||
})
|
||||
|
||||
t.Run("invalid cell proofs should fail", func(t *testing.T) {
|
||||
numberOfColumns := uint64(128)
|
||||
|
||||
randBlob := random.GetRandBlob(123)
|
||||
var blob Blob
|
||||
copy(blob[:], randBlob[:])
|
||||
commitment, err := BlobToKZGCommitment(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
blobs := [][]byte{blob[:]}
|
||||
commitments := [][]byte{commitment[:]}
|
||||
|
||||
// Create invalid cell proofs (all zeros)
|
||||
invalidCellProofs := make([][]byte, numberOfColumns)
|
||||
for i := range numberOfColumns {
|
||||
invalidCellProofs[i] = make([]byte, 48) // All zeros
|
||||
}
|
||||
|
||||
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, invalidCellProofs, numberOfColumns)
|
||||
require.ErrorContains(t, "cell batch verification", err)
|
||||
})
|
||||
|
||||
t.Run("mismatched commitment should fail", func(t *testing.T) {
|
||||
numberOfColumns := uint64(128)
|
||||
|
||||
// Generate blob and correct cell proofs
|
||||
randBlob := random.GetRandBlob(123)
|
||||
var blob Blob
|
||||
copy(blob[:], randBlob[:])
|
||||
cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Generate wrong commitment from different blob
|
||||
randBlob2 := random.GetRandBlob(456)
|
||||
var differentBlob Blob
|
||||
copy(differentBlob[:], randBlob2[:])
|
||||
wrongCommitment, err := BlobToKZGCommitment(&differentBlob)
|
||||
require.NoError(t, err)
|
||||
|
||||
cellProofs := make([][]byte, numberOfColumns)
|
||||
for i := range numberOfColumns {
|
||||
cellProofs[i] = cellsAndProofs.Proofs[i][:]
|
||||
}
|
||||
|
||||
blobs := [][]byte{blob[:]}
|
||||
commitments := [][]byte{wrongCommitment[:]}
|
||||
|
||||
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
|
||||
require.ErrorContains(t, "cell KZG proof batch verification failed", err)
|
||||
})
|
||||
|
||||
t.Run("invalid blob data that should cause ComputeCells to fail", func(t *testing.T) {
|
||||
numberOfColumns := uint64(128)
|
||||
|
||||
// Create invalid blob (not properly formatted)
|
||||
invalidBlobData := make([]byte, 10) // Too short
|
||||
commitment := make([]byte, 48) // Dummy commitment
|
||||
cellProofs := make([][]byte, numberOfColumns)
|
||||
for i := range numberOfColumns {
|
||||
cellProofs[i] = make([]byte, 48)
|
||||
}
|
||||
|
||||
blobs := [][]byte{invalidBlobData}
|
||||
commitments := [][]byte{commitment}
|
||||
|
||||
err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
|
||||
require.NotNil(t, err)
|
||||
require.ErrorContains(t, "blobs len (10) differs from expected (131072)", err)
|
||||
})
|
||||
|
||||
t.Run("invalid commitment size should fail", func(t *testing.T) {
|
||||
numberOfColumns := uint64(128)
|
||||
|
||||
randBlob := random.GetRandBlob(123)
|
||||
var blob Blob
|
||||
copy(blob[:], randBlob[:])
|
||||
|
||||
// Create invalid commitment (wrong size)
|
||||
invalidCommitment := make([]byte, 32) // Should be 48 bytes
|
||||
cellProofs := make([][]byte, numberOfColumns)
|
||||
for i := range numberOfColumns {
|
||||
cellProofs[i] = make([]byte, 48)
|
||||
}
|
||||
|
||||
blobs := [][]byte{blob[:]}
|
||||
commitments := [][]byte{invalidCommitment}
|
||||
|
||||
err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
|
||||
require.ErrorContains(t, "commitments len (32) differs from expected (48)", err)
|
||||
})
|
||||
|
||||
t.Run("invalid cell proof size should fail", func(t *testing.T) {
|
||||
numberOfColumns := uint64(128)
|
||||
|
||||
randBlob := random.GetRandBlob(123)
|
||||
var blob Blob
|
||||
copy(blob[:], randBlob[:])
|
||||
commitment, err := BlobToKZGCommitment(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create invalid cell proofs (wrong size)
|
||||
invalidCellProofs := make([][]byte, numberOfColumns)
|
||||
for i := range numberOfColumns {
|
||||
if i == 0 {
|
||||
invalidCellProofs[i] = make([]byte, 32) // Wrong size - should be 48
|
||||
} else {
|
||||
invalidCellProofs[i] = make([]byte, 48)
|
||||
}
|
||||
}
|
||||
|
||||
blobs := [][]byte{blob[:]}
|
||||
commitments := [][]byte{commitment[:]}
|
||||
|
||||
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, invalidCellProofs, numberOfColumns)
|
||||
require.ErrorContains(t, "proofs len (32) differs from expected (48)", err)
|
||||
})
|
||||
|
||||
t.Run("multiple blobs with mixed invalid commitments", func(t *testing.T) {
|
||||
numberOfColumns := uint64(128)
|
||||
blobCount := 2
|
||||
|
||||
blobs := make([][]byte, blobCount)
|
||||
commitments := make([][]byte, blobCount)
|
||||
var allCellProofs [][]byte
|
||||
|
||||
// First blob - valid
|
||||
randBlob1 := random.GetRandBlob(123)
|
||||
var blob1 Blob
|
||||
copy(blob1[:], randBlob1[:])
|
||||
commitment1, err := BlobToKZGCommitment(&blob1)
|
||||
require.NoError(t, err)
|
||||
blobs[0] = blob1[:]
|
||||
commitments[0] = commitment1[:]
|
||||
|
||||
// Second blob - use invalid commitment size
|
||||
randBlob2 := random.GetRandBlob(456)
|
||||
var blob2 Blob
|
||||
copy(blob2[:], randBlob2[:])
|
||||
blobs[1] = blob2[:]
|
||||
commitments[1] = make([]byte, 32) // Wrong size
|
||||
|
||||
// Add cell proofs for both blobs
|
||||
for i := 0; i < blobCount; i++ {
|
||||
for j := uint64(0); j < numberOfColumns; j++ {
|
||||
allCellProofs = append(allCellProofs, make([]byte, 48))
|
||||
}
|
||||
}
|
||||
|
||||
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
|
||||
require.ErrorContains(t, "commitments len (32) differs from expected (48)", err)
|
||||
})
|
||||
|
||||
t.Run("multiple blobs with mixed invalid cell proof sizes", func(t *testing.T) {
|
||||
numberOfColumns := uint64(128)
|
||||
blobCount := 2
|
||||
|
||||
blobs := make([][]byte, blobCount)
|
||||
commitments := make([][]byte, blobCount)
|
||||
var allCellProofs [][]byte
|
||||
|
||||
for i := 0; i < blobCount; i++ {
|
||||
randBlob := random.GetRandBlob(int64(i))
|
||||
var blob Blob
|
||||
copy(blob[:], randBlob[:])
|
||||
commitment, err := BlobToKZGCommitment(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
blobs[i] = blob[:]
|
||||
commitments[i] = commitment[:]
|
||||
|
||||
// Add cell proofs - make some invalid in the second blob
|
||||
for j := uint64(0); j < numberOfColumns; j++ {
|
||||
if i == 1 && j == 64 {
|
||||
// Invalid proof size in middle of second blob's proofs
|
||||
allCellProofs = append(allCellProofs, make([]byte, 20))
|
||||
} else {
|
||||
allCellProofs = append(allCellProofs, make([]byte, 48))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
|
||||
require.ErrorContains(t, "proofs len (20) differs from expected (48)", err)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
    statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
    lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/db"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
@@ -235,14 +234,6 @@ func WithSyncChecker(checker Checker) Option {
    }
}

// WithCustodyInfo sets the custody info for the blockchain service.
func WithCustodyInfo(custodyInfo *peerdas.CustodyInfo) Option {
    return func(s *Service) error {
        s.cfg.CustodyInfo = custodyInfo
        return nil
    }
}

// WithSlasherEnabled sets whether the slasher is enabled or not.
func WithSlasherEnabled(enabled bool) Option {
    return func(s *Service) error {
@@ -159,7 +159,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
}
|
||||
|
||||
// Fill in missing blocks
|
||||
if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.CurrentJustifiedCheckpoint(), preState.FinalizedCheckpoint()); err != nil {
|
||||
if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.FinalizedCheckpoint(), preState.CurrentJustifiedCheckpoint()); err != nil {
|
||||
return errors.Wrap(err, "could not fill in missing blocks to forkchoice")
|
||||
}
|
||||
|
||||
@@ -240,9 +240,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
}
|
||||
}
|
||||
|
||||
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {
|
||||
return errors.Wrapf(err, "could not validate sidecar availability at slot %d", b.Block().Slot())
|
||||
if err := s.areSidecarsAvailable(ctx, avs, b); err != nil {
|
||||
return errors.Wrapf(err, "could not validate sidecar availability for block %#x at slot %d", b.Root(), b.Block().Slot())
|
||||
}
|
||||
|
||||
args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
|
||||
JustifiedCheckpoint: jCheckpoints[i],
|
||||
FinalizedCheckpoint: fCheckpoints[i]}
|
||||
@@ -308,6 +309,30 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
|
||||
}
|
||||
|
||||
func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityStore, roBlock consensusblocks.ROBlock) error {
|
||||
blockVersion := roBlock.Version()
|
||||
block := roBlock.Block()
|
||||
slot := block.Slot()
|
||||
|
||||
if blockVersion >= version.Fulu {
|
||||
if err := s.areDataColumnsAvailable(ctx, roBlock.Root(), block); err != nil {
|
||||
return errors.Wrapf(err, "are data columns available for block %#x with slot %d", roBlock.Root(), slot)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if blockVersion >= version.Deneb {
|
||||
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), roBlock); err != nil {
|
||||
return errors.Wrapf(err, "could not validate sidecar availability at slot %d", slot)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
|
||||
e := coreTime.CurrentEpoch(st)
|
||||
if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
|
||||
@@ -584,7 +609,7 @@ func (s *Service) runLateBlockTasks() {
|
||||
// It returns a map where each key represents a missing BlobSidecar index.
|
||||
// An empty map means we have all indices; a non-empty map can be used to compare incoming
|
||||
// BlobSidecars against the set of known missing sidecars.
|
||||
func missingBlobIndices(bs *filesystem.BlobStorage, root [fieldparams.RootLength]byte, expected [][]byte, slot primitives.Slot) (map[uint64]bool, error) {
|
||||
func missingBlobIndices(store *filesystem.BlobStorage, root [fieldparams.RootLength]byte, expected [][]byte, slot primitives.Slot) (map[uint64]bool, error) {
|
||||
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
|
||||
if len(expected) == 0 {
|
||||
return nil, nil
|
||||
@@ -592,7 +617,7 @@ func missingBlobIndices(bs *filesystem.BlobStorage, root [fieldparams.RootLength
|
||||
if len(expected) > maxBlobsPerBlock {
|
||||
return nil, errMaxBlobsExceeded
|
||||
}
|
||||
indices := bs.Summary(root)
|
||||
indices := store.Summary(root)
|
||||
missing := make(map[uint64]bool, len(expected))
|
||||
for i := range expected {
|
||||
if len(expected[i]) > 0 && !indices.HasIndex(uint64(i)) {
|
||||
@@ -607,7 +632,7 @@ func missingBlobIndices(bs *filesystem.BlobStorage, root [fieldparams.RootLength
|
||||
// It returns a map where each key represents a missing DataColumnSidecar index.
|
||||
// An empty map means we have all indices; a non-empty map can be used to compare incoming
|
||||
// DataColumns against the set of known missing sidecars.
|
||||
func missingDataColumnIndices(bs *filesystem.DataColumnStorage, root [fieldparams.RootLength]byte, expected map[uint64]bool) (map[uint64]bool, error) {
|
||||
func missingDataColumnIndices(store *filesystem.DataColumnStorage, root [fieldparams.RootLength]byte, expected map[uint64]bool) (map[uint64]bool, error) {
|
||||
if len(expected) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
@@ -619,7 +644,7 @@ func missingDataColumnIndices(bs *filesystem.DataColumnStorage, root [fieldparam
|
||||
}
|
||||
|
||||
// Get a summary of the data columns stored in the database.
|
||||
summary := bs.Summary(root)
|
||||
summary := store.Summary(root)
|
||||
|
||||
// Check all expected data columns against the summary.
|
||||
missing := make(map[uint64]bool)
|
||||
@@ -639,14 +664,14 @@ func missingDataColumnIndices(bs *filesystem.DataColumnStorage, root [fieldparam
|
||||
// closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
|
||||
func (s *Service) isDataAvailable(
|
||||
ctx context.Context,
|
||||
root [fieldparams.RootLength]byte,
|
||||
signedBlock interfaces.ReadOnlySignedBeaconBlock,
|
||||
roBlock consensusblocks.ROBlock,
|
||||
) error {
|
||||
block := signedBlock.Block()
|
||||
block := roBlock.Block()
|
||||
if block == nil {
|
||||
return errors.New("invalid nil beacon block")
|
||||
}
|
||||
|
||||
root := roBlock.Root()
|
||||
blockVersion := block.Version()
|
||||
if blockVersion >= version.Fulu {
|
||||
return s.areDataColumnsAvailable(ctx, root, block)
|
||||
@@ -666,7 +691,7 @@ func (s *Service) areDataColumnsAvailable(
|
||||
root [fieldparams.RootLength]byte,
|
||||
block interfaces.ReadOnlyBeaconBlock,
|
||||
) error {
|
||||
// We are only required to check within MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS.
|
||||
// We are only required to check within MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS
|
||||
blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
|
||||
blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)
|
||||
if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
|
||||
@@ -689,16 +714,21 @@ func (s *Service) areDataColumnsAvailable(
	}

	// All columns to sample need to be available for the block to be considered available.
	// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#custody-sampling
	nodeID := s.cfg.P2P.NodeID()

	// Prevent the custody group count from changing during the rest of the function.
	s.cfg.CustodyInfo.Mut.RLock()
	defer s.cfg.CustodyInfo.Mut.RUnlock()

	// Get the custody group sampling size for the node.
	custodyGroupSamplingSize := s.cfg.CustodyInfo.CustodyGroupSamplingSize(peerdas.Actual)
	peerInfo, _, err := peerdas.Info(nodeID, custodyGroupSamplingSize)
	custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
	if err != nil {
		return errors.Wrap(err, "custody group count")
	}

	// Compute the sampling size.
	// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#custody-sampling
	samplesPerSlot := params.BeaconConfig().SamplesPerSlot
	samplingSize := max(samplesPerSlot, custodyGroupCount)

	// Get the peer info for the node.
	peerInfo, _, err := peerdas.Info(nodeID, samplingSize)
	if err != nil {
		return errors.Wrap(err, "peer info")
	}
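The sampling-size rule in this hunk is simply a maximum over two configuration values, per the custody-sampling section of the Fulu spec. A minimal sketch, assuming example values for SAMPLES_PER_SLOT and the node's custody group count:

```go
package main

import "fmt"

// samplingSize returns how many custody groups a node must sample:
// at least SAMPLES_PER_SLOT, and never fewer than the groups it already custodies.
func samplingSize(samplesPerSlot, custodyGroupCount uint64) uint64 {
	return max(samplesPerSlot, custodyGroupCount)
}

func main() {
	// Example values only; the real numbers come from params.BeaconConfig().
	fmt.Println(samplingSize(8, 4))   // 8  -> the sampling floor dominates
	fmt.Println(samplingSize(8, 128)) // 128 -> a supernode custodies everything
}
```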
@@ -711,7 +741,7 @@ func (s *Service) areDataColumnsAvailable(
	summary := s.dataColumnStorage.Summary(root)
	storedDataColumnsCount := summary.Count()

	minimumColumnCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
	minimumColumnCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()

	// As soon as we have enough data column sidecars, we can reconstruct the missing ones.
	// We don't need to wait for the rest of the data columns to declare the block as available.
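The early exit above relies on the erasure-coding property that a block's column data can be rebuilt once at least half of the extended columns are held locally. A sketch of that threshold check, with the column count as an assumed example value:

```go
package main

import "fmt"

// canReconstruct reports whether enough column sidecars are stored to rebuild the rest.
// With a 2x erasure-coding extension, holding 50% of the columns is sufficient.
func canReconstruct(storedColumns, numberOfColumns uint64) bool {
	minimumToReconstruct := numberOfColumns / 2
	return storedColumns >= minimumToReconstruct
}

func main() {
	const numberOfColumns = 128 // illustrative; the real value comes from the beacon config
	fmt.Println(canReconstruct(63, numberOfColumns)) // false
	fmt.Println(canReconstruct(64, numberOfColumns)) // true
}
```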
@@ -814,7 +844,7 @@ func (s *Service) areDataColumnsAvailable(
|
||||
missingIndices = uint64MapToSortedSlice(missingMap)
|
||||
}
|
||||
|
||||
return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing %v", block.Slot(), root, missingIndices)
|
||||
return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing: %v", block.Slot(), root, missingIndices)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -30,6 +30,10 @@ import (
	"github.com/sirupsen/logrus"
)

// ErrInvalidCheckpointArgs may be returned when the finalized checkpoint has an epoch greater than the justified checkpoint epoch.
// If you are seeing this error, make sure you haven't mixed up the order of the arguments in the method you are calling.
var ErrInvalidCheckpointArgs = errors.New("finalized checkpoint cannot be greater than justified checkpoint")

// CurrentSlot returns the current slot based on time.
func (s *Service) CurrentSlot() primitives.Slot {
	return slots.CurrentSlot(s.genesisTime)
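ErrInvalidCheckpointArgs guards against the caller swapping the finalized and justified checkpoints, since the finalized epoch can never exceed the justified epoch. A small sketch of the same validation, using a stand-in Checkpoint type rather than the protobuf one:

```go
package main

import (
	"errors"
	"fmt"
)

// Checkpoint is a stand-in for the protobuf type used in the diff.
type Checkpoint struct {
	Epoch uint64
	Root  [32]byte
}

var errInvalidCheckpointArgs = errors.New("finalized checkpoint cannot be greater than justified checkpoint")

// validateCheckpointOrder rejects argument mix-ups before any fork-choice work is done.
func validateCheckpointOrder(finalized, justified *Checkpoint) error {
	if finalized.Epoch > justified.Epoch {
		return errInvalidCheckpointArgs
	}
	return nil
}

func main() {
	err := validateCheckpointOrder(&Checkpoint{Epoch: 5}, &Checkpoint{Epoch: 3})
	fmt.Println(errors.Is(err, errInvalidCheckpointArgs)) // true
}
```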
@@ -454,6 +458,9 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot primitives.
|
||||
// This is useful for block tree visualizer and additional vote accounting.
|
||||
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock,
|
||||
fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
|
||||
if fCheckpoint.Epoch > jCheckpoint.Epoch {
|
||||
return ErrInvalidCheckpointArgs
|
||||
}
|
||||
pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, 0)
|
||||
|
||||
// Fork choice only matters from last finalized slot.
|
||||
|
||||
@@ -35,6 +35,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/bls"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/genesis"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
@@ -374,6 +375,81 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) {
|
||||
require.Equal(t, ErrNotDescendantOfFinalized.Error(), err.Error())
|
||||
}
|
||||
|
||||
func TestFillForkChoiceMissingBlocks_ErrorCases(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
finalizedEpoch primitives.Epoch
|
||||
justifiedEpoch primitives.Epoch
|
||||
expectedError error
|
||||
}{
|
||||
{
|
||||
name: "finalized epoch greater than justified epoch",
|
||||
finalizedEpoch: 5,
|
||||
justifiedEpoch: 3,
|
||||
expectedError: ErrInvalidCheckpointArgs,
|
||||
},
|
||||
{
|
||||
name: "valid case - finalized equal to justified",
|
||||
finalizedEpoch: 3,
|
||||
justifiedEpoch: 3,
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "valid case - finalized less than justified",
|
||||
finalizedEpoch: 2,
|
||||
justifiedEpoch: 3,
|
||||
expectedError: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx, beaconDB := tr.ctx, tr.db
|
||||
|
||||
st, _ := util.DeterministicGenesisState(t, 64)
|
||||
require.NoError(t, service.saveGenesisData(ctx, st))
|
||||
|
||||
// Create a simple block for testing
|
||||
blk := util.NewBeaconBlock()
|
||||
blk.Block.Slot = 10
|
||||
blk.Block.ParentRoot = service.originBlockRoot[:]
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, ctx, beaconDB, blk)
|
||||
|
||||
// Create checkpoints with test case epochs
|
||||
finalizedCheckpoint := &ethpb.Checkpoint{
|
||||
Epoch: tt.finalizedEpoch,
|
||||
Root: service.originBlockRoot[:],
|
||||
}
|
||||
justifiedCheckpoint := &ethpb.Checkpoint{
|
||||
Epoch: tt.justifiedEpoch,
|
||||
Root: service.originBlockRoot[:],
|
||||
}
|
||||
|
||||
// Set up forkchoice store to avoid other errors
|
||||
fcp := &ethpb.Checkpoint{Epoch: 0, Root: service.originBlockRoot[:]}
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, service.originBlockRoot, service.originBlockRoot, [32]byte{}, fcp, fcp)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
err = service.fillInForkChoiceMissingBlocks(
|
||||
t.Context(), wsb, finalizedCheckpoint, justifiedCheckpoint)
|
||||
|
||||
if tt.expectedError != nil {
|
||||
require.ErrorIs(t, err, tt.expectedError)
|
||||
} else {
|
||||
// For valid cases, we might get other errors (like block not being descendant of finalized)
|
||||
// but we shouldn't get the checkpoint validation error
|
||||
if err != nil && errors.Is(err, tt.expectedError) {
|
||||
t.Errorf("Unexpected checkpoint validation error: %v", err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// blockTree1 constructs the following tree:
|
||||
//
|
||||
// /- B1
|
||||
@@ -1980,14 +2056,15 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
genesisState, keys := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := genesisState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
genesis := blocks.NewGenesisBlock(stateRoot[:])
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
|
||||
gb := blocks.NewGenesisBlock(stateRoot[:])
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(gb)
|
||||
require.NoError(t, err)
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
genesisRoot, err := gb.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
|
||||
require.NoError(t, service.saveGenesisData(ctx, genesisState))
|
||||
|
||||
genesis.StoreStateDuringTest(t, genesisState)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, genesisState, genesisRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, genesisRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot), "Could not save genesis state")
|
||||
@@ -2130,13 +2207,13 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
|
||||
// Forkchoice has the genesisRoot loaded at startup
|
||||
require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot()))
|
||||
// Service's store has the finalized state as headRoot
|
||||
// Service's store has the justified checkpoint root as headRoot (verified below through justified checkpoint comparison)
|
||||
headRoot, err := service.HeadRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, genesisRoot, bytesutil.ToBytes32(headRoot))
|
||||
require.NotEqual(t, bytesutil.ToBytes32(params.BeaconConfig().ZeroHash[:]), bytesutil.ToBytes32(headRoot)) // Ensure head is not zero
|
||||
optimistic, err := service.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, optimistic)
|
||||
require.Equal(t, true, optimistic) // Head is now optimistic when starting from justified checkpoint
|
||||
|
||||
// Check that the node's justified checkpoint does not agree with the
|
||||
// last valid state's justified checkpoint
|
||||
@@ -2875,46 +2952,52 @@ func TestIsDataAvailable(t *testing.T) {
|
||||
params := testIsAvailableParams{options: []Option{WithGenesisTime(time.Unix(0, 0))}}
|
||||
ctx, _, service, root, signed := testIsAvailableSetup(t, params)
|
||||
|
||||
err := service.isDataAvailable(ctx, root, signed)
|
||||
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
|
||||
require.NoError(t, err)
|
||||
err = service.isDataAvailable(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("Fulu - no commitment in blocks", func(t *testing.T) {
|
||||
ctx, _, service, root, signed := testIsAvailableSetup(t, testIsAvailableParams{})
|
||||
|
||||
err := service.isDataAvailable(ctx, root, signed)
|
||||
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
|
||||
require.NoError(t, err)
|
||||
err = service.isDataAvailable(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("Fulu - more than half of the columns in custody", func(t *testing.T) {
|
||||
minimumColumnsCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
|
||||
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
|
||||
indices := make([]uint64, 0, minimumColumnsCountToReconstruct)
|
||||
for i := range minimumColumnsCountToReconstruct {
|
||||
indices = append(indices, i)
|
||||
}
|
||||
|
||||
params := testIsAvailableParams{
|
||||
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
|
||||
columnsToSave: indices,
|
||||
blobKzgCommitmentsCount: 3,
|
||||
}
|
||||
|
||||
ctx, _, service, root, signed := testIsAvailableSetup(t, params)
|
||||
|
||||
err := service.isDataAvailable(ctx, root, signed)
|
||||
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
|
||||
require.NoError(t, err)
|
||||
err = service.isDataAvailable(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("Fulu - no missing data columns", func(t *testing.T) {
|
||||
params := testIsAvailableParams{
|
||||
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
|
||||
columnsToSave: []uint64{1, 17, 19, 42, 75, 87, 102, 117, 119}, // 119 is not needed
|
||||
blobKzgCommitmentsCount: 3,
|
||||
}
|
||||
|
||||
ctx, _, service, root, signed := testIsAvailableSetup(t, params)
|
||||
|
||||
err := service.isDataAvailable(ctx, root, signed)
|
||||
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
|
||||
require.NoError(t, err)
|
||||
err = service.isDataAvailable(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
@@ -2922,7 +3005,7 @@ func TestIsDataAvailable(t *testing.T) {
|
||||
startWaiting := make(chan bool)
|
||||
|
||||
testParams := testIsAvailableParams{
|
||||
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{}), WithStartWaitingDataColumnSidecars(startWaiting)},
|
||||
options: []Option{WithStartWaitingDataColumnSidecars(startWaiting)},
|
||||
columnsToSave: []uint64{1, 17, 19, 75, 102, 117, 119}, // 119 is not needed, 42 and 87 are missing
|
||||
|
||||
blobKzgCommitmentsCount: 3,
|
||||
@@ -2959,7 +3042,12 @@ func TestIsDataAvailable(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
|
||||
err = service.isDataAvailable(ctx, root, signed)
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Second*2)
|
||||
defer cancel()
|
||||
|
||||
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
|
||||
require.NoError(t, err)
|
||||
err = service.isDataAvailable(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
@@ -2971,11 +3059,7 @@ func TestIsDataAvailable(t *testing.T) {
|
||||
|
||||
startWaiting := make(chan bool)
|
||||
|
||||
var custodyInfo peerdas.CustodyInfo
|
||||
custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(cgc)
|
||||
custodyInfo.ToAdvertiseGroupCount.Set(cgc)
|
||||
|
||||
minimumColumnsCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
|
||||
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
|
||||
indices := make([]uint64, 0, minimumColumnsCountToReconstruct-missingColumns)
|
||||
|
||||
for i := range minimumColumnsCountToReconstruct - missingColumns {
|
||||
@@ -2983,12 +3067,14 @@ func TestIsDataAvailable(t *testing.T) {
|
||||
}
|
||||
|
||||
testParams := testIsAvailableParams{
|
||||
options: []Option{WithCustodyInfo(&custodyInfo), WithStartWaitingDataColumnSidecars(startWaiting)},
|
||||
options: []Option{WithStartWaitingDataColumnSidecars(startWaiting)},
|
||||
columnsToSave: indices,
|
||||
blobKzgCommitmentsCount: 3,
|
||||
}
|
||||
|
||||
ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
|
||||
_, _, err := service.cfg.P2P.UpdateCustodyInfo(0, cgc)
|
||||
require.NoError(t, err)
|
||||
block := signed.Block()
|
||||
slot := block.Slot()
|
||||
proposerIndex := block.ProposerIndex()
|
||||
@@ -3020,7 +3106,12 @@ func TestIsDataAvailable(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
|
||||
err = service.isDataAvailable(ctx, root, signed)
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Second*2)
|
||||
defer cancel()
|
||||
|
||||
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
|
||||
require.NoError(t, err)
|
||||
err = service.isDataAvailable(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
@@ -3028,7 +3119,7 @@ func TestIsDataAvailable(t *testing.T) {
|
||||
startWaiting := make(chan bool)
|
||||
|
||||
params := testIsAvailableParams{
|
||||
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{}), WithStartWaitingDataColumnSidecars(startWaiting)},
|
||||
options: []Option{WithStartWaitingDataColumnSidecars(startWaiting)},
|
||||
blobKzgCommitmentsCount: 3,
|
||||
}
|
||||
|
||||
@@ -3039,7 +3130,9 @@ func TestIsDataAvailable(t *testing.T) {
|
||||
cancel()
|
||||
}()
|
||||
|
||||
err := service.isDataAvailable(ctx, root, signed)
|
||||
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
|
||||
require.NoError(t, err)
|
||||
err = service.isDataAvailable(ctx, roBlock)
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -16,7 +16,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/slasher/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v6/config/features"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
@@ -84,7 +83,11 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
	}

	receivedTime := time.Now()
	s.blockBeingSynced.set(blockRoot)
	err := s.blockBeingSynced.set(blockRoot)
	if errors.Is(err, errBlockBeingSynced) {
		log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Debug("Ignoring block currently being synced")
		return nil
	}
	defer s.blockBeingSynced.unset(blockRoot)

	blockCopy, err := block.Copy()
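The new set call returns an error when the same root is already being processed, so ReceiveBlock can drop the duplicate instead of racing. A minimal sketch of such a set-once guard using a mutex-protected map; the names are hypothetical, not Prysm's internal type:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errAlreadySyncing = errors.New("block already being synced")

// syncGuard tracks block roots currently being processed.
type syncGuard struct {
	mu    sync.Mutex
	roots map[[32]byte]struct{}
}

func newSyncGuard() *syncGuard {
	return &syncGuard{roots: make(map[[32]byte]struct{})}
}

// set marks a root as in-flight, or errors if it already is.
func (g *syncGuard) set(root [32]byte) error {
	g.mu.Lock()
	defer g.mu.Unlock()
	if _, ok := g.roots[root]; ok {
		return errAlreadySyncing
	}
	g.roots[root] = struct{}{}
	return nil
}

// unset releases the root once processing is done.
func (g *syncGuard) unset(root [32]byte) {
	g.mu.Lock()
	defer g.mu.Unlock()
	delete(g.roots, root)
}

func main() {
	g := newSyncGuard()
	root := [32]byte{0x01}
	fmt.Println(g.set(root))                               // <nil>
	fmt.Println(errors.Is(g.set(root), errAlreadySyncing)) // true
	g.unset(root)
	fmt.Println(g.set(root)) // <nil> again
}
```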
@@ -108,7 +111,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
return err
|
||||
}
|
||||
|
||||
daWaitedTime, err := s.handleDA(ctx, blockCopy, blockRoot, avs)
|
||||
daWaitedTime, err := s.handleDA(ctx, avs, roblock)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -236,37 +239,19 @@ func (s *Service) validateExecutionAndConsensus(
|
||||
return postState, isValidPayload, nil
|
||||
}
|
||||
|
||||
func (s *Service) handleDA(
|
||||
ctx context.Context,
|
||||
block interfaces.SignedBeaconBlock,
|
||||
blockRoot [fieldparams.RootLength]byte,
|
||||
avs das.AvailabilityStore,
|
||||
) (elapsed time.Duration, err error) {
|
||||
defer func(start time.Time) {
|
||||
elapsed = time.Since(start)
|
||||
|
||||
if err == nil {
|
||||
dataAvailWaitedTime.Observe(float64(elapsed.Milliseconds()))
|
||||
}
|
||||
}(time.Now())
|
||||
|
||||
if avs == nil {
|
||||
if err = s.isDataAvailable(ctx, blockRoot, block); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
func (s *Service) handleDA(ctx context.Context, avs das.AvailabilityStore, block blocks.ROBlock) (time.Duration, error) {
	var err error
	start := time.Now()
	if avs != nil {
		err = avs.IsDataAvailable(ctx, s.CurrentSlot(), block)
	} else {
		err = s.isDataAvailable(ctx, block)
	}

	var rob blocks.ROBlock
	rob, err = blocks.NewROBlockWithRoot(block, blockRoot)
	elapsed := time.Since(start)
	if err != nil {
		return
		dataAvailWaitedTime.Observe(float64(elapsed.Milliseconds()))
	}

	err = avs.IsDataAvailable(ctx, s.CurrentSlot(), rob)

	return
	return elapsed, err
}
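The reshaped handleDA times the availability wait and only records the metric when the check succeeds. A sketch of that timing pattern, with a plain function value standing in for the availability check and a print standing in for the Prometheus observation:

```go
package main

import (
	"fmt"
	"time"
)

// timeDA runs the availability check and returns how long it blocked.
// The metric is only observed when the check succeeds, matching the diff above.
func timeDA(check func() error, observe func(ms float64)) (time.Duration, error) {
	start := time.Now()
	err := check()
	elapsed := time.Since(start)
	if err == nil {
		observe(float64(elapsed.Milliseconds()))
	}
	return elapsed, err
}

func main() {
	elapsed, err := timeDA(
		func() error { time.Sleep(10 * time.Millisecond); return nil },
		func(ms float64) { fmt.Printf("observed %.0f ms\n", ms) },
	)
	fmt.Println(elapsed > 0, err) // true <nil>
}
```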
|
||||
func (s *Service) reportPostBlockProcessing(
|
||||
|
||||
@@ -192,7 +192,9 @@ func TestHandleDA(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
s, _ := minimalTestService(t)
|
||||
elapsed, err := s.handleDA(t.Context(), signedBeaconBlock, [fieldparams.RootLength]byte{}, nil)
|
||||
block, err := blocks.NewROBlockWithRoot(signedBeaconBlock, [32]byte{})
|
||||
require.NoError(t, err)
|
||||
elapsed, err := s.handleDA(t.Context(), nil, block)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, elapsed > 0, "Elapsed time should be greater than 0")
|
||||
}
|
||||
@@ -311,7 +313,10 @@ func TestService_HasBlock(t *testing.T) {
|
||||
r, err = b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, s.HasBlock(t.Context(), r))
|
||||
s.blockBeingSynced.set(r)
|
||||
err = s.blockBeingSynced.set(r)
|
||||
require.NoError(t, err)
|
||||
err = s.blockBeingSynced.set(r)
|
||||
require.ErrorIs(t, err, errBlockBeingSynced)
|
||||
require.Equal(t, false, s.HasBlock(t.Context(), r))
|
||||
}
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@ func (s *Service) ReceiveDataColumns(dataColumnSidecars []blocks.VerifiedRODataC
|
||||
// ReceiveDataColumn receives a single data column.
|
||||
func (s *Service) ReceiveDataColumn(dataColumnSidecar blocks.VerifiedRODataColumn) error {
|
||||
if err := s.dataColumnStorage.Save([]blocks.VerifiedRODataColumn{dataColumnSidecar}); err != nil {
|
||||
return errors.Wrap(err, "save data column sidecars")
|
||||
return errors.Wrap(err, "save data column sidecar")
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -12,11 +12,9 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/async/event"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
|
||||
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
coreTime "github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
@@ -31,6 +29,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
@@ -97,7 +96,6 @@ type config struct {
|
||||
FinalizedStateAtStartUp state.BeaconState
|
||||
ExecutionEngineCaller execution.EngineCaller
|
||||
SyncChecker Checker
|
||||
CustodyInfo *peerdas.CustodyInfo
|
||||
}
|
||||
|
||||
// Checker is an interface used to determine if a node is in initial sync
|
||||
@@ -208,17 +206,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
|
||||
// Start a blockchain service's main event loop.
|
||||
func (s *Service) Start() {
|
||||
saved := s.cfg.FinalizedStateAtStartUp
|
||||
defer s.removeStartupState()
|
||||
|
||||
if saved != nil && !saved.IsNil() {
|
||||
if err := s.StartFromSavedState(saved); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
} else {
|
||||
if err := s.startFromExecutionChain(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if err := s.StartFromSavedState(s.cfg.FinalizedStateAtStartUp); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
s.spawnProcessAttestationsRoutine()
|
||||
go s.runLateBlockTasks()
|
||||
@@ -267,6 +257,9 @@ func (s *Service) Status() error {
|
||||
|
||||
// StartFromSavedState initializes the blockchain using a previously saved finalized checkpoint.
|
||||
func (s *Service) StartFromSavedState(saved state.BeaconState) error {
|
||||
if state.IsNil(saved) {
|
||||
return errors.New("Last finalized state at startup is nil")
|
||||
}
|
||||
log.Info("Blockchain data already exists in DB, initializing...")
|
||||
s.genesisTime = saved.GenesisTime()
|
||||
s.cfg.AttService.SetGenesisTime(saved.GenesisTime())
|
||||
@@ -296,6 +289,20 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
|
||||
if err := s.clockSetter.SetClock(startup.NewClock(s.genesisTime, vr)); err != nil {
|
||||
return errors.Wrap(err, "failed to initialize blockchain service")
|
||||
}
|
||||
|
||||
if !params.FuluEnabled() {
|
||||
return nil
|
||||
}
|
||||
|
||||
earliestAvailableSlot, custodySubnetCount, err := s.updateCustodyInfoInDB(saved.Slot())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get and save custody group count")
|
||||
}
|
||||
|
||||
if _, _, err := s.cfg.P2P.UpdateCustodyInfo(earliestAvailableSlot, custodySubnetCount); err != nil {
|
||||
return errors.Wrap(err, "update custody info")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -358,62 +365,6 @@ func (s *Service) initializeHead(ctx context.Context, st state.BeaconState) erro
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) startFromExecutionChain() error {
|
||||
log.Info("Waiting to reach the validator deposit threshold to start the beacon chain...")
|
||||
if s.cfg.ChainStartFetcher == nil {
|
||||
return errors.New("not configured execution chain")
|
||||
}
|
||||
go func() {
|
||||
stateChannel := make(chan *feed.Event, 1)
|
||||
stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
|
||||
defer stateSub.Unsubscribe()
|
||||
for {
|
||||
select {
|
||||
case e := <-stateChannel:
|
||||
if e.Type == statefeed.ChainStarted {
|
||||
data, ok := e.Data.(*statefeed.ChainStartedData)
|
||||
if !ok {
|
||||
log.Error("Event data is not type *statefeed.ChainStartedData")
|
||||
return
|
||||
}
|
||||
log.WithField("startTime", data.StartTime).Debug("Received chain start event")
|
||||
s.onExecutionChainStart(s.ctx, data.StartTime)
|
||||
return
|
||||
}
|
||||
case <-s.ctx.Done():
|
||||
log.Debug("Context closed, exiting goroutine")
|
||||
return
|
||||
case err := <-stateSub.Err():
|
||||
log.WithError(err).Error("Subscription to state forRoot failed")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// onExecutionChainStart initializes a series of deposits from the ChainStart deposits in the eth1
|
||||
// deposit contract, initializes the beacon chain's state, and kicks off the beacon chain.
|
||||
func (s *Service) onExecutionChainStart(ctx context.Context, genesisTime time.Time) {
|
||||
preGenesisState := s.cfg.ChainStartFetcher.PreGenesisState()
|
||||
initializedState, err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.cfg.ChainStartFetcher.ChainStartEth1Data())
|
||||
if err != nil {
|
||||
log.WithError(err).Fatal("Could not initialize beacon chain")
|
||||
}
|
||||
// We start a counter to genesis, if needed.
|
||||
gRoot, err := initializedState.HashTreeRoot(s.ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Fatal("Could not hash tree root genesis state")
|
||||
}
|
||||
go slots.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()), gRoot)
|
||||
|
||||
vr := bytesutil.ToBytes32(initializedState.GenesisValidatorsRoot())
|
||||
if err := s.clockSetter.SetClock(startup.NewClock(genesisTime, vr)); err != nil {
|
||||
log.WithError(err).Fatal("Failed to initialize blockchain service from execution start event")
|
||||
}
|
||||
}
|
||||
|
||||
// initializes the state and genesis block of the beacon chain to persistent storage
|
||||
// based on a genesis timestamp value obtained from the ChainStart event emitted
|
||||
// by the ETH1.0 Deposit Contract and the POWChain service of the node.
|
||||
@@ -516,6 +467,57 @@ func (s *Service) removeStartupState() {
|
||||
s.cfg.FinalizedStateAtStartUp = nil
|
||||
}
|
||||
|
||||
// updateCustodyInfoInDB updates the custody information in the database.
// It returns the earliest available slot and the (potentially updated) custody group count.
func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot, uint64, error) {
|
||||
isSubscribedToAllDataSubnets := flags.Get().SubscribeAllDataSubnets
|
||||
|
||||
beaconConfig := params.BeaconConfig()
|
||||
custodyRequirement := beaconConfig.CustodyRequirement
|
||||
|
||||
// Check if the node was previously subscribed to all data subnets, and if so,
|
||||
// store the new status accordingly.
|
||||
wasSubscribedToAllDataSubnets, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSubscribedToAllDataSubnets)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not update subscription status to all data subnets")
|
||||
}
|
||||
|
||||
// Warn the user if the node was previously subscribed to all data subnets and is not any more.
|
||||
if wasSubscribedToAllDataSubnets && !isSubscribedToAllDataSubnets {
|
||||
log.Warnf(
|
||||
"Because the flag `--%s` was previously used, the node will still subscribe to all data subnets.",
|
||||
flags.SubscribeAllDataSubnets.Name,
|
||||
)
|
||||
}
|
||||
|
||||
// Compute the custody group count.
|
||||
custodyGroupCount := custodyRequirement
|
||||
if isSubscribedToAllDataSubnets {
|
||||
custodyGroupCount = beaconConfig.NumberOfColumns
|
||||
}
|
||||
|
||||
// Safely compute the fulu fork slot.
|
||||
fuluForkSlot, err := fuluForkSlot()
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrap(err, "fulu fork slot")
|
||||
}
|
||||
|
||||
// If slot is before the fulu fork slot, then use the earliest stored slot as the reference slot.
|
||||
if slot < fuluForkSlot {
|
||||
slot, err = s.cfg.BeaconDB.EarliestSlot(s.ctx)
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrap(err, "earliest slot")
|
||||
}
|
||||
}
|
||||
|
||||
earliestAvailableSlot, custodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, custodyGroupCount)
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrap(err, "update custody info")
|
||||
}
|
||||
|
||||
return earliestAvailableSlot, custodyGroupCount, nil
|
||||
}
|
||||
|
||||
func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
|
||||
currentTime := prysmTime.Now()
|
||||
if currentTime.After(genesisTime) {
|
||||
@@ -532,3 +534,19 @@ func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db d
|
||||
}
|
||||
go slots.CountdownToGenesis(ctx, genesisTime, uint64(gState.NumValidators()), gRoot)
|
||||
}
|
||||
|
||||
func fuluForkSlot() (primitives.Slot, error) {
	beaconConfig := params.BeaconConfig()

	fuluForkEpoch := beaconConfig.FuluForkEpoch
	if fuluForkEpoch == beaconConfig.FarFutureEpoch {
		return beaconConfig.FarFutureSlot, nil
	}

	forkFuluSlot, err := slots.EpochStart(fuluForkEpoch)
	if err != nil {
		return 0, errors.Wrap(err, "epoch start")
	}

	return forkFuluSlot, nil
}
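fuluForkSlot is simply the first slot of the configured Fulu fork epoch, with a far-future sentinel when the fork is not scheduled. A sketch of that conversion, assuming 32 slots per epoch for illustration:

```go
package main

import "fmt"

const (
	slotsPerEpoch  = 32         // illustrative; the real value comes from the beacon config
	farFutureEpoch = ^uint64(0) // sentinel for "fork not scheduled"
	farFutureSlot  = ^uint64(0)
)

// forkSlot returns the first slot of the given fork epoch,
// or the far-future slot when the fork epoch is unset.
func forkSlot(forkEpoch uint64) uint64 {
	if forkEpoch == farFutureEpoch {
		return farFutureSlot
	}
	return forkEpoch * slotsPerEpoch
}

func main() {
	fmt.Println(forkSlot(10))             // 320
	fmt.Println(forkSlot(farFutureEpoch)) // 18446744073709551615
}
```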
|
||||
|
||||
@@ -31,6 +31,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/container/trie"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/genesis"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
@@ -51,6 +52,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
|
||||
srv.Stop()
|
||||
})
|
||||
bState, _ := util.DeterministicGenesisState(t, 10)
|
||||
genesis.StoreStateDuringTest(t, bState)
|
||||
pbState, err := state_native.ProtobufBeaconStatePhase0(bState.ToProtoUnsafe())
|
||||
require.NoError(t, err)
|
||||
mockTrie, err := trie.NewTrie(0)
|
||||
@@ -71,20 +73,22 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
|
||||
DepositContainers: []*ethpb.DepositContainer{},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
depositCache, err := depositsnapshot.New()
|
||||
require.NoError(t, err)
|
||||
|
||||
web3Service, err = execution.NewService(
|
||||
ctx,
|
||||
execution.WithDatabase(beaconDB),
|
||||
execution.WithHttpEndpoint(endpoint),
|
||||
execution.WithDepositContractAddress(common.Address{}),
|
||||
execution.WithDepositCache(depositCache),
|
||||
)
|
||||
require.NoError(t, err, "Unable to set up web3 service")
|
||||
|
||||
attService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
|
||||
require.NoError(t, err)
|
||||
|
||||
depositCache, err := depositsnapshot.New()
|
||||
require.NoError(t, err)
|
||||
|
||||
fc := doublylinkedtree.New()
|
||||
stateGen := stategen.New(beaconDB, fc)
|
||||
// Save a state in stategen for purposes of testing a service stop / shutdown.
|
||||
@@ -396,24 +400,6 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
|
||||
require.Equal(t, true, s.cfg.BeaconDB.HasBlock(s.ctx, r))
|
||||
}
|
||||
|
||||
func TestProcessChainStartTime_ReceivedFeed(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
mgs := &MockClockSetter{}
|
||||
service.clockSetter = mgs
|
||||
gt := time.Now()
|
||||
service.onExecutionChainStart(t.Context(), gt)
|
||||
gs, err := beaconDB.GenesisState(ctx)
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, nil, gs)
|
||||
require.Equal(t, 32, len(gs.GenesisValidatorsRoot()))
|
||||
var zero [32]byte
|
||||
require.DeepNotEqual(t, gs.GenesisValidatorsRoot(), zero[:])
|
||||
require.Equal(t, gt, mgs.G.GenesisTime())
|
||||
require.Equal(t, bytesutil.ToBytes32(gs.GenesisValidatorsRoot()), mgs.G.GenesisValidatorsRoot())
|
||||
}
|
||||
|
||||
func BenchmarkHasBlockDB(b *testing.B) {
|
||||
ctx := b.Context()
|
||||
s := testServiceWithDB(b)
|
||||
|
||||
@@ -20,7 +20,7 @@ func (s *Service) setupForkchoice(st state.BeaconState) error {
|
||||
return errors.Wrap(err, "could not set up forkchoice checkpoints")
|
||||
}
|
||||
if err := s.setupForkchoiceTree(st); err != nil {
|
||||
return errors.Wrap(err, "could not set up forkchoice root")
|
||||
return errors.Wrap(err, "could not set up forkchoice tree")
|
||||
}
|
||||
if err := s.initializeHead(s.ctx, st); err != nil {
|
||||
return errors.Wrap(err, "could not initialize head from db")
|
||||
@@ -30,24 +30,24 @@ func (s *Service) setupForkchoice(st state.BeaconState) error {
|
||||
|
||||
func (s *Service) startupHeadRoot() [32]byte {
|
||||
headStr := features.Get().ForceHead
|
||||
cp := s.FinalizedCheckpt()
|
||||
fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
|
||||
jp := s.CurrentJustifiedCheckpt()
|
||||
jRoot := s.ensureRootNotZeros([32]byte(jp.Root))
|
||||
if headStr == "" {
|
||||
return fRoot
|
||||
return jRoot
|
||||
}
|
||||
if headStr == "head" {
|
||||
root, err := s.cfg.BeaconDB.HeadBlockRoot()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get head block root, starting with finalized block as head")
|
||||
return fRoot
|
||||
log.WithError(err).Error("Could not get head block root, starting with justified block as head")
|
||||
return jRoot
|
||||
}
|
||||
log.Infof("Using Head root of %#x", root)
|
||||
return root
|
||||
}
|
||||
root, err := bytesutil.DecodeHexWithLength(headStr, 32)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not parse head root, starting with finalized block as head")
|
||||
return fRoot
|
||||
log.WithError(err).Error("Could not parse head root, starting with justified block as head")
|
||||
return jRoot
|
||||
}
|
||||
return [32]byte(root)
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@ func Test_startupHeadRoot(t *testing.T) {
|
||||
})
|
||||
defer resetCfg()
|
||||
require.Equal(t, service.startupHeadRoot(), gr)
|
||||
require.LogsContain(t, hook, "Could not get head block root, starting with finalized block as head")
|
||||
require.LogsContain(t, hook, "Could not get head block root, starting with justified block as head")
|
||||
})
|
||||
|
||||
st, _ := util.DeterministicGenesisState(t, 64)
|
||||
|
||||
@@ -24,12 +24,13 @@ import (
|
||||
p2pTesting "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
@@ -54,6 +55,7 @@ type mockBroadcaster struct {
|
||||
|
||||
type mockAccessor struct {
|
||||
mockBroadcaster
|
||||
mockCustodyManager
|
||||
p2pTesting.MockPeerManager
|
||||
}
|
||||
|
||||
@@ -87,7 +89,7 @@ func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) BroadcastDataColumn(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
|
||||
func (mb *mockBroadcaster) BroadcastDataColumnSidecar(_ uint64, _ blocks.VerifiedRODataColumn) error {
|
||||
mb.broadcastCalled = true
|
||||
return nil
|
||||
}
|
||||
@@ -97,6 +99,43 @@ func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.Sig
|
||||
|
||||
var _ p2p.Broadcaster = (*mockBroadcaster)(nil)
|
||||
|
||||
// mockCustodyManager is a mock implementation of p2p.CustodyManager
|
||||
type mockCustodyManager struct {
|
||||
mut sync.RWMutex
|
||||
earliestAvailableSlot primitives.Slot
|
||||
custodyGroupCount uint64
|
||||
}
|
||||
|
||||
func (dch *mockCustodyManager) EarliestAvailableSlot() (primitives.Slot, error) {
|
||||
dch.mut.RLock()
|
||||
defer dch.mut.RUnlock()
|
||||
|
||||
return dch.earliestAvailableSlot, nil
|
||||
}
|
||||
|
||||
func (dch *mockCustodyManager) CustodyGroupCount() (uint64, error) {
|
||||
dch.mut.RLock()
|
||||
defer dch.mut.RUnlock()
|
||||
|
||||
return dch.custodyGroupCount, nil
|
||||
}
|
||||
|
||||
func (dch *mockCustodyManager) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
|
||||
dch.mut.Lock()
|
||||
defer dch.mut.Unlock()
|
||||
|
||||
dch.earliestAvailableSlot = earliestAvailableSlot
|
||||
dch.custodyGroupCount = custodyGroupCount
|
||||
|
||||
return earliestAvailableSlot, custodyGroupCount, nil
|
||||
}
|
||||
|
||||
func (dch *mockCustodyManager) CustodyGroupCountFromPeer(peer.ID) uint64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
var _ p2p.CustodyManager = (*mockCustodyManager)(nil)
|
||||
|
||||
type testServiceRequirements struct {
|
||||
ctx context.Context
|
||||
db db.Database
|
||||
|
||||
@@ -25,6 +25,7 @@ var ErrNoBuilder = errors.New("builder endpoint not configured")
|
||||
// BlockBuilder defines the interface for interacting with the block builder
|
||||
type BlockBuilder interface {
|
||||
SubmitBlindedBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error)
|
||||
SubmitBlindedBlockPostFulu(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) error
|
||||
GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubKey [48]byte) (builder.SignedBid, error)
|
||||
RegisterValidator(ctx context.Context, reg []*ethpb.SignedValidatorRegistrationV1) error
|
||||
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
|
||||
@@ -101,6 +102,22 @@ func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlyS
|
||||
return s.c.SubmitBlindedBlock(ctx, b)
|
||||
}
|
||||
|
||||
// SubmitBlindedBlockPostFulu submits a blinded block to the builder relay network post-Fulu.
|
||||
// After Fulu, relays only return status codes (no payload).
|
||||
func (s *Service) SubmitBlindedBlockPostFulu(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
ctx, span := trace.StartSpan(ctx, "builder.SubmitBlindedBlockPostFulu")
|
||||
defer span.End()
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
submitBlindedBlockLatency.Observe(float64(time.Since(start).Milliseconds()))
|
||||
}()
|
||||
if s.c == nil {
|
||||
return ErrNoBuilder
|
||||
}
|
||||
|
||||
return s.c.SubmitBlindedBlockPostFulu(ctx, b)
|
||||
}
|
||||
|
||||
// GetHeader retrieves the header for a given slot and parent hash from the builder relay network.
|
||||
func (s *Service) GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubKey [48]byte) (builder.SignedBid, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "builder.GetHeader")
|
||||
|
||||
@@ -24,21 +24,22 @@ type Config struct {
|
||||
|
||||
// MockBuilderService to mock builder.
|
||||
type MockBuilderService struct {
|
||||
HasConfigured bool
|
||||
Payload *v1.ExecutionPayload
|
||||
PayloadCapella *v1.ExecutionPayloadCapella
|
||||
PayloadDeneb *v1.ExecutionPayloadDeneb
|
||||
BlobBundle *v1.BlobsBundle
|
||||
BlobBundleV2 *v1.BlobsBundleV2
|
||||
ErrSubmitBlindedBlock error
|
||||
Bid *ethpb.SignedBuilderBid
|
||||
BidCapella *ethpb.SignedBuilderBidCapella
|
||||
BidDeneb *ethpb.SignedBuilderBidDeneb
|
||||
BidElectra *ethpb.SignedBuilderBidElectra
|
||||
RegistrationCache *cache.RegistrationCache
|
||||
ErrGetHeader error
|
||||
ErrRegisterValidator error
|
||||
Cfg *Config
|
||||
HasConfigured bool
|
||||
Payload *v1.ExecutionPayload
|
||||
PayloadCapella *v1.ExecutionPayloadCapella
|
||||
PayloadDeneb *v1.ExecutionPayloadDeneb
|
||||
BlobBundle *v1.BlobsBundle
|
||||
BlobBundleV2 *v1.BlobsBundleV2
|
||||
ErrSubmitBlindedBlock error
|
||||
ErrSubmitBlindedBlockPostFulu error
|
||||
Bid *ethpb.SignedBuilderBid
|
||||
BidCapella *ethpb.SignedBuilderBidCapella
|
||||
BidDeneb *ethpb.SignedBuilderBidDeneb
|
||||
BidElectra *ethpb.SignedBuilderBidElectra
|
||||
RegistrationCache *cache.RegistrationCache
|
||||
ErrGetHeader error
|
||||
ErrRegisterValidator error
|
||||
Cfg *Config
|
||||
}
|
||||
|
||||
// Configured for mocking.
|
||||
@@ -115,3 +116,8 @@ func (s *MockBuilderService) RegistrationByValidatorID(ctx context.Context, id p
|
||||
func (s *MockBuilderService) RegisterValidator(context.Context, []*ethpb.SignedValidatorRegistrationV1) error {
|
||||
return s.ErrRegisterValidator
|
||||
}
|
||||
|
||||
// SubmitBlindedBlockPostFulu for mocking.
|
||||
func (s *MockBuilderService) SubmitBlindedBlockPostFulu(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
return s.ErrSubmitBlindedBlockPostFulu
|
||||
}
|
||||
|
||||
@@ -41,7 +41,6 @@ go_library(
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/bls"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
@@ -101,7 +100,7 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
|
||||
// via the respective epoch.
|
||||
func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState, blk interfaces.ReadOnlySignedBeaconBlock, blkRoot [32]byte) error {
|
||||
currentEpoch := slots.ToEpoch(blk.Block().Slot())
|
||||
fork, err := forks.Fork(currentEpoch)
|
||||
fork, err := params.Fork(currentEpoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -102,13 +102,13 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
|
||||
return nil, err
|
||||
} else if n == params.BeaconConfig().PendingPartialWithdrawalsLimit && !isFullExitRequest {
|
||||
// if the PendingPartialWithdrawalsLimit is met, the user would have paid for a partial withdrawal that's not included
|
||||
log.Debugln("Skipping execution layer withdrawal request, PendingPartialWithdrawalsLimit reached")
|
||||
log.Debug("Skipping execution layer withdrawal request, PendingPartialWithdrawalsLimit reached")
|
||||
continue
|
||||
}
|
||||
|
||||
vIdx, exists := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(wr.ValidatorPubkey))
|
||||
if !exists {
|
||||
log.Debugf("Skipping execution layer withdrawal request, validator index for %s not found\n", hexutil.Encode(wr.ValidatorPubkey))
|
||||
log.WithField("validator", hexutil.Encode(wr.ValidatorPubkey)).Debug("Skipping execution layer withdrawal request, validator index not found")
|
||||
continue
|
||||
}
|
||||
validator, err := st.ValidatorAtIndexReadOnly(vIdx)
|
||||
@@ -120,23 +120,23 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
|
||||
wc := validator.GetWithdrawalCredentials()
|
||||
isCorrectSourceAddress := bytes.Equal(wc[12:], wr.SourceAddress)
|
||||
if !hasCorrectCredential || !isCorrectSourceAddress {
|
||||
log.Debugln("Skipping execution layer withdrawal request, wrong withdrawal credentials")
|
||||
log.Debug("Skipping execution layer withdrawal request, wrong withdrawal credentials")
|
||||
continue
|
||||
}
|
||||
|
||||
// Verify the validator is active.
|
||||
if !helpers.IsActiveValidatorUsingTrie(validator, currentEpoch) {
|
||||
log.Debugln("Skipping execution layer withdrawal request, validator not active")
|
||||
log.Debug("Skipping execution layer withdrawal request, validator not active")
|
||||
continue
|
||||
}
|
||||
// Verify the validator has not yet submitted an exit.
|
||||
if validator.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
|
||||
log.Debugln("Skipping execution layer withdrawal request, validator has submitted an exit already")
|
||||
log.Debug("Skipping execution layer withdrawal request, validator has submitted an exit already")
|
||||
continue
|
||||
}
|
||||
// Verify the validator has been active long enough.
|
||||
if currentEpoch < validator.ActivationEpoch().AddEpoch(params.BeaconConfig().ShardCommitteePeriod) {
|
||||
log.Debugln("Skipping execution layer withdrawal request, validator has not been active long enough")
|
||||
log.Debug("Skipping execution layer withdrawal request, validator has not been active long enough")
|
||||
continue
|
||||
}
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
)
|
||||
|
||||
// UpgradeToFulu upgrades a generic beacon state to a Fulu state.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/fork.md#upgrading-the-state
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/fork.md#upgrading-the-state
|
||||
func UpgradeToFulu(ctx context.Context, beaconState state.BeaconState) (state.BeaconState, error) {
|
||||
currentSyncCommittee, err := beaconState.CurrentSyncCommittee()
|
||||
if err != nil {
|
||||
|
||||
@@ -317,23 +317,15 @@ func ProposerAssignments(ctx context.Context, state state.BeaconState, epoch pri
|
||||
}
|
||||
|
||||
proposerAssignments := make(map[primitives.ValidatorIndex][]primitives.Slot)
|
||||
|
||||
originalStateSlot := state.Slot()
|
||||
|
||||
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
|
||||
// Skip proposer assignment for genesis slot.
|
||||
if slot == 0 {
|
||||
continue
|
||||
}
|
||||
// Set the state's current slot.
|
||||
if err := state.SetSlot(slot); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Determine the proposer index for the current slot.
|
||||
i, err := BeaconProposerIndex(ctx, state)
|
||||
i, err := BeaconProposerIndexAtSlot(ctx, state, slot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check proposer at slot %d", state.Slot())
|
||||
return nil, errors.Wrapf(err, "could not check proposer at slot %d", slot)
|
||||
}
|
||||
|
||||
// Append the slot to the proposer's assignments.
|
||||
@@ -342,12 +334,6 @@ func ProposerAssignments(ctx context.Context, state state.BeaconState, epoch pri
		}
		proposerAssignments[i] = append(proposerAssignments[i], slot)
	}

	// Reset state back to its original slot.
	if err := state.SetSlot(originalStateSlot); err != nil {
		return nil, err
	}

	return proposerAssignments, nil
}
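After this change the assignments are built by asking for the proposer at each slot directly instead of mutating the state's slot and restoring it afterwards. A sketch of that accumulation, with a hypothetical proposerAt function standing in for BeaconProposerIndexAtSlot:

```go
package main

import "fmt"

const slotsPerEpoch = 8 // illustrative only

// proposerAt is a stand-in for BeaconProposerIndexAtSlot.
func proposerAt(slot uint64) uint64 {
	return slot % 4 // toy assignment for the example
}

// proposerAssignments maps each proposer index to the slots it proposes in the epoch.
func proposerAssignments(epoch uint64) map[uint64][]uint64 {
	assignments := make(map[uint64][]uint64)
	startSlot := epoch * slotsPerEpoch
	for slot := startSlot; slot < startSlot+slotsPerEpoch; slot++ {
		if slot == 0 {
			continue // the genesis slot has no proposer assignment
		}
		idx := proposerAt(slot)
		assignments[idx] = append(assignments[idx], slot)
	}
	return assignments
}

func main() {
	fmt.Println(proposerAssignments(1)) // map[0:[8 12] 1:[9 13] 2:[10 14] 3:[11 15]]
}
```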
|
||||
|
||||
@@ -78,6 +78,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
|
||||
|
||||
func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
params.SetupTestConfigCleanup(t)
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
@@ -264,6 +265,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
|
||||
@@ -309,23 +309,29 @@ func beaconProposerIndexAtSlotFulu(state state.ReadOnlyBeaconState, slot primiti
	if err != nil {
		return 0, errors.Wrap(err, "could not get proposer lookahead")
	}
	spe := params.BeaconConfig().SlotsPerEpoch
	if e == stateEpoch {
		return lookAhead[slot%params.BeaconConfig().SlotsPerEpoch], nil
		return lookAhead[slot%spe], nil
	}
	// The caller is requesting the proposer for the next epoch
	return lookAhead[slot%params.BeaconConfig().SlotsPerEpoch+params.BeaconConfig().SlotsPerEpoch], nil
	return lookAhead[spe+slot%spe], nil
}
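The cached lookahead holds two epochs of proposer indices back to back, so the current epoch uses slot % SLOTS_PER_EPOCH and the next epoch uses the same offset shifted by one epoch. A sketch of that indexing, with a small illustrative epoch length:

```go
package main

import (
	"errors"
	"fmt"
)

const slotsPerEpoch = 4 // illustrative; the real value is params.BeaconConfig().SlotsPerEpoch

// lookaheadIndex maps a slot to its position in a two-epoch proposer lookahead.
func lookaheadIndex(slot, stateEpoch uint64) (uint64, error) {
	epoch := slot / slotsPerEpoch
	switch epoch {
	case stateEpoch:
		return slot % slotsPerEpoch, nil
	case stateEpoch + 1:
		return slotsPerEpoch + slot%slotsPerEpoch, nil
	default:
		return 0, errors.New("slot is not in the current or next epoch")
	}
}

func main() {
	// With stateEpoch = 2, slots 8-11 map to 0-3 and slots 12-15 map to 4-7.
	for _, slot := range []uint64{8, 11, 12, 15} {
		idx, _ := lookaheadIndex(slot, 2)
		fmt.Println(slot, "->", idx)
	}
}
```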
|
||||
// BeaconProposerIndexAtSlot returns proposer index at the given slot from the
|
||||
// point of view of the given state as head state
|
||||
func BeaconProposerIndexAtSlot(ctx context.Context, state state.ReadOnlyBeaconState, slot primitives.Slot) (primitives.ValidatorIndex, error) {
|
||||
if state.Version() >= version.Fulu {
|
||||
return beaconProposerIndexAtSlotFulu(state, slot)
|
||||
}
|
||||
e := slots.ToEpoch(slot)
|
||||
stateEpoch := slots.ToEpoch(state.Slot())
|
||||
// Even if the state is post Fulu, we may request a past proposer index.
|
||||
if state.Version() >= version.Fulu && e >= params.BeaconConfig().FuluForkEpoch {
|
||||
// We can use the cached lookahead only for the current and the next epoch.
|
||||
if e == stateEpoch || e == stateEpoch+1 {
|
||||
return beaconProposerIndexAtSlotFulu(state, slot)
|
||||
}
|
||||
}
|
||||
// The cache uses the state root of the previous epoch - minimum_seed_lookahead last slot as key. (e.g. Starting epoch 1, slot 32, the key would be block root at slot 31)
|
||||
// For simplicity, the node will skip caching of genesis epoch.
|
||||
if e > params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
|
||||
// For simplicity, the node will skip caching of genesis epoch. If the passed state has not yet reached this slot then we do not check the cache.
|
||||
if e <= stateEpoch && e > params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
|
||||
s, err := slots.EpochEnd(e - 1)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
|
||||
@@ -1161,6 +1161,10 @@ func TestValidatorMaxEffectiveBalance(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBeaconProposerIndexAtSlotFulu(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.FuluForkEpoch = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
lookahead := make([]uint64, 64)
|
||||
lookahead[0] = 15
|
||||
lookahead[1] = 16
|
||||
@@ -1180,8 +1184,4 @@ func TestBeaconProposerIndexAtSlotFulu(t *testing.T) {
|
||||
idx, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 130)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.ValidatorIndex(42), idx)
|
||||
_, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 95)
|
||||
require.ErrorContains(t, "slot 95 is not in the current epoch 3 or the next epoch", err)
|
||||
_, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 160)
|
||||
require.ErrorContains(t, "slot 160 is not in the current epoch 3 or the next epoch", err)
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ go_library(
|
||||
srcs = [
|
||||
"das_core.go",
|
||||
"info.go",
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"p2p_interface.go",
|
||||
"reconstruction.go",
|
||||
@@ -16,11 +17,9 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
@@ -34,6 +33,7 @@ go_library(
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@org_golang_x_sync//errgroup:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -53,7 +53,6 @@ go_test(
|
||||
":go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
|
||||
@@ -4,15 +4,10 @@ import (
|
||||
"encoding/binary"
|
||||
"math"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/hash"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/pkg/errors"
|
||||
@@ -20,26 +15,16 @@ import (
|
||||
|
||||
var (
|
||||
// Custom errors
|
||||
ErrCustodyGroupTooLarge = errors.New("custody group too large")
|
||||
ErrCustodyGroupCountTooLarge = errors.New("custody group count too large")
|
||||
ErrSizeMismatch = errors.New("mismatch in the number of blob KZG commitments and cellsAndProofs")
|
||||
ErrNotEnoughDataColumnSidecars = errors.New("not enough columns")
|
||||
ErrDataColumnSidecarsNotSortedByIndex = errors.New("data column sidecars are not sorted by index")
|
||||
errWrongComputedCustodyGroupCount = errors.New("wrong computed custody group count, should never happen")
|
||||
ErrCustodyGroupTooLarge = errors.New("custody group too large")
|
||||
ErrCustodyGroupCountTooLarge = errors.New("custody group count too large")
|
||||
errWrongComputedCustodyGroupCount = errors.New("wrong computed custody group count, should never happen")
|
||||
|
||||
// maxUint256 is the maximum value of an uint256.
|
||||
maxUint256 = &uint256.Int{math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64}
|
||||
)
|
||||
|
||||
type CustodyType int
|
||||
|
||||
const (
|
||||
Target CustodyType = iota
|
||||
Actual
|
||||
)
|
||||
|
||||
// CustodyGroups computes the custody groups the node should participate in for custody.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/das-core.md#get_custody_groups
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#get_custody_groups
|
||||
func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) ([]uint64, error) {
|
||||
numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
|
||||
|
||||
@@ -102,7 +87,7 @@ func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) ([]uint64, error)
|
||||
}
|
||||
|
||||
// ComputeColumnsForCustodyGroup computes the columns for a given custody group.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/das-core.md#compute_columns_for_custody_group
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#compute_columns_for_custody_group
|
||||
func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
|
||||
beaconConfig := params.BeaconConfig()
|
||||
numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups
|
||||
@@ -124,44 +109,6 @@ func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
|
||||
return columns, nil
|
||||
}
// DataColumnSidecars computes the data column sidecars from the signed block, cells and cell proofs.
// The returned value contains pointers to function parameters.
// (If the caller alters `cellsAndProofs` afterwards, the returned value will be modified as well.)
// https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/specs/fulu/validator.md#get_data_column_sidecars_from_block
func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, cellsAndProofs []kzg.CellsAndProofs) ([]*ethpb.DataColumnSidecar, error) {
|
||||
if signedBlock == nil || signedBlock.IsNil() || len(cellsAndProofs) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
block := signedBlock.Block()
|
||||
blockBody := block.Body()
|
||||
blobKzgCommitments, err := blockBody.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
if len(blobKzgCommitments) != len(cellsAndProofs) {
|
||||
return nil, ErrSizeMismatch
|
||||
}
|
||||
|
||||
signedBlockHeader, err := signedBlock.Header()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "signed block header")
|
||||
}
|
||||
|
||||
kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "merkle proof KZG commitments")
|
||||
}
|
||||
|
||||
dataColumnSidecars, err := dataColumnsSidecars(signedBlockHeader, blobKzgCommitments, kzgCommitmentsInclusionProof, cellsAndProofs)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "data column sidecars")
|
||||
}
|
||||
|
||||
return dataColumnSidecars, nil
|
||||
}
|
||||
|
||||
// ComputeCustodyGroupForColumn computes the custody group for a given column.
|
||||
// It is the reciprocal function of ComputeColumnsForCustodyGroup.
|
||||
func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) {
|
||||
@@ -176,19 +123,6 @@ func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) {
|
||||
return columnIndex % numberOfCustodyGroups, nil
|
||||
}
|
||||
|
||||
// CustodyGroupSamplingSize returns the number of custody groups the node should sample from.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/das-core.md#custody-sampling
|
||||
func (custodyInfo *CustodyInfo) CustodyGroupSamplingSize(ct CustodyType) uint64 {
|
||||
custodyGroupCount := custodyInfo.TargetGroupCount.Get()
|
||||
|
||||
if ct == Actual {
|
||||
custodyGroupCount = custodyInfo.ActualGroupCount()
|
||||
}
|
||||
|
||||
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
|
||||
return max(samplesPerSlot, custodyGroupCount)
|
||||
}
|
||||
|
||||
// CustodyColumns computes the custody columns from the custody groups.
|
||||
func CustodyColumns(custodyGroups []uint64) (map[uint64]bool, error) {
|
||||
numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
|
||||
@@ -214,64 +148,3 @@ func CustodyColumns(custodyGroups []uint64) (map[uint64]bool, error) {
|
||||
|
||||
return columns, nil
|
||||
}
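As a usage sketch (not part of this diff), the two custody helpers above can be chained to go from a node ID to the concrete columns a node should custody; the wrapper name is illustrative and an initialized beacon configuration is assumed.

package example

import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

// custodiedColumns derives the custody groups for a node ID and expands them
// into the concrete column indices the node should custody.
func custodiedColumns(nodeID enode.ID, custodyGroupCount uint64) (map[uint64]bool, error) {
	groups, err := peerdas.CustodyGroups(nodeID, custodyGroupCount)
	if err != nil {
		return nil, err
	}
	return peerdas.CustodyColumns(groups)
}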
// dataColumnsSidecars computes the data column sidecars from the signed block header, the blob KZG commitments,
// the KZG commitment inclusion proofs, and the cells and cell proofs.
// The returned value contains pointers to function parameters.
// (If the caller alters input parameters afterwards, the returned value will be modified as well.)
// https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/specs/fulu/validator.md#get_data_column_sidecars
func dataColumnsSidecars(
|
||||
signedBlockHeader *ethpb.SignedBeaconBlockHeader,
|
||||
blobKzgCommitments [][]byte,
|
||||
kzgCommitmentsInclusionProof [][]byte,
|
||||
cellsAndProofs []kzg.CellsAndProofs,
|
||||
) ([]*ethpb.DataColumnSidecar, error) {
|
||||
start := time.Now()
|
||||
if len(blobKzgCommitments) != len(cellsAndProofs) {
|
||||
return nil, ErrSizeMismatch
|
||||
}
|
||||
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
blobsCount := len(cellsAndProofs)
|
||||
sidecars := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns)
|
||||
for columnIndex := range numberOfColumns {
|
||||
column := make([]kzg.Cell, 0, blobsCount)
|
||||
kzgProofOfColumn := make([]kzg.Proof, 0, blobsCount)
|
||||
|
||||
for rowIndex := range blobsCount {
|
||||
cellsForRow := cellsAndProofs[rowIndex].Cells
|
||||
proofsForRow := cellsAndProofs[rowIndex].Proofs
|
||||
|
||||
cell := cellsForRow[columnIndex]
|
||||
column = append(column, cell)
|
||||
|
||||
kzgProof := proofsForRow[columnIndex]
|
||||
kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
|
||||
}
|
||||
|
||||
columnBytes := make([][]byte, 0, blobsCount)
|
||||
for i := range column {
|
||||
columnBytes = append(columnBytes, column[i][:])
|
||||
}
|
||||
|
||||
kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
|
||||
for _, kzgProof := range kzgProofOfColumn {
|
||||
kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, kzgProof[:])
|
||||
}
|
||||
|
||||
sidecar := ðpb.DataColumnSidecar{
|
||||
Index: columnIndex,
|
||||
Column: columnBytes,
|
||||
KzgCommitments: blobKzgCommitments,
|
||||
KzgProofs: kzgProofOfColumnBytes,
|
||||
SignedBlockHeader: signedBlockHeader,
|
||||
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
|
||||
}
|
||||
|
||||
sidecars = append(sidecars, sidecar)
|
||||
}
|
||||
|
||||
dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds()))
|
||||
return sidecars, nil
|
||||
}
|
||||
|
||||
@@ -3,13 +3,9 @@ package peerdas_test
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
)
|
||||
|
||||
@@ -31,44 +27,6 @@ func TestComputeColumnsForCustodyGroup(t *testing.T) {
|
||||
require.ErrorIs(t, err, peerdas.ErrCustodyGroupTooLarge)
|
||||
}
|
||||
|
||||
func TestDataColumnSidecars(t *testing.T) {
|
||||
t.Run("nil signed block", func(t *testing.T) {
|
||||
var expected []*ethpb.DataColumnSidecar = nil
|
||||
actual, err := peerdas.DataColumnSidecars(nil, []kzg.CellsAndProofs{})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepSSZEqual(t, expected, actual)
|
||||
})
|
||||
|
||||
t.Run("empty cells and proofs", func(t *testing.T) {
|
||||
// Create a protobuf signed beacon block.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
actual, err := peerdas.DataColumnSidecars(signedBeaconBlock, []kzg.CellsAndProofs{})
|
||||
require.NoError(t, err)
|
||||
require.IsNil(t, actual)
|
||||
})
|
||||
|
||||
t.Run("sizes mismatch", func(t *testing.T) {
|
||||
// Create a protobuf signed beacon block.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create cells and proofs.
|
||||
cellsAndProofs := make([]kzg.CellsAndProofs, 1)
|
||||
|
||||
_, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
|
||||
require.ErrorIs(t, err, peerdas.ErrSizeMismatch)
|
||||
})
|
||||
}
|
||||
|
||||
func TestComputeCustodyGroupForColumn(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
@@ -104,62 +62,6 @@ func TestComputeCustodyGroupForColumn(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestCustodyGroupSamplingSize(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
custodyType peerdas.CustodyType
|
||||
validatorsCustodyRequirement uint64
|
||||
toAdvertiseCustodyGroupCount uint64
|
||||
expected uint64
|
||||
}{
|
||||
{
|
||||
name: "target, lower than samples per slot",
|
||||
custodyType: peerdas.Target,
|
||||
validatorsCustodyRequirement: 2,
|
||||
expected: 8,
|
||||
},
|
||||
{
|
||||
name: "target, higher than samples per slot",
|
||||
custodyType: peerdas.Target,
|
||||
validatorsCustodyRequirement: 100,
|
||||
expected: 100,
|
||||
},
|
||||
{
|
||||
name: "actual, lower than samples per slot",
|
||||
custodyType: peerdas.Actual,
|
||||
validatorsCustodyRequirement: 3,
|
||||
toAdvertiseCustodyGroupCount: 4,
|
||||
expected: 8,
|
||||
},
|
||||
{
|
||||
name: "actual, higher than samples per slot",
|
||||
custodyType: peerdas.Actual,
|
||||
validatorsCustodyRequirement: 100,
|
||||
toAdvertiseCustodyGroupCount: 101,
|
||||
expected: 100,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Create a custody info.
|
||||
custodyInfo := peerdas.CustodyInfo{}
|
||||
|
||||
// Set the validators custody requirement for target custody group count.
|
||||
custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(tc.validatorsCustodyRequirement)
|
||||
|
||||
// Set the to advertise custody group count.
|
||||
custodyInfo.ToAdvertiseGroupCount.Set(tc.toAdvertiseCustodyGroupCount)
|
||||
|
||||
// Compute the custody group sampling size.
|
||||
actual := custodyInfo.CustodyGroupSamplingSize(tc.custodyType)
|
||||
|
||||
// Check the result.
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCustodyColumns(t *testing.T) {
|
||||
t.Run("group too large", func(t *testing.T) {
|
||||
_, err := peerdas.CustodyColumns([]uint64{1_000_000})
|
||||
|
||||
@@ -4,45 +4,17 @@ import (
|
||||
"encoding/binary"
|
||||
"sync"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// info contains all useful peerDAS related information regarding a peer.
|
||||
type (
|
||||
info struct {
|
||||
CustodyGroups map[uint64]bool
|
||||
CustodyColumns map[uint64]bool
|
||||
DataColumnsSubnets map[uint64]bool
|
||||
}
|
||||
|
||||
targetCustodyGroupCount struct {
|
||||
mut sync.RWMutex
|
||||
validatorsCustodyRequirement uint64
|
||||
}
|
||||
|
||||
toAdverstiseCustodyGroupCount struct {
|
||||
mut sync.RWMutex
|
||||
value uint64
|
||||
}
|
||||
|
||||
CustodyInfo struct {
|
||||
// Mut is a mutex to be used by caller to ensure neither
|
||||
// TargetCustodyGroupCount nor ToAdvertiseCustodyGroupCount are being modified.
|
||||
// (This is not necessary to use this mutex for any data protection.)
|
||||
Mut sync.RWMutex
|
||||
|
||||
// TargetGroupCount represents the target number of custody groups we should custody
|
||||
// regarding the validators we are tracking.
|
||||
TargetGroupCount targetCustodyGroupCount
|
||||
|
||||
// ToAdvertiseGroupCount represents the number of custody groups to advertise to the network.
|
||||
ToAdvertiseGroupCount toAdverstiseCustodyGroupCount
|
||||
}
|
||||
)
|
||||
type info struct {
|
||||
CustodyGroups map[uint64]bool
|
||||
CustodyColumns map[uint64]bool
|
||||
DataColumnsSubnets map[uint64]bool
|
||||
}
|
||||
|
||||
const (
|
||||
nodeInfoCacheSize = 200
|
||||
@@ -109,61 +81,6 @@ func Info(nodeID enode.ID, custodyGroupCount uint64) (*info, bool, error) {
|
||||
return result, false, nil
|
||||
}
|
||||
|
||||
// ActualGroupCount returns the actual custody group count.
|
||||
func (custodyInfo *CustodyInfo) ActualGroupCount() uint64 {
|
||||
return min(custodyInfo.TargetGroupCount.Get(), custodyInfo.ToAdvertiseGroupCount.Get())
|
||||
}
|
||||
|
||||
// CustodyGroupCount returns the number of groups we should participate in for custody.
|
||||
func (tcgc *targetCustodyGroupCount) Get() uint64 {
|
||||
// If subscribed to all subnets, return the number of custody groups.
|
||||
if flags.Get().SubscribeAllDataSubnets {
|
||||
return params.BeaconConfig().NumberOfCustodyGroups
|
||||
}
|
||||
|
||||
tcgc.mut.RLock()
|
||||
defer tcgc.mut.RUnlock()
|
||||
|
||||
// If no validators are tracked, return the default custody requirement.
|
||||
if tcgc.validatorsCustodyRequirement == 0 {
|
||||
return params.BeaconConfig().CustodyRequirement
|
||||
}
|
||||
|
||||
// Return the validators custody requirement.
|
||||
return tcgc.validatorsCustodyRequirement
|
||||
}
|
||||
|
||||
// setValidatorsCustodyRequirement sets the validators custody requirement.
|
||||
func (tcgc *targetCustodyGroupCount) SetValidatorsCustodyRequirement(value uint64) {
|
||||
tcgc.mut.Lock()
|
||||
defer tcgc.mut.Unlock()
|
||||
|
||||
tcgc.validatorsCustodyRequirement = value
|
||||
}
|
||||
|
||||
// Get returns the to advertise custody group count.
|
||||
func (tacgc *toAdverstiseCustodyGroupCount) Get() uint64 {
|
||||
// If subscribed to all subnets, return the number of custody groups.
|
||||
if flags.Get().SubscribeAllDataSubnets {
|
||||
return params.BeaconConfig().NumberOfCustodyGroups
|
||||
}
|
||||
|
||||
custodyRequirement := params.BeaconConfig().CustodyRequirement
|
||||
|
||||
tacgc.mut.RLock()
|
||||
defer tacgc.mut.RUnlock()
|
||||
|
||||
return max(tacgc.value, custodyRequirement)
|
||||
}
|
||||
|
||||
// Set sets the to advertise custody group count.
|
||||
func (tacgc *toAdverstiseCustodyGroupCount) Set(value uint64) {
|
||||
tacgc.mut.Lock()
|
||||
defer tacgc.mut.Unlock()
|
||||
|
||||
tacgc.value = value
|
||||
}
|
||||
|
||||
// createInfoCacheIfNeeded creates a new cache if it doesn't exist.
|
||||
func createInfoCacheIfNeeded() error {
|
||||
nodeInfoCacheMut.Lock()
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
)
|
||||
@@ -26,108 +25,3 @@ func TestInfo(t *testing.T) {
|
||||
require.DeepEqual(t, expectedDataColumnsSubnets, actual.DataColumnsSubnets)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTargetCustodyGroupCount(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
subscribeToAllColumns bool
|
||||
validatorsCustodyRequirement uint64
|
||||
expected uint64
|
||||
}{
|
||||
{
|
||||
name: "subscribed to all data subnets",
|
||||
subscribeToAllColumns: true,
|
||||
validatorsCustodyRequirement: 100,
|
||||
expected: 128,
|
||||
},
|
||||
{
|
||||
name: "no validators attached",
|
||||
subscribeToAllColumns: false,
|
||||
validatorsCustodyRequirement: 0,
|
||||
expected: 4,
|
||||
},
|
||||
{
|
||||
name: "some validators attached",
|
||||
subscribeToAllColumns: false,
|
||||
validatorsCustodyRequirement: 100,
|
||||
expected: 100,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Subscribe to all subnets if needed.
|
||||
if tc.subscribeToAllColumns {
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SubscribeAllDataSubnets = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
}
|
||||
|
||||
var custodyInfo peerdas.CustodyInfo
|
||||
|
||||
// Set the validators custody requirement.
|
||||
custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(tc.validatorsCustodyRequirement)
|
||||
|
||||
// Get the target custody group count.
|
||||
actual := custodyInfo.TargetGroupCount.Get()
|
||||
|
||||
// Compare the expected and actual values.
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestToAdvertiseCustodyGroupCount(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
subscribeToAllColumns bool
|
||||
toAdvertiseCustodyGroupCount uint64
|
||||
expected uint64
|
||||
}{
|
||||
{
|
||||
name: "subscribed to all subnets",
|
||||
subscribeToAllColumns: true,
|
||||
toAdvertiseCustodyGroupCount: 100,
|
||||
expected: 128,
|
||||
},
|
||||
{
|
||||
name: "higher than custody requirement",
|
||||
subscribeToAllColumns: false,
|
||||
toAdvertiseCustodyGroupCount: 100,
|
||||
expected: 100,
|
||||
},
|
||||
{
|
||||
name: "lower than custody requirement",
|
||||
subscribeToAllColumns: false,
|
||||
toAdvertiseCustodyGroupCount: 1,
|
||||
expected: 4,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Subscribe to all subnets if needed.
|
||||
if tc.subscribeToAllColumns {
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SubscribeAllDataSubnets = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
}
|
||||
|
||||
// Create a custody info.
|
||||
var custodyInfo peerdas.CustodyInfo
|
||||
|
||||
// Set the to advertise custody group count.
|
||||
custodyInfo.ToAdvertiseGroupCount.Set(tc.toAdvertiseCustodyGroupCount)
|
||||
|
||||
// Get the to advertise custody group count.
|
||||
actual := custodyInfo.ToAdvertiseGroupCount.Get()
|
||||
|
||||
// Compare the expected and actual values.
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,10 +10,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
CustodyGroupCountEnrKey = "cgc"
|
||||
kzgPosition = 11 // The index of the KZG commitment list in the Body
|
||||
)
|
||||
const kzgPosition = 11 // The index of the KZG commitment list in the Body
|
||||
|
||||
var (
|
||||
ErrIndexTooLarge = errors.New("column index is larger than the specified columns count")
|
||||
@@ -27,13 +24,13 @@ var (
|
||||
ErrCannotLoadCustodyGroupCount = errors.New("cannot load the custody group count from peer")
|
||||
)
|
||||
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#custody-group-count
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#custody-group-count
|
||||
type Cgc uint64
|
||||
|
||||
func (Cgc) ENRKey() string { return CustodyGroupCountEnrKey }
|
||||
func (Cgc) ENRKey() string { return params.BeaconNetworkConfig().CustodyGroupCountKey }
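A hedged sketch of how the `Cgc` entry above could be advertised in the node's ENR, assuming a go-ethereum `enode.LocalNode`; the helper name is illustrative.

package example

import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

// advertiseCustodyGroupCount sets the custody group count entry on the local
// ENR so peers can read how many custody groups this node advertises.
func advertiseCustodyGroupCount(localNode *enode.LocalNode, custodyGroupCount uint64) {
	localNode.Set(peerdas.Cgc(custodyGroupCount))
}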
// VerifyDataColumnSidecar verifies if the data column sidecar is valid.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#verify_data_column_sidecar
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar
|
||||
func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
|
||||
// The sidecar index must be within the valid range.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
@@ -60,7 +57,7 @@ func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
|
||||
// while we are verifying all the KZG proofs from multiple sidecars in a batch.
|
||||
// This is done to improve performance since the internal KZG library is way more
|
||||
// efficient when verifying in batch.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#verify_data_column_sidecar_kzg_proofs
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar_kzg_proofs
|
||||
func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) error {
|
||||
// Compute the total count.
|
||||
count := 0
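The batching rationale above suggests the following call pattern; this is a sketch only, built from the verification helpers named in this diff, with an illustrative wrapper name.

package example

import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
)

// verifyColumnSidecars runs the cheap structural and inclusion-proof checks per
// sidecar, then verifies all KZG proofs in a single batched call.
func verifyColumnSidecars(sidecars []blocks.RODataColumn) error {
	for _, sidecar := range sidecars {
		if err := peerdas.VerifyDataColumnSidecar(sidecar); err != nil {
			return err
		}
		if err := peerdas.VerifyDataColumnSidecarInclusionProof(sidecar); err != nil {
			return err
		}
	}
	return peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
}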
@@ -96,7 +93,7 @@ func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) error {
|
||||
}
|
||||
|
||||
// VerifyDataColumnSidecarInclusionProof verifies that the given KZG commitments are included in the given beacon block.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#verify_data_column_sidecar_inclusion_proof
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar_inclusion_proof
|
||||
func VerifyDataColumnSidecarInclusionProof(sidecar blocks.RODataColumn) error {
|
||||
if sidecar.SignedBlockHeader == nil || sidecar.SignedBlockHeader.Header == nil {
|
||||
return ErrNilBlockHeader
|
||||
@@ -128,7 +125,7 @@ func VerifyDataColumnSidecarInclusionProof(sidecar blocks.RODataColumn) error {
|
||||
}
|
||||
|
||||
// ComputeSubnetForDataColumnSidecar computes the subnet for a data column sidecar.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#compute_subnet_for_data_column_sidecar
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#compute_subnet_for_data_column_sidecar
|
||||
func ComputeSubnetForDataColumnSidecar(columnIndex uint64) uint64 {
|
||||
dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
|
||||
return columnIndex % dataColumnSidecarSubnetCount
|
||||
|
||||
@@ -63,17 +63,14 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
|
||||
t.Run("invalid proof", func(t *testing.T) {
|
||||
sidecars := generateRandomSidecars(t, seed, blobCount)
|
||||
sidecars[0].Column[0][0]++ // It is OK to overflow
|
||||
roDataColumnSidecars := generateRODataColumnSidecars(t, sidecars)
|
||||
|
||||
err := peerdas.VerifyDataColumnsSidecarKZGProofs(roDataColumnSidecars)
|
||||
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
|
||||
require.ErrorIs(t, err, peerdas.ErrInvalidKZGProof)
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
sidecars := generateRandomSidecars(t, seed, blobCount)
|
||||
roDataColumnSidecars := generateRODataColumnSidecars(t, sidecars)
|
||||
|
||||
err := peerdas.VerifyDataColumnsSidecarKZGProofs(roDataColumnSidecars)
|
||||
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
@@ -256,9 +253,8 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_SameCommitments_NoBatch(b *testin
|
||||
for i := range int64(b.N) {
|
||||
// Generate new random sidecars to ensure the KZG backend does not cache anything.
|
||||
sidecars := generateRandomSidecars(b, i, blobCount)
|
||||
roDataColumnSidecars := generateRODataColumnSidecars(b, sidecars)
|
||||
|
||||
for _, sidecar := range roDataColumnSidecars {
|
||||
for _, sidecar := range sidecars {
|
||||
sidecars := []blocks.RODataColumn{sidecar}
|
||||
b.StartTimer()
|
||||
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
|
||||
@@ -282,7 +278,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch(b *testing.
|
||||
b.ResetTimer()
|
||||
|
||||
for j := range int64(b.N) {
|
||||
allSidecars := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns)
|
||||
allSidecars := make([]blocks.RODataColumn, 0, numberOfColumns)
|
||||
for k := int64(0); k < numberOfColumns; k += columnsCount {
|
||||
// Use different seeds to generate different blobs/commitments
|
||||
seed := int64(b.N*i) + numberOfColumns*j + blobCount*k
|
||||
@@ -292,10 +288,8 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch(b *testing.
|
||||
allSidecars = append(allSidecars, sidecars[k:k+columnsCount]...)
|
||||
}
|
||||
|
||||
roDataColumnSidecars := generateRODataColumnSidecars(b, allSidecars)
|
||||
|
||||
b.StartTimer()
|
||||
err := peerdas.VerifyDataColumnsSidecarKZGProofs(roDataColumnSidecars)
|
||||
err := peerdas.VerifyDataColumnsSidecarKZGProofs(allSidecars)
|
||||
b.StopTimer()
|
||||
require.NoError(b, err)
|
||||
}
|
||||
@@ -323,8 +317,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch4(b *testing
|
||||
for j := range int64(batchCount) {
|
||||
// Use different seeds to generate different blobs/commitments
|
||||
sidecars := generateRandomSidecars(b, int64(batchCount)*i+j*blobCount, blobCount)
|
||||
roDataColumnSidecars := generateRODataColumnSidecars(b, sidecars[:columnsCount])
|
||||
allSidecars = append(allSidecars, roDataColumnSidecars)
|
||||
allSidecars = append(allSidecars, sidecars)
|
||||
}
|
||||
|
||||
for _, sidecars := range allSidecars {
|
||||
@@ -358,7 +351,7 @@ func createTestSidecar(t *testing.T, index uint64, column, kzgCommitments, kzgPr
|
||||
return roSidecar
|
||||
}
|
||||
|
||||
func generateRandomSidecars(t testing.TB, seed, blobCount int64) []*ethpb.DataColumnSidecar {
|
||||
func generateRandomSidecars(t testing.TB, seed, blobCount int64) []blocks.RODataColumn {
|
||||
dbBlock := util.NewBeaconBlockDeneb()
|
||||
|
||||
commitments := make([][]byte, 0, blobCount)
|
||||
@@ -379,20 +372,10 @@ func generateRandomSidecars(t testing.TB, seed, blobCount int64) []*ethpb.DataCo
|
||||
require.NoError(t, err)
|
||||
|
||||
cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
|
||||
sidecars, err := peerdas.DataColumnSidecars(sBlock, cellsAndProofs)
|
||||
rob, err := blocks.NewROBlock(sBlock)
|
||||
require.NoError(t, err)
|
||||
sidecars, err := peerdas.ConstructDataColumnSidecar(cellsAndProofs, peerdas.PopulateFromBlock(rob))
|
||||
require.NoError(t, err)
|
||||
|
||||
return sidecars
|
||||
}
|
||||
|
||||
func generateRODataColumnSidecars(t testing.TB, sidecars []*ethpb.DataColumnSidecar) []blocks.RODataColumn {
|
||||
roDataColumnSidecars := make([]blocks.RODataColumn, 0, len(sidecars))
|
||||
for _, sidecar := range sidecars {
|
||||
roCol, err := blocks.NewRODataColumn(sidecar)
|
||||
require.NoError(t, err)
|
||||
|
||||
roDataColumnSidecars = append(roDataColumnSidecars, roCol)
|
||||
}
|
||||
|
||||
return roDataColumnSidecars
|
||||
}
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sync/errgroup"
|
||||
@@ -18,8 +17,8 @@ var (
|
||||
ErrBlobsCellsProofsMismatch = errors.New("blobs and cells proofs mismatch")
|
||||
)
|
||||
|
||||
// MinimumColumnsCountToReconstruct returns the minimum number of columns needed to proceed to a reconstruction.
func MinimumColumnsCountToReconstruct() uint64 {
// MinimumColumnCountToReconstruct returns the minimum number of columns needed to proceed to a reconstruction.
func MinimumColumnCountToReconstruct() uint64 {
|
||||
// If the number of columns is odd, then we need total / 2 + 1 columns to reconstruct.
|
||||
// If the number of columns is even, then we need total / 2 columns to reconstruct.
|
||||
return (params.BeaconConfig().NumberOfColumns + 1) / 2
|
||||
@@ -58,16 +57,10 @@ func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataC
|
||||
|
||||
// Check if there is enough sidecars to reconstruct the missing columns.
|
||||
sidecarCount := len(sidecarByIndex)
|
||||
if uint64(sidecarCount) < MinimumColumnsCountToReconstruct() {
|
||||
if uint64(sidecarCount) < MinimumColumnCountToReconstruct() {
|
||||
return nil, ErrNotEnoughDataColumnSidecars
|
||||
}
|
||||
|
||||
// Sidecars are verified and are committed to the same block.
|
||||
// All signed block headers, KZG commitments, and inclusion proofs are the same.
|
||||
signedBlockHeader := referenceSidecar.SignedBlockHeader
|
||||
kzgCommitments := referenceSidecar.KzgCommitments
|
||||
kzgCommitmentsInclusionProof := referenceSidecar.KzgCommitmentsInclusionProof
|
||||
|
||||
// Recover cells and compute proofs in parallel.
|
||||
var wg errgroup.Group
|
||||
cellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
|
||||
@@ -100,7 +93,7 @@ func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataC
|
||||
return nil, errors.Wrap(err, "wait for RecoverCellsAndKZGProofs")
|
||||
}
|
||||
|
||||
outSidecars, err := dataColumnsSidecars(signedBlockHeader, kzgCommitments, kzgCommitmentsInclusionProof, cellsAndProofs)
|
||||
outSidecars, err := ConstructDataColumnSidecar(cellsAndProofs, PopulateFromSidecar(referenceSidecar.RODataColumn))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "data column sidecars from items")
|
||||
}
|
||||
@@ -109,71 +102,13 @@ func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataC
|
||||
// As a consequence, reconstructed sidecars are also verified.
|
||||
outVerifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(outSidecars))
|
||||
for _, sidecar := range outSidecars {
|
||||
roSidecar, err := blocks.NewRODataColumnWithRoot(sidecar, blockRoot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "new RO data column with root")
|
||||
}
|
||||
|
||||
verifiedRoSidecar := blocks.NewVerifiedRODataColumn(roSidecar)
|
||||
verifiedRoSidecar := blocks.NewVerifiedRODataColumn(sidecar)
|
||||
outVerifiedRoSidecars = append(outVerifiedRoSidecars, verifiedRoSidecar)
|
||||
}
|
||||
|
||||
return outVerifiedRoSidecars, nil
|
||||
}
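A hedged sketch of how a caller might gate reconstruction on the column threshold, using the reconstruction API shown in this diff; the wrapper is illustrative.

package example

import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
)

// reconstructIfPossible only attempts a reconstruction once at least
// MinimumColumnCountToReconstruct verified columns have been collected for a block.
func reconstructIfPossible(collected []blocks.VerifiedRODataColumn) ([]blocks.VerifiedRODataColumn, error) {
	if uint64(len(collected)) < peerdas.MinimumColumnCountToReconstruct() {
		// Not enough columns yet; the caller should keep collecting.
		return nil, nil
	}
	return peerdas.ReconstructDataColumnSidecars(collected)
}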
// ConstructDataColumnSidecars constructs data column sidecars from a block, (un-extended) blobs and
|
||||
// cell proofs corresponding to the extended blobs. The main purpose of this function is to
|
||||
// construct data column sidecars from data obtained from the execution client via:
|
||||
// - `engine_getBlobsV2` - https://github.com/ethereum/execution-apis/blob/main/src/engine/osaka.md#engine_getblobsv2, or
|
||||
// - `engine_getPayloadV5` - https://github.com/ethereum/execution-apis/blob/main/src/engine/osaka.md#engine_getpayloadv5
|
||||
// Note: In this function, to stick with the `BlobsBundleV2` format returned by the execution client in `engine_getPayloadV5`,
|
||||
// cell proofs are "flattened".
|
||||
func ConstructDataColumnSidecars(block interfaces.ReadOnlySignedBeaconBlock, blobs [][]byte, cellProofs [][]byte) ([]*ethpb.DataColumnSidecar, error) {
|
||||
// Check if the cells count is equal to the cell proofs count.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
blobCount := uint64(len(blobs))
|
||||
cellProofsCount := uint64(len(cellProofs))
|
||||
|
||||
cellsCount := blobCount * numberOfColumns
|
||||
if cellsCount != cellProofsCount {
|
||||
return nil, ErrBlobsCellsProofsMismatch
|
||||
}
|
||||
|
||||
cellsAndProofs := make([]kzg.CellsAndProofs, 0, blobCount)
|
||||
for i, blob := range blobs {
|
||||
var kzgBlob kzg.Blob
|
||||
if copy(kzgBlob[:], blob) != len(kzgBlob) {
|
||||
return nil, errors.New("wrong blob size - should never happen")
|
||||
}
|
||||
|
||||
// Compute the extended cells from the (non-extended) blob.
|
||||
cells, err := kzg.ComputeCells(&kzgBlob)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "compute cells")
|
||||
}
|
||||
|
||||
var proofs []kzg.Proof
|
||||
for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
|
||||
var kzgProof kzg.Proof
|
||||
if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
|
||||
return nil, errors.New("wrong KZG proof size - should never happen")
|
||||
}
|
||||
|
||||
proofs = append(proofs, kzgProof)
|
||||
}
|
||||
|
||||
cellsProofs := kzg.CellsAndProofs{Cells: cells, Proofs: proofs}
|
||||
cellsAndProofs = append(cellsAndProofs, cellsProofs)
|
||||
}
|
||||
|
||||
dataColumnSidecars, err := DataColumnSidecars(block, cellsAndProofs)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "data column sidcars")
|
||||
}
|
||||
|
||||
return dataColumnSidecars, nil
|
||||
}
|
||||
|
||||
// ReconstructBlobs constructs verified read-only blob sidecars from verified read-only data column sidecars.
// The following constraints must be satisfied:
// - All `dataColumnSidecars` have to be committed to the same block, and
|
||||
@@ -256,6 +191,47 @@ func ReconstructBlobs(block blocks.ROBlock, verifiedDataColumnSidecars []blocks.
|
||||
return blobSidecars, nil
|
||||
}
|
||||
|
||||
// ComputeCellsAndProofs computes the cells and proofs from blobs and cell proofs.
|
||||
func ComputeCellsAndProofs(blobs [][]byte, cellProofs [][]byte) ([]kzg.CellsAndProofs, error) {
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
blobCount := uint64(len(blobs))
|
||||
cellProofsCount := uint64(len(cellProofs))
|
||||
|
||||
cellsCount := blobCount * numberOfColumns
|
||||
if cellsCount != cellProofsCount {
|
||||
return nil, ErrBlobsCellsProofsMismatch
|
||||
}
|
||||
|
||||
cellsAndProofs := make([]kzg.CellsAndProofs, 0, blobCount)
|
||||
for i, blob := range blobs {
|
||||
var kzgBlob kzg.Blob
|
||||
if copy(kzgBlob[:], blob) != len(kzgBlob) {
|
||||
return nil, errors.New("wrong blob size - should never happen")
|
||||
}
|
||||
|
||||
// Compute the extended cells from the (non-extended) blob.
|
||||
cells, err := kzg.ComputeCells(&kzgBlob)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "compute cells")
|
||||
}
|
||||
|
||||
var proofs []kzg.Proof
|
||||
for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
|
||||
var kzgProof kzg.Proof
|
||||
if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
|
||||
return nil, errors.New("wrong KZG proof size - should never happen")
|
||||
}
|
||||
|
||||
proofs = append(proofs, kzgProof)
|
||||
}
|
||||
|
||||
cellsProofs := kzg.CellsAndProofs{Cells: cells, Proofs: proofs}
|
||||
cellsAndProofs = append(cellsAndProofs, cellsProofs)
|
||||
}
|
||||
|
||||
return cellsAndProofs, nil
|
||||
}
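A minimal sketch (not part of this diff) of the intended `engine_getBlobsV2`/`engine_getPayloadV5` flow: flattened cell proofs from the execution client are expanded with `ComputeCellsAndProofs` and then fed to `ConstructDataColumnSidecar`. The wrapper name is illustrative; the KZG trusted setup and beacon config are assumed to be initialized.

package example

import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
)

// sidecarsFromEngineBlobs expands un-extended blobs and flattened cell proofs
// (as returned by the execution client) into cells and proofs, then assembles
// the data column sidecars for the given block.
func sidecarsFromEngineBlobs(block blocks.ROBlock, blobs, cellProofs [][]byte) ([]blocks.RODataColumn, error) {
	cellsAndProofs, err := peerdas.ComputeCellsAndProofs(blobs, cellProofs)
	if err != nil {
		return nil, err
	}
	return peerdas.ConstructDataColumnSidecar(cellsAndProofs, peerdas.PopulateFromBlock(block))
}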
// blobSidecarsFromDataColumnSidecars converts verified data column sidecars to verified blob sidecars.
|
||||
func blobSidecarsFromDataColumnSidecars(roBlock blocks.ROBlock, dataColumnSidecars []blocks.VerifiedRODataColumn, indices []int) ([]*blocks.VerifiedROBlob, error) {
|
||||
referenceSidecar := dataColumnSidecars[0]
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
"github.com/pkg/errors"
|
||||
@@ -48,7 +47,7 @@ func TestMinimumColumnsCountToReconstruct(t *testing.T) {
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Compute the minimum number of columns needed to reconstruct.
|
||||
actual := peerdas.MinimumColumnsCountToReconstruct()
|
||||
actual := peerdas.MinimumColumnCountToReconstruct()
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
@@ -100,7 +99,7 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
|
||||
t.Run("not enough columns to enable reconstruction", func(t *testing.T) {
|
||||
_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3)
|
||||
|
||||
minimum := peerdas.MinimumColumnsCountToReconstruct()
|
||||
minimum := peerdas.MinimumColumnCountToReconstruct()
|
||||
_, err := peerdas.ReconstructDataColumnSidecars(verifiedRoSidecars[:minimum-1])
|
||||
require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
|
||||
})
|
||||
@@ -124,50 +123,6 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestConstructDataColumnSidecars(t *testing.T) {
|
||||
const (
|
||||
blobCount = 3
|
||||
cellsPerBlob = fieldparams.CellsPerBlob
|
||||
)
|
||||
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
roBlock, _, baseVerifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
|
||||
|
||||
// Extract blobs and proofs from the sidecars.
|
||||
blobs := make([][]byte, 0, blobCount)
|
||||
cellProofs := make([][]byte, 0, cellsPerBlob)
|
||||
for blobIndex := range blobCount {
|
||||
blob := make([]byte, 0, cellsPerBlob)
|
||||
for columnIndex := range cellsPerBlob {
|
||||
cell := baseVerifiedRoSidecars[columnIndex].Column[blobIndex]
|
||||
blob = append(blob, cell...)
|
||||
}
|
||||
|
||||
blobs = append(blobs, blob)
|
||||
|
||||
for columnIndex := range numberOfColumns {
|
||||
cellProof := baseVerifiedRoSidecars[columnIndex].KzgProofs[blobIndex]
|
||||
cellProofs = append(cellProofs, cellProof)
|
||||
}
|
||||
}
|
||||
|
||||
actual, err := peerdas.ConstructDataColumnSidecars(roBlock, blobs, cellProofs)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Extract the base verified ro sidecars into sidecars.
|
||||
expected := make([]*ethpb.DataColumnSidecar, 0, len(baseVerifiedRoSidecars))
|
||||
for _, verifiedRoSidecar := range baseVerifiedRoSidecars {
|
||||
expected = append(expected, verifiedRoSidecar.DataColumnSidecar)
|
||||
}
|
||||
|
||||
require.DeepSSZEqual(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestReconstructBlobs(t *testing.T) {
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
@@ -250,7 +205,7 @@ func TestReconstructBlobs(t *testing.T) {
|
||||
// Compute cells and proofs from blob sidecars.
|
||||
var wg errgroup.Group
|
||||
blobs := make([][]byte, blobCount)
|
||||
cellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
|
||||
inputCellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
|
||||
for i := range blobCount {
|
||||
blob := roBlobSidecars[i].Blob
|
||||
blobs[i] = blob
|
||||
@@ -267,7 +222,7 @@ func TestReconstructBlobs(t *testing.T) {
|
||||
|
||||
// It is safe for multiple goroutines to concurrently write to the same slice,
|
||||
// as long as they are writing to different indices, which is the case here.
|
||||
cellsAndProofs[i] = cp
|
||||
inputCellsAndProofs[i] = cp
|
||||
|
||||
return nil
|
||||
})
|
||||
@@ -278,25 +233,24 @@ func TestReconstructBlobs(t *testing.T) {
|
||||
|
||||
// Flatten proofs.
|
||||
cellProofs := make([][]byte, 0, blobCount*numberOfColumns)
|
||||
for _, cp := range cellsAndProofs {
|
||||
for _, cp := range inputCellsAndProofs {
|
||||
for _, proof := range cp.Proofs {
|
||||
cellProofs = append(cellProofs, proof[:])
|
||||
}
|
||||
}
|
||||
|
||||
// Construct data column sidecars.
|
||||
// It is OK to use the public function `ConstructDataColumnSidecars`, as long as
|
||||
// `TestConstructDataColumnSidecars` tests pass.
|
||||
dataColumnSidecars, err := peerdas.ConstructDataColumnSidecars(roBlock, blobs, cellProofs)
|
||||
// Compute cells and proofs from the blobs and cell proofs.
|
||||
cellsAndProofs, err := peerdas.ComputeCellsAndProofs(blobs, cellProofs)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Construct data column sidecars from the signed block and cells and proofs.
|
||||
roDataColumnSidecars, err := peerdas.ConstructDataColumnSidecar(cellsAndProofs, peerdas.PopulateFromBlock(roBlock))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Convert to verified data column sidecars.
|
||||
verifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnSidecars))
|
||||
for _, dataColumnSidecar := range dataColumnSidecars {
|
||||
roSidecar, err := blocks.NewRODataColumn(dataColumnSidecar)
|
||||
require.NoError(t, err)
|
||||
|
||||
verifiedRoSidecar := blocks.NewVerifiedRODataColumn(roSidecar)
|
||||
verifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumnSidecars))
|
||||
for _, roDataColumnSidecar := range roDataColumnSidecars {
|
||||
verifiedRoSidecar := blocks.NewVerifiedRODataColumn(roDataColumnSidecar)
|
||||
verifiedRoSidecars = append(verifiedRoSidecars, verifiedRoSidecar)
|
||||
}
|
||||
|
||||
@@ -339,3 +293,86 @@ func TestReconstructBlobs(t *testing.T) {
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func TestComputeCellsAndProofs(t *testing.T) {
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("mismatched blob and proof counts", func(t *testing.T) {
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
// Create one blob but proofs for two blobs
|
||||
blobs := [][]byte{{}}
|
||||
|
||||
// Create proofs for 2 blobs worth of columns
|
||||
cellProofs := make([][]byte, 2*numberOfColumns)
|
||||
|
||||
_, err := peerdas.ComputeCellsAndProofs(blobs, cellProofs)
|
||||
require.ErrorIs(t, err, peerdas.ErrBlobsCellsProofsMismatch)
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
const blobCount = 2
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
// Generate test blobs
|
||||
_, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount)
|
||||
|
||||
// Extract blobs and compute expected cells and proofs
|
||||
blobs := make([][]byte, blobCount)
|
||||
expectedCellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
|
||||
var wg errgroup.Group
|
||||
|
||||
for i := range blobCount {
|
||||
blob := roBlobSidecars[i].Blob
|
||||
blobs[i] = blob
|
||||
|
||||
wg.Go(func() error {
|
||||
var kzgBlob kzg.Blob
|
||||
count := copy(kzgBlob[:], blob)
|
||||
require.Equal(t, len(kzgBlob), count)
|
||||
|
||||
cp, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "compute cells and kzg proofs for blob %d", i)
|
||||
}
|
||||
|
||||
expectedCellsAndProofs[i] = cp
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
err := wg.Wait()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Flatten proofs
|
||||
cellProofs := make([][]byte, 0, blobCount*numberOfColumns)
|
||||
for _, cp := range expectedCellsAndProofs {
|
||||
for _, proof := range cp.Proofs {
|
||||
cellProofs = append(cellProofs, proof[:])
|
||||
}
|
||||
}
|
||||
|
||||
// Test ComputeCellsAndProofs
|
||||
actualCellsAndProofs, err := peerdas.ComputeCellsAndProofs(blobs, cellProofs)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, blobCount, len(actualCellsAndProofs))
|
||||
|
||||
// Verify the results match expected
|
||||
for i := range blobCount {
|
||||
require.Equal(t, len(expectedCellsAndProofs[i].Cells), len(actualCellsAndProofs[i].Cells))
|
||||
require.Equal(t, len(expectedCellsAndProofs[i].Proofs), len(actualCellsAndProofs[i].Proofs))
|
||||
|
||||
// Compare cells
|
||||
for j, expectedCell := range expectedCellsAndProofs[i].Cells {
|
||||
require.Equal(t, expectedCell, actualCellsAndProofs[i].Cells[j])
|
||||
}
|
||||
|
||||
// Compare proofs
|
||||
for j, expectedProof := range expectedCellsAndProofs[i].Proofs {
|
||||
require.Equal(t, expectedProof, actualCellsAndProofs[i].Proofs[j])
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1,14 +1,26 @@
|
||||
package peerdas
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
beaconState "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrNilSignedBlockOrEmptyCellsAndProofs = errors.New("nil signed block or empty cells and proofs")
|
||||
ErrSizeMismatch = errors.New("mismatch in the number of blob KZG commitments and cellsAndProofs")
|
||||
ErrNotEnoughDataColumnSidecars = errors.New("not enough columns")
|
||||
ErrDataColumnSidecarsNotSortedByIndex = errors.New("data column sidecars are not sorted by index")
|
||||
)
|
||||
|
||||
// ValidatorsCustodyRequirement returns the number of custody groups required for the validator indices attached to the beacon node.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/validator.md#validator-custody
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#validator-custody
|
||||
func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validatorsIndex map[primitives.ValidatorIndex]bool) (uint64, error) {
|
||||
totalNodeBalance := uint64(0)
|
||||
for index := range validatorsIndex {
|
||||
@@ -28,3 +40,154 @@ func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validat
|
||||
count := totalNodeBalance / balancePerAdditionalCustodyGroup
|
||||
return min(max(count, validatorCustodyRequirement), numberOfCustodyGroups), nil
|
||||
}
|
||||
|
||||
// ConstructDataColumnSidecar, given a ConstructionPopulator and the cells/proofs associated with each blob in the
// block, assembles sidecars which can be distributed to peers.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars_from_block
func ConstructDataColumnSidecar(rows []kzg.CellsAndProofs, src ConstructionPopulator) ([]blocks.RODataColumn, error) {
|
||||
if len(rows) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
start := time.Now()
|
||||
cells, proofs, err := rotateRowsToCols(rows, params.BeaconConfig().NumberOfColumns)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "rotate cells and proofs")
|
||||
}
|
||||
|
||||
maxIdx := params.BeaconConfig().NumberOfColumns
|
||||
roSidecars := make([]blocks.RODataColumn, 0, maxIdx)
|
||||
for idx := range maxIdx {
|
||||
sidecar := ðpb.DataColumnSidecar{
|
||||
Index: idx,
|
||||
Column: cells[idx],
|
||||
KzgProofs: proofs[idx],
|
||||
}
|
||||
if err := src.Populate(sidecar); err != nil {
|
||||
return nil, errors.Wrap(err, "column field setter set")
|
||||
}
|
||||
if len(sidecar.KzgCommitments) != len(sidecar.Column) || len(sidecar.KzgCommitments) != len(sidecar.KzgProofs) {
|
||||
return nil, ErrSizeMismatch
|
||||
}
|
||||
|
||||
roSidecar, err := blocks.NewRODataColumnWithRoot(sidecar, src.Root())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "new ro data column")
|
||||
}
|
||||
roSidecars = append(roSidecars, roSidecar)
|
||||
}
|
||||
|
||||
dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds()))
|
||||
return roSidecars, nil
|
||||
}
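For completeness, a hedged sketch of the proposer-side path: cells and proofs are computed from locally available blobs with `kzg.ComputeCellsAndKZGProofs` (as the tests in this diff do) and the sidecars are assembled with the new constructor. The wrapper name is illustrative; `kzg.Start()` and beacon config initialization are assumed.

package example

import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	"github.com/pkg/errors"
)

// sidecarsFromBlobs computes the extended cells and proofs for each blob locally
// and then assembles the column sidecars for the signed block.
func sidecarsFromBlobs(signedBlock interfaces.ReadOnlySignedBeaconBlock, blobs []kzg.Blob) ([]blocks.RODataColumn, error) {
	cellsAndProofs := make([]kzg.CellsAndProofs, 0, len(blobs))
	for i := range blobs {
		cp, err := kzg.ComputeCellsAndKZGProofs(&blobs[i])
		if err != nil {
			return nil, errors.Wrap(err, "compute cells and KZG proofs")
		}
		cellsAndProofs = append(cellsAndProofs, cp)
	}

	roBlock, err := blocks.NewROBlock(signedBlock)
	if err != nil {
		return nil, errors.Wrap(err, "new RO block")
	}
	return peerdas.ConstructDataColumnSidecar(cellsAndProofs, peerdas.PopulateFromBlock(roBlock))
}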
// rotateRowsToCols takes a 2D slice of cells and proofs, where x is rows (blobs) and y is columns,
// and returns a 2D slice where x is columns and y is rows.
func rotateRowsToCols(rows []kzg.CellsAndProofs, numCols uint64) ([][][]byte, [][][]byte, error) {
|
||||
if len(rows) == 0 {
|
||||
return nil, nil, nil
|
||||
}
|
||||
cellCols := make([][][]byte, numCols)
|
||||
proofCols := make([][][]byte, numCols)
|
||||
for i, cp := range rows {
|
||||
if uint64(len(cp.Cells)) != numCols {
|
||||
return nil, nil, errors.Wrap(ErrNotEnoughDataColumnSidecars, "not enough cells")
|
||||
}
|
||||
if len(cp.Cells) != len(cp.Proofs) {
|
||||
return nil, nil, errors.Wrap(ErrNotEnoughDataColumnSidecars, "not enough proofs")
|
||||
}
|
||||
for j := uint64(0); j < numCols; j++ {
|
||||
if i == 0 {
|
||||
cellCols[j] = make([][]byte, len(rows))
|
||||
proofCols[j] = make([][]byte, len(rows))
|
||||
}
|
||||
cellCols[j][i] = cp.Cells[j][:]
|
||||
proofCols[j][i] = cp.Proofs[j][:]
|
||||
}
|
||||
}
|
||||
return cellCols, proofCols, nil
|
||||
}
|
||||
|
||||
// ConstructionPopulator is an interface satisfied by types that can use data from a struct
// such as a DataColumnSidecar or a BeaconBlock to set the fields in a data column sidecar that cannot
// be obtained from the engine API.
type ConstructionPopulator interface {
|
||||
Populate(*ethpb.DataColumnSidecar) error
|
||||
Slot() primitives.Slot
|
||||
Root() [32]byte
|
||||
Commitments() [][]byte
|
||||
Type() string
|
||||
}
|
||||
|
||||
func PopulateFromSidecar(sidecar blocks.RODataColumn) *SidecarReconstructionSource {
|
||||
return &SidecarReconstructionSource{RODataColumn: sidecar}
|
||||
}
|
||||
|
||||
type SidecarReconstructionSource struct {
|
||||
blocks.RODataColumn
|
||||
}
|
||||
|
||||
func (s *SidecarReconstructionSource) Populate(dc *ethpb.DataColumnSidecar) error {
|
||||
dc.SignedBlockHeader = s.SignedBlockHeader
|
||||
dc.KzgCommitments = s.KzgCommitments
|
||||
dc.KzgCommitmentsInclusionProof = s.KzgCommitmentsInclusionProof
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SidecarReconstructionSource) Root() [32]byte {
|
||||
return s.BlockRoot()
|
||||
}
|
||||
|
||||
func (s *SidecarReconstructionSource) Commitments() [][]byte {
|
||||
return s.KzgCommitments
|
||||
}
|
||||
|
||||
func (s *SidecarReconstructionSource) Type() string {
|
||||
return "DataColumnSidecar"
|
||||
}
|
||||
|
||||
var _ ConstructionPopulator = (*SidecarReconstructionSource)(nil)
|
||||
|
||||
func PopulateFromBlock(block blocks.ROBlock) *BlockReconstructionSource {
|
||||
return &BlockReconstructionSource{ROBlock: block}
|
||||
}
|
||||
|
||||
type BlockReconstructionSource struct {
|
||||
blocks.ROBlock
|
||||
}
|
||||
|
||||
func (b *BlockReconstructionSource) Populate(dc *ethpb.DataColumnSidecar) error {
|
||||
block := b.Block()
|
||||
blockBody := block.Body()
|
||||
blobKzgCommitments, err := blockBody.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
dc.KzgCommitments = blobKzgCommitments
|
||||
dc.SignedBlockHeader, err = b.Header()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "signed block header")
|
||||
}
|
||||
dc.KzgCommitmentsInclusionProof, err = blocks.MerkleProofKZGCommitments(blockBody)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "merkle proof KZG commitments")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *BlockReconstructionSource) Slot() primitives.Slot {
|
||||
return s.Block().Slot()
|
||||
}
|
||||
|
||||
func (s *BlockReconstructionSource) Commitments() [][]byte {
|
||||
c, err := s.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
log.WithField("root", s.Root()).Trace("Unable to get kzg commitments from block")
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
func (s *BlockReconstructionSource) Type() string {
|
||||
return "BeaconBlock"
|
||||
}
|
||||
|
||||
var _ ConstructionPopulator = (*BlockReconstructionSource)(nil)
|
||||
|
||||
@@ -3,11 +3,15 @@ package peerdas_test
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
)
|
||||
|
||||
func TestValidatorsCustodyRequirement(t *testing.T) {
|
||||
@@ -53,3 +57,248 @@ func TestValidatorsCustodyRequirement(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDataColumnSidecarsFromBlock(t *testing.T) {
|
||||
t.Run("sizes mismatch", func(t *testing.T) {
|
||||
// Create a protobuf signed beacon block.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create cells and proofs.
|
||||
cellsAndProofs := []kzg.CellsAndProofs{
|
||||
{
|
||||
Cells: make([]kzg.Cell, params.BeaconConfig().NumberOfColumns),
|
||||
Proofs: make([]kzg.Proof, params.BeaconConfig().NumberOfColumns),
|
||||
},
|
||||
}
|
||||
|
||||
rob, err := blocks.NewROBlock(signedBeaconBlock)
|
||||
require.NoError(t, err)
|
||||
_, err = peerdas.ConstructDataColumnSidecar(cellsAndProofs, peerdas.PopulateFromBlock(rob))
|
||||
require.ErrorIs(t, err, peerdas.ErrSizeMismatch)
|
||||
})
|
||||
|
||||
t.Run("cells array too short for column index", func(t *testing.T) {
|
||||
// Create a Fulu block with a blob commitment.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, 48)}
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create cells and proofs with insufficient cells for the number of columns.
|
||||
// This simulates a scenario where cellsAndProofs has fewer cells than expected columns.
|
||||
cellsAndProofs := []kzg.CellsAndProofs{
|
||||
{
|
||||
Cells: make([]kzg.Cell, 10), // Only 10 cells
|
||||
Proofs: make([]kzg.Proof, 10), // Only 10 proofs
|
||||
},
|
||||
}
|
||||
|
||||
// This should fail because the function will try to access columns up to NumberOfColumns
|
||||
// but we only have 10 cells/proofs.
|
||||
rob, err := blocks.NewROBlock(signedBeaconBlock)
|
||||
require.NoError(t, err)
|
||||
_, err = peerdas.ConstructDataColumnSidecar(cellsAndProofs, peerdas.PopulateFromBlock(rob))
|
||||
require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
|
||||
})
|
||||
|
||||
t.Run("proofs array too short for column index", func(t *testing.T) {
|
||||
// Create a Fulu block with a blob commitment.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, 48)}
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create cells and proofs with sufficient cells but insufficient proofs.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
cellsAndProofs := []kzg.CellsAndProofs{
|
||||
{
|
||||
Cells: make([]kzg.Cell, numberOfColumns),
|
||||
Proofs: make([]kzg.Proof, 5), // Only 5 proofs, less than columns
|
||||
},
|
||||
}
|
||||
|
||||
// This should fail when trying to access proof beyond index 4.
|
||||
rob, err := blocks.NewROBlock(signedBeaconBlock)
|
||||
require.NoError(t, err)
|
||||
_, err = peerdas.ConstructDataColumnSidecar(cellsAndProofs, peerdas.PopulateFromBlock(rob))
|
||||
require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
|
||||
require.ErrorContains(t, "not enough proofs", err)
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
// Create a Fulu block with blob commitments.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockFulu()
|
||||
commitment1 := make([]byte, 48)
|
||||
commitment2 := make([]byte, 48)
|
||||
|
||||
// Set different values to distinguish commitments
|
||||
commitment1[0] = 0x01
|
||||
commitment2[0] = 0x02
|
||||
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{commitment1, commitment2}
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create cells and proofs with correct dimensions.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
cellsAndProofs := []kzg.CellsAndProofs{
|
||||
{
|
||||
Cells: make([]kzg.Cell, numberOfColumns),
|
||||
Proofs: make([]kzg.Proof, numberOfColumns),
|
||||
},
|
||||
{
|
||||
Cells: make([]kzg.Cell, numberOfColumns),
|
||||
Proofs: make([]kzg.Proof, numberOfColumns),
|
||||
},
|
||||
}
|
||||
|
||||
// Set distinct values in cells and proofs for testing
|
||||
for i := range numberOfColumns {
|
||||
cellsAndProofs[0].Cells[i][0] = byte(i)
|
||||
cellsAndProofs[0].Proofs[i][0] = byte(i)
|
||||
cellsAndProofs[1].Cells[i][0] = byte(i + 128)
|
||||
cellsAndProofs[1].Proofs[i][0] = byte(i + 128)
|
||||
}
|
||||
|
||||
rob, err := blocks.NewROBlock(signedBeaconBlock)
|
||||
require.NoError(t, err)
|
||||
sidecars, err := peerdas.ConstructDataColumnSidecar(cellsAndProofs, peerdas.PopulateFromBlock(rob))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, sidecars)
|
||||
require.Equal(t, int(numberOfColumns), len(sidecars))
|
||||
|
||||
// Verify each sidecar has the expected structure
|
||||
for i, sidecar := range sidecars {
|
||||
require.Equal(t, uint64(i), sidecar.Index)
|
||||
require.Equal(t, 2, len(sidecar.Column))
|
||||
require.Equal(t, 2, len(sidecar.KzgCommitments))
|
||||
require.Equal(t, 2, len(sidecar.KzgProofs))
|
||||
|
||||
// Verify commitments match what we set
|
||||
require.DeepEqual(t, commitment1, sidecar.KzgCommitments[0])
|
||||
require.DeepEqual(t, commitment2, sidecar.KzgCommitments[1])
|
||||
|
||||
// Verify column data comes from the correct cells
|
||||
require.Equal(t, byte(i), sidecar.Column[0][0])
|
||||
require.Equal(t, byte(i+128), sidecar.Column[1][0])
|
||||
|
||||
// Verify proofs come from the correct proofs
|
||||
require.Equal(t, byte(i), sidecar.KzgProofs[0][0])
|
||||
require.Equal(t, byte(i+128), sidecar.KzgProofs[1][0])
|
||||
}
|
||||
})
|
||||
}
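As a hedged aside for readers of these tests: in production the cells-and-proofs input would come from the blobs themselves rather than hand-filled arrays. A minimal sketch of that flow follows; kzg.ComputeCellsAndKZGProofs and the kzg.Blob type are assumed names (only ConstructDataColumnSidecar, PopulateFromBlock and kzg.CellsAndProofs appear in the tests above), and imports are omitted.

func buildSidecarsForProposal(rob blocks.ROBlock, blobs []kzg.Blob) error {
    // One CellsAndProofs bundle per blob; each bundle holds a cell and a proof for every column.
    cellsAndProofs := make([]kzg.CellsAndProofs, 0, len(blobs))
    for i := range blobs {
        cp, err := kzg.ComputeCellsAndKZGProofs(&blobs[i]) // assumed helper name
        if err != nil {
            return errors.Wrapf(err, "compute cells and proofs for blob %d", i)
        }
        cellsAndProofs = append(cellsAndProofs, cp)
    }

    // ConstructDataColumnSidecar transposes the blob-major bundles into one sidecar per column,
    // copying the header and commitments from the block.
    sidecars, err := peerdas.ConstructDataColumnSidecar(cellsAndProofs, peerdas.PopulateFromBlock(rob))
    if err != nil {
        return errors.Wrap(err, "construct data column sidecars")
    }
    _ = sidecars // hand off to storage / gossip here
    return nil
}
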
func TestDataColumnSidecarsFromColumnSidecar(t *testing.T) {
|
||||
// Create KZG commitments for 2 blobs
|
||||
commitment1 := make([]byte, 48)
|
||||
commitment2 := make([]byte, 48)
|
||||
commitment1[0] = 0x01
|
||||
commitment2[0] = 0x02
|
||||
kzgCommitments := [][]byte{commitment1, commitment2}
|
||||
|
||||
// Create column data for 2 blobs
|
||||
columnData := make([][]byte, 2)
|
||||
columnData[0] = make([]byte, kzg.BytesPerCell)
|
||||
columnData[1] = make([]byte, kzg.BytesPerCell)
|
||||
columnData[0][0] = 0x11 // Distinct values
|
||||
columnData[1][0] = 0x22
|
||||
|
||||
// Create KZG proofs for 2 blobs
|
||||
kzgProofs := make([][]byte, 2)
|
||||
kzgProofs[0] = make([]byte, 48)
|
||||
kzgProofs[1] = make([]byte, 48)
|
||||
kzgProofs[0][0] = 0x33
|
||||
kzgProofs[1][0] = 0x44
|
||||
|
||||
// Create inclusion proof
|
||||
inclusionProof := make([][]byte, 4)
|
||||
for i := range inclusionProof {
|
||||
inclusionProof[i] = make([]byte, 32)
|
||||
inclusionProof[i][0] = byte(i + 0x50)
|
||||
}
|
||||
|
||||
// Create the input VerifiedRODataColumn sidecar using test utility
|
||||
_, verifiedSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{
|
||||
{
|
||||
Index: 5, // Column index 5
|
||||
Column: columnData,
|
||||
KzgCommitments: kzgCommitments,
|
||||
KzgProofs: kzgProofs,
|
||||
KzgCommitmentsInclusionProof: inclusionProof,
|
||||
Slot: 42,
|
||||
ProposerIndex: 7,
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
},
|
||||
})
|
||||
require.Equal(t, 1, len(verifiedSidecars))
|
||||
verifiedInputSidecar := verifiedSidecars[0]
|
||||
|
||||
// Create cells and proofs with correct dimensions for 2 blobs
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
cellsAndProofs := []kzg.CellsAndProofs{
|
||||
{
|
||||
Cells: make([]kzg.Cell, numberOfColumns),
|
||||
Proofs: make([]kzg.Proof, numberOfColumns),
|
||||
},
|
||||
{
|
||||
Cells: make([]kzg.Cell, numberOfColumns),
|
||||
Proofs: make([]kzg.Proof, numberOfColumns),
|
||||
},
|
||||
}
|
||||
|
||||
// Set distinct values in cells and proofs for testing
|
||||
for i := range numberOfColumns {
|
||||
cellsAndProofs[0].Cells[i][0] = byte(i)
|
||||
cellsAndProofs[0].Proofs[i][0] = byte(i + 10)
|
||||
cellsAndProofs[1].Cells[i][0] = byte(i + 128)
|
||||
cellsAndProofs[1].Proofs[i][0] = byte(i + 138)
|
||||
}
|
||||
|
||||
// Call the function
|
||||
sidecars, err := peerdas.ConstructDataColumnSidecar(cellsAndProofs, peerdas.PopulateFromSidecar(verifiedInputSidecar.RODataColumn))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, sidecars)
|
||||
require.Equal(t, int(numberOfColumns), len(sidecars))
|
||||
|
||||
// Verify each sidecar has the expected structure
|
||||
for i, sidecar := range sidecars {
|
||||
require.Equal(t, uint64(i), sidecar.Index)
|
||||
require.Equal(t, 2, len(sidecar.Column))
|
||||
require.Equal(t, 2, len(sidecar.KzgCommitments))
|
||||
require.Equal(t, 2, len(sidecar.KzgProofs))
|
||||
|
||||
// Verify commitments match input
|
||||
require.DeepEqual(t, commitment1, sidecar.KzgCommitments[0])
|
||||
require.DeepEqual(t, commitment2, sidecar.KzgCommitments[1])
|
||||
|
||||
// Verify column data comes from the correct cells
|
||||
require.Equal(t, byte(i), sidecar.Column[0][0])
|
||||
require.Equal(t, byte(i+128), sidecar.Column[1][0])
|
||||
|
||||
// Verify proofs come from the correct proofs
|
||||
require.Equal(t, byte(i+10), sidecar.KzgProofs[0][0])
|
||||
require.Equal(t, byte(i+138), sidecar.KzgProofs[1][0])
|
||||
|
||||
// Verify inclusion proof is preserved
|
||||
require.Equal(t, len(inclusionProof), len(sidecar.KzgCommitmentsInclusionProof))
|
||||
for j, proof := range sidecar.KzgCommitmentsInclusionProof {
|
||||
require.Equal(t, byte(j+0x50), proof[0])
|
||||
}
|
||||
|
||||
// Verify signed block header is preserved
|
||||
require.Equal(t, primitives.Slot(42), sidecar.SignedBlockHeader.Header.Slot)
|
||||
require.Equal(t, primitives.ValidatorIndex(7), sidecar.SignedBlockHeader.Header.ProposerIndex)
|
||||
}
|
||||
}
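The same constructor also serves the reconstruction path exercised above: once a node has recovered the full cells-and-proofs matrix (recovery itself is out of scope here), any already-verified sidecar from the same block can seed the header, commitments and inclusion proof. A hedged sketch, imports omitted:

func rebuildFromRecoveredCells(recovered []kzg.CellsAndProofs, seed blocks.RODataColumn) error {
    // PopulateFromSidecar copies the signed header, commitments and inclusion proof from seed,
    // so only the cells and proofs need to be supplied.
    sidecars, err := peerdas.ConstructDataColumnSidecar(recovered, peerdas.PopulateFromSidecar(seed))
    if err != nil {
        return errors.Wrap(err, "construct data column sidecars from recovered cells")
    }
    _ = sidecars // one sidecar per column, ready to be persisted or served to peers
    return nil
}
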
@@ -4,7 +4,6 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"domain.go",
|
||||
"signature.go",
|
||||
"signing_root.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing",
|
||||
@@ -25,7 +24,6 @@ go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"domain_test.go",
|
||||
"signature_test.go",
|
||||
"signing_root_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
package signing
|
||||
|
||||
import (
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var ErrNilRegistration = errors.New("nil signed registration")
|
||||
|
||||
// VerifyRegistrationSignature verifies the signature of a validator's registration.
|
||||
func VerifyRegistrationSignature(
|
||||
sr *ethpb.SignedValidatorRegistrationV1,
|
||||
) error {
|
||||
if sr == nil || sr.Message == nil {
|
||||
return ErrNilRegistration
|
||||
}
|
||||
|
||||
d := params.BeaconConfig().DomainApplicationBuilder
|
||||
// Per spec, we want the fork version and genesis validator to be nil.
|
||||
// Which is genesis value and zero by default.
|
||||
sd, err := ComputeDomain(
|
||||
d,
|
||||
nil, /* fork version */
|
||||
nil /* genesis val root */)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := VerifySigningRoot(sr.Message, sr.Message.Pubkey, sr.Signature, sd); err != nil {
|
||||
return ErrSigFailedToVerify
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,42 +0,0 @@
|
||||
package signing_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/bls"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
)
|
||||
|
||||
func TestVerifyRegistrationSignature(t *testing.T) {
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
reg := ðpb.ValidatorRegistrationV1{
|
||||
FeeRecipient: bytesutil.PadTo([]byte("fee"), 20),
|
||||
GasLimit: 123456,
|
||||
Timestamp: uint64(time.Now().Unix()),
|
||||
Pubkey: sk.PublicKey().Marshal(),
|
||||
}
|
||||
d := params.BeaconConfig().DomainApplicationBuilder
|
||||
domain, err := signing.ComputeDomain(d, nil, nil)
|
||||
require.NoError(t, err)
|
||||
sr, err := signing.ComputeSigningRoot(reg, domain)
|
||||
require.NoError(t, err)
|
||||
sk.Sign(sr[:]).Marshal()
|
||||
|
||||
sReg := ðpb.SignedValidatorRegistrationV1{
|
||||
Message: reg,
|
||||
Signature: sk.Sign(sr[:]).Marshal(),
|
||||
}
|
||||
require.NoError(t, signing.VerifyRegistrationSignature(sReg))
|
||||
|
||||
sReg.Signature = []byte("bad")
|
||||
require.ErrorIs(t, signing.VerifyRegistrationSignature(sReg), signing.ErrSigFailedToVerify)
|
||||
|
||||
sReg.Message = nil
|
||||
require.ErrorIs(t, signing.VerifyRegistrationSignature(sReg), signing.ErrNilRegistration)
|
||||
}
|
||||
@@ -4,7 +4,6 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"availability_blobs.go",
|
||||
"availability_columns.go",
|
||||
"blob_cache.go",
|
||||
"data_column_cache.go",
|
||||
"iface.go",
|
||||
@@ -13,7 +12,6 @@ go_library(
|
||||
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/das",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
@@ -23,7 +21,6 @@ go_library(
|
||||
"//runtime/logging:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
@@ -33,16 +30,13 @@ go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"availability_blobs_test.go",
|
||||
"availability_columns_test.go",
|
||||
"blob_cache_test.go",
|
||||
"data_column_cache_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
@@ -51,7 +45,6 @@ go_test(
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -53,30 +53,25 @@ func NewLazilyPersistentStore(store *filesystem.BlobStorage, verifier BlobBatchV
|
||||
// Persist adds blobs to the working blob cache. Blobs stored in this cache will be persisted
|
||||
// for at least as long as the node is running. Once IsDataAvailable succeeds, all blobs referenced
|
||||
// by the given block are guaranteed to be persisted for the remainder of the retention period.
|
||||
func (s *LazilyPersistentStoreBlob) Persist(current primitives.Slot, sidecars ...blocks.ROSidecar) error {
|
||||
func (s *LazilyPersistentStoreBlob) Persist(current primitives.Slot, sidecars ...blocks.ROBlob) error {
|
||||
if len(sidecars) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
blobSidecars, err := blocks.BlobSidecarsFromSidecars(sidecars)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "blob sidecars from sidecars")
|
||||
}
|
||||
|
||||
if len(blobSidecars) > 1 {
|
||||
firstRoot := blobSidecars[0].BlockRoot()
|
||||
for _, sidecar := range blobSidecars[1:] {
|
||||
if len(sidecars) > 1 {
|
||||
firstRoot := sidecars[0].BlockRoot()
|
||||
for _, sidecar := range sidecars[1:] {
|
||||
if sidecar.BlockRoot() != firstRoot {
|
||||
return errMixedRoots
|
||||
}
|
||||
}
|
||||
}
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(blobSidecars[0].Slot()), slots.ToEpoch(current)) {
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(sidecars[0].Slot()), slots.ToEpoch(current)) {
|
||||
return nil
|
||||
}
|
||||
key := keyFromSidecar(blobSidecars[0])
|
||||
key := keyFromSidecar(sidecars[0])
|
||||
entry := s.cache.ensure(key)
|
||||
for _, blobSidecar := range blobSidecars {
|
||||
for _, blobSidecar := range sidecars {
|
||||
if err := entry.stash(&blobSidecar); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -118,23 +118,21 @@ func TestLazilyPersistent_Missing(t *testing.T) {
|
||||
|
||||
blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
|
||||
|
||||
scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
|
||||
|
||||
mbv := &mockBlobBatchVerifier{t: t, scs: blobSidecars}
|
||||
as := NewLazilyPersistentStore(store, mbv)
|
||||
|
||||
// Only one commitment persisted, should return error with other indices
|
||||
require.NoError(t, as.Persist(1, scs[2]))
|
||||
require.NoError(t, as.Persist(1, blobSidecars[2]))
|
||||
err := as.IsDataAvailable(ctx, 1, blk)
|
||||
require.ErrorIs(t, err, errMissingSidecar)
|
||||
|
||||
// All but one persisted, return missing idx
|
||||
require.NoError(t, as.Persist(1, scs[0]))
|
||||
require.NoError(t, as.Persist(1, blobSidecars[0]))
|
||||
err = as.IsDataAvailable(ctx, 1, blk)
|
||||
require.ErrorIs(t, err, errMissingSidecar)
|
||||
|
||||
// All persisted, return nil
|
||||
require.NoError(t, as.Persist(1, scs...))
|
||||
require.NoError(t, as.Persist(1, blobSidecars...))
|
||||
|
||||
require.NoError(t, as.IsDataAvailable(ctx, 1, blk))
|
||||
}
|
||||
@@ -149,10 +147,8 @@ func TestLazilyPersistent_Mismatch(t *testing.T) {
|
||||
blobSidecars[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
|
||||
as := NewLazilyPersistentStore(store, mbv)
|
||||
|
||||
scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
|
||||
|
||||
// Only one commitment persisted, should return error with other indices
|
||||
require.NoError(t, as.Persist(1, scs[0]))
|
||||
require.NoError(t, as.Persist(1, blobSidecars[0]))
|
||||
err := as.IsDataAvailable(ctx, 1, blk)
|
||||
require.NotNil(t, err)
|
||||
require.ErrorIs(t, err, errCommitmentMismatch)
|
||||
@@ -161,29 +157,25 @@ func TestLazilyPersistent_Mismatch(t *testing.T) {
|
||||
func TestLazyPersistOnceCommitted(t *testing.T) {
|
||||
_, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 6)
|
||||
|
||||
scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
|
||||
|
||||
as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{})
|
||||
// stashes as expected
|
||||
require.NoError(t, as.Persist(1, scs...))
|
||||
require.NoError(t, as.Persist(1, blobSidecars...))
|
||||
// ignores duplicates
|
||||
require.ErrorIs(t, as.Persist(1, scs...), ErrDuplicateSidecar)
|
||||
require.ErrorIs(t, as.Persist(1, blobSidecars...), ErrDuplicateSidecar)
|
||||
|
||||
// ignores index out of bound
|
||||
blobSidecars[0].Index = 6
|
||||
require.ErrorIs(t, as.Persist(1, blocks.NewSidecarFromBlobSidecar(blobSidecars[0])), errIndexOutOfBounds)
|
||||
require.ErrorIs(t, as.Persist(1, blobSidecars[0]), errIndexOutOfBounds)
|
||||
|
||||
_, moreBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 4)
|
||||
|
||||
more := blocks.NewSidecarsFromBlobSidecars(moreBlobSidecars)
|
||||
|
||||
// ignores sidecars before the retention period
|
||||
slotOOB, err := slots.EpochStart(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, as.Persist(32+slotOOB, more[0]))
|
||||
require.NoError(t, as.Persist(32+slotOOB, moreBlobSidecars[0]))
|
||||
|
||||
// doesn't ignore new sidecars with a different block root
|
||||
require.NoError(t, as.Persist(1, more...))
|
||||
require.NoError(t, as.Persist(1, moreBlobSidecars...))
|
||||
}
|
||||
|
||||
type mockBlobBatchVerifier struct {
|
||||
|
||||
@@ -1,208 +0,0 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
errors "github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// LazilyPersistentStoreColumn is an implementation of AvailabilityStore to be used when batch syncing data columns.
|
||||
// This implementation will hold any data columns passed to Persist until the IsDataAvailable is called for their
|
||||
// block, at which time they will undergo full verification and be saved to the disk.
|
||||
type LazilyPersistentStoreColumn struct {
|
||||
store *filesystem.DataColumnStorage
|
||||
nodeID enode.ID
|
||||
cache *dataColumnCache
|
||||
custodyInfo *peerdas.CustodyInfo
|
||||
newDataColumnsVerifier verification.NewDataColumnsVerifier
|
||||
}
|
||||
|
||||
var _ AvailabilityStore = &LazilyPersistentStoreColumn{}
|
||||
|
||||
// DataColumnsVerifier enables LazilyPersistentStoreColumn to manage the verification process
|
||||
// going from RODataColumn->VerifiedRODataColumn, while avoiding the decision of which individual verifications
|
||||
// to run and in what order. Since LazilyPersistentStoreColumn always tries to verify and save data columns only when
|
||||
// they are all available, the interface takes a slice of data column sidecars.
|
||||
type DataColumnsVerifier interface {
|
||||
VerifiedRODataColumns(ctx context.Context, blk blocks.ROBlock, scs []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error)
|
||||
}
|
||||
|
||||
// NewLazilyPersistentStoreColumn creates a new LazilyPersistentStoreColumn.
|
||||
// WARNING: The resulting LazilyPersistentStoreColumn is NOT thread-safe.
|
||||
func NewLazilyPersistentStoreColumn(store *filesystem.DataColumnStorage, nodeID enode.ID, newDataColumnsVerifier verification.NewDataColumnsVerifier, custodyInfo *peerdas.CustodyInfo) *LazilyPersistentStoreColumn {
|
||||
return &LazilyPersistentStoreColumn{
|
||||
store: store,
|
||||
nodeID: nodeID,
|
||||
cache: newDataColumnCache(),
|
||||
custodyInfo: custodyInfo,
|
||||
newDataColumnsVerifier: newDataColumnsVerifier,
|
||||
}
|
||||
}
|
||||
|
||||
// PersistColumns adds columns to the working column cache. Columns stored in this cache will be persisted
|
||||
// for at least as long as the node is running. Once IsDataAvailable succeeds, all columns referenced
|
||||
// by the given block are guaranteed to be persisted for the remainder of the retention period.
|
||||
func (s *LazilyPersistentStoreColumn) Persist(current primitives.Slot, sidecars ...blocks.ROSidecar) error {
|
||||
if len(sidecars) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
dataColumnSidecars, err := blocks.DataColumnSidecarsFromSidecars(sidecars)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "blob sidecars from sidecars")
|
||||
}
|
||||
|
||||
// It is safe to retrieve the first sidecar.
|
||||
firstSidecar := dataColumnSidecars[0]
|
||||
|
||||
if len(sidecars) > 1 {
|
||||
firstRoot := firstSidecar.BlockRoot()
|
||||
for _, sidecar := range dataColumnSidecars[1:] {
|
||||
if sidecar.BlockRoot() != firstRoot {
|
||||
return errMixedRoots
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
firstSidecarEpoch, currentEpoch := slots.ToEpoch(firstSidecar.Slot()), slots.ToEpoch(current)
|
||||
if !params.WithinDAPeriod(firstSidecarEpoch, currentEpoch) {
|
||||
return nil
|
||||
}
|
||||
|
||||
key := cacheKey{slot: firstSidecar.Slot(), root: firstSidecar.BlockRoot()}
|
||||
entry := s.cache.ensure(key)
|
||||
|
||||
for _, sidecar := range dataColumnSidecars {
|
||||
if err := entry.stash(&sidecar); err != nil {
|
||||
return errors.Wrap(err, "stash DataColumnSidecar")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
|
||||
// DataColumnsSidecars already in the db are assumed to have been previously verified against the block.
|
||||
func (s *LazilyPersistentStoreColumn) IsDataAvailable(ctx context.Context, currentSlot primitives.Slot, block blocks.ROBlock) error {
|
||||
blockCommitments, err := s.fullCommitmentsToCheck(s.nodeID, block, currentSlot)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "full commitments to check with block root `%#x` and current slot `%d`", block.Root(), currentSlot)
|
||||
}
|
||||
|
||||
// Return early for blocks that do not have any commitments.
|
||||
if blockCommitments.count() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get the root of the block.
|
||||
blockRoot := block.Root()
|
||||
|
||||
// Build the cache key for the block.
|
||||
key := cacheKey{slot: block.Block().Slot(), root: blockRoot}
|
||||
|
||||
// Retrieve the cache entry for the block, or create an empty one if it doesn't exist.
|
||||
entry := s.cache.ensure(key)
|
||||
|
||||
// Delete the cache entry for the block at the end.
|
||||
defer s.cache.delete(key)
|
||||
|
||||
// Set the disk summary for the block in the cache entry.
|
||||
entry.setDiskSummary(s.store.Summary(blockRoot))
|
||||
|
||||
// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
|
||||
// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
|
||||
// ignore their response and decrease their peer score.
|
||||
roDataColumns, err := entry.filter(blockRoot, blockCommitments)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "entry filter")
|
||||
}
|
||||
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#datacolumnsidecarsbyrange-v1
|
||||
verifier := s.newDataColumnsVerifier(roDataColumns, verification.ByRangeRequestDataColumnSidecarRequirements)
|
||||
|
||||
if err := verifier.ValidFields(); err != nil {
|
||||
return errors.Wrap(err, "valid fields")
|
||||
}
|
||||
|
||||
if err := verifier.SidecarInclusionProven(); err != nil {
|
||||
return errors.Wrap(err, "sidecar inclusion proven")
|
||||
}
|
||||
|
||||
if err := verifier.SidecarKzgProofVerified(); err != nil {
|
||||
return errors.Wrap(err, "sidecar KZG proof verified")
|
||||
}
|
||||
|
||||
verifiedRoDataColumns, err := verifier.VerifiedRODataColumns()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "verified RO data columns - should never happen")
|
||||
}
|
||||
|
||||
if err := s.store.Save(verifiedRoDataColumns); err != nil {
|
||||
return errors.Wrap(err, "save data column sidecars")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// fullCommitmentsToCheck returns the commitments to check for a given block.
|
||||
func (s *LazilyPersistentStoreColumn) fullCommitmentsToCheck(nodeID enode.ID, block blocks.ROBlock, currentSlot primitives.Slot) (*safeCommitmentsArray, error) {
|
||||
// Return early for blocks that are pre-Fulu.
|
||||
if block.Version() < version.Fulu {
|
||||
return &safeCommitmentsArray{}, nil
|
||||
}
|
||||
|
||||
// Compute the block epoch.
|
||||
blockSlot := block.Block().Slot()
|
||||
blockEpoch := slots.ToEpoch(blockSlot)
|
||||
|
||||
// Compute the current epoch.
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
|
||||
// Return early if the request is out of the MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS window.
|
||||
if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
|
||||
return &safeCommitmentsArray{}, nil
|
||||
}
|
||||
|
||||
// Retrieve the KZG commitments for the block.
|
||||
kzgCommitments, err := block.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// Return early if there are no commitments in the block.
|
||||
if len(kzgCommitments) == 0 {
|
||||
return &safeCommitmentsArray{}, nil
|
||||
}
|
||||
|
||||
// Retrieve the groups count.
|
||||
custodyGroupCount := s.custodyInfo.ActualGroupCount()
|
||||
|
||||
// Retrieve peer info.
|
||||
peerInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "peer info")
|
||||
}
|
||||
|
||||
// Create a safe commitments array for the custody columns.
|
||||
commitmentsArray := &safeCommitmentsArray{}
|
||||
commitmentsArraySize := uint64(len(commitmentsArray))
|
||||
|
||||
for column := range peerInfo.CustodyColumns {
|
||||
if column >= commitmentsArraySize {
|
||||
return nil, errors.Errorf("custody column index %d too high (max allowed %d) - should never happen", column, commitmentsArraySize)
|
||||
}
|
||||
|
||||
commitmentsArray[column] = kzgCommitments
|
||||
}
|
||||
|
||||
return commitmentsArray, nil
|
||||
}
|
||||
@@ -1,319 +0,0 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
)
|
||||
|
||||
var commitments = [][]byte{
|
||||
bytesutil.PadTo([]byte("a"), 48),
|
||||
bytesutil.PadTo([]byte("b"), 48),
|
||||
bytesutil.PadTo([]byte("c"), 48),
|
||||
bytesutil.PadTo([]byte("d"), 48),
|
||||
}
|
||||
|
||||
func TestPersist(t *testing.T) {
|
||||
t.Run("no sidecars", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})
|
||||
err := lazilyPersistentStoreColumns.Persist(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
})
|
||||
|
||||
t.Run("mixed roots", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
dataColumnParamsByBlockRoot := []util.DataColumnParam{
|
||||
{Slot: 1, Index: 1},
|
||||
{Slot: 2, Index: 2},
|
||||
}
|
||||
|
||||
roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})
|
||||
|
||||
err := lazilyPersistentStoreColumns.Persist(0, roSidecars...)
|
||||
require.ErrorIs(t, err, errMixedRoots)
|
||||
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
})
|
||||
|
||||
t.Run("outside DA period", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
dataColumnParamsByBlockRoot := []util.DataColumnParam{
|
||||
{Slot: 1, Index: 1},
|
||||
}
|
||||
|
||||
roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})
|
||||
|
||||
err := lazilyPersistentStoreColumns.Persist(1_000_000, roSidecars...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
const slot = 42
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
dataColumnParamsByBlockRoot := []util.DataColumnParam{
|
||||
{Slot: slot, Index: 1},
|
||||
{Slot: slot, Index: 5},
|
||||
}
|
||||
|
||||
roSidecars, roDataColumns := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})
|
||||
|
||||
err := lazilyPersistentStoreColumns.Persist(slot, roSidecars...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
|
||||
key := cacheKey{slot: slot, root: roDataColumns[0].BlockRoot()}
|
||||
entry, ok := lazilyPersistentStoreColumns.cache.entries[key]
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
// A call to Persist does NOT save the sidecars to disk.
|
||||
require.Equal(t, uint64(0), entry.diskSummary.Count())
|
||||
|
||||
require.DeepSSZEqual(t, roDataColumns[0], *entry.scs[1])
|
||||
require.DeepSSZEqual(t, roDataColumns[1], *entry.scs[5])
|
||||
|
||||
for i, roDataColumn := range entry.scs {
|
||||
if map[int]bool{1: true, 5: true}[i] {
|
||||
continue
|
||||
}
|
||||
|
||||
require.IsNil(t, roDataColumn)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestIsDataAvailable(t *testing.T) {
|
||||
newDataColumnsVerifier := func(dataColumnSidecars []blocks.RODataColumn, _ []verification.Requirement) verification.DataColumnsVerifier {
|
||||
return &mockDataColumnsVerifier{t: t, dataColumnSidecars: dataColumnSidecars}
|
||||
}
|
||||
|
||||
ctx := t.Context()
|
||||
|
||||
t.Run("without commitments", func(t *testing.T) {
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, &peerdas.CustodyInfo{})
|
||||
|
||||
err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0 /*current slot*/, signedRoBlock)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("with commitments", func(t *testing.T) {
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
|
||||
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
block := signedRoBlock.Block()
|
||||
slot := block.Slot()
|
||||
proposerIndex := block.ProposerIndex()
|
||||
parentRoot := block.ParentRoot()
|
||||
stateRoot := block.StateRoot()
|
||||
bodyRoot, err := block.Body().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
root := signedRoBlock.Root()
|
||||
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, &peerdas.CustodyInfo{})
|
||||
|
||||
indices := [...]uint64{1, 17, 87, 102}
|
||||
dataColumnsParams := make([]util.DataColumnParam, 0, len(indices))
|
||||
for _, index := range indices {
|
||||
dataColumnParams := util.DataColumnParam{
|
||||
Index: index,
|
||||
KzgCommitments: commitments,
|
||||
|
||||
Slot: slot,
|
||||
ProposerIndex: proposerIndex,
|
||||
ParentRoot: parentRoot[:],
|
||||
StateRoot: stateRoot[:],
|
||||
BodyRoot: bodyRoot[:],
|
||||
}
|
||||
|
||||
dataColumnsParams = append(dataColumnsParams, dataColumnParams)
|
||||
}
|
||||
|
||||
_, verifiedRoDataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnsParams)
|
||||
|
||||
key := cacheKey{root: root}
|
||||
entry := lazilyPersistentStoreColumns.cache.ensure(key)
|
||||
defer lazilyPersistentStoreColumns.cache.delete(key)
|
||||
|
||||
for _, verifiedRoDataColumn := range verifiedRoDataColumns {
|
||||
err := entry.stash(&verifiedRoDataColumn.RODataColumn)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
err = lazilyPersistentStoreColumns.IsDataAvailable(ctx, slot, signedRoBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
actual, err := dataColumnStorage.Get(root, indices[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
summary := dataColumnStorage.Summary(root)
|
||||
require.Equal(t, uint64(len(indices)), summary.Count())
|
||||
require.DeepSSZEqual(t, verifiedRoDataColumns, actual)
|
||||
})
|
||||
}
|
||||
|
||||
func TestFullCommitmentsToCheck(t *testing.T) {
|
||||
windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
|
||||
require.NoError(t, err)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
commitments [][]byte
|
||||
block func(*testing.T) blocks.ROBlock
|
||||
slot primitives.Slot
|
||||
}{
|
||||
{
|
||||
name: "Pre-Fulu block",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
return newSignedRoBlock(t, util.NewBeaconBlockElectra())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Commitments outside data availability window",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
beaconBlockElectra := util.NewBeaconBlockElectra()
|
||||
|
||||
// Block is from slot 0, "current slot" is window size +1 (so outside the window)
|
||||
beaconBlockElectra.Block.Body.BlobKzgCommitments = commitments
|
||||
|
||||
return newSignedRoBlock(t, beaconBlockElectra)
|
||||
},
|
||||
slot: windowSlots + 1,
|
||||
},
|
||||
{
|
||||
name: "Commitments within data availability window",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
|
||||
signedBeaconBlockFulu.Block.Slot = 100
|
||||
|
||||
return newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
},
|
||||
commitments: commitments,
|
||||
slot: 100,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SubscribeAllDataSubnets = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
b := tc.block(t)
|
||||
s := NewLazilyPersistentStoreColumn(nil, enode.ID{}, nil, &peerdas.CustodyInfo{})
|
||||
|
||||
commitmentsArray, err := s.fullCommitmentsToCheck(enode.ID{}, b, tc.slot)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, commitments := range commitmentsArray {
|
||||
require.DeepEqual(t, tc.commitments, commitments)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func roSidecarsFromDataColumnParamsByBlockRoot(t *testing.T, parameters []util.DataColumnParam) ([]blocks.ROSidecar, []blocks.RODataColumn) {
|
||||
roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, parameters)
|
||||
|
||||
roSidecars := make([]blocks.ROSidecar, 0, len(roDataColumns))
|
||||
for _, roDataColumn := range roDataColumns {
|
||||
roSidecars = append(roSidecars, blocks.NewSidecarFromDataColumnSidecar(roDataColumn))
|
||||
}
|
||||
|
||||
return roSidecars, roDataColumns
|
||||
}
|
||||
|
||||
func newSignedRoBlock(t *testing.T, signedBeaconBlock interface{}) blocks.ROBlock {
|
||||
sb, err := blocks.NewSignedBeaconBlock(signedBeaconBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
rb, err := blocks.NewROBlock(sb)
|
||||
require.NoError(t, err)
|
||||
|
||||
return rb
|
||||
}
|
||||
|
||||
type mockDataColumnsVerifier struct {
|
||||
t *testing.T
|
||||
dataColumnSidecars []blocks.RODataColumn
|
||||
validCalled, SidecarInclusionProvenCalled, SidecarKzgProofVerifiedCalled bool
|
||||
}
|
||||
|
||||
var _ verification.DataColumnsVerifier = &mockDataColumnsVerifier{}
|
||||
|
||||
func (m *mockDataColumnsVerifier) VerifiedRODataColumns() ([]blocks.VerifiedRODataColumn, error) {
|
||||
require.Equal(m.t, true, m.validCalled && m.SidecarInclusionProvenCalled && m.SidecarKzgProofVerifiedCalled)
|
||||
|
||||
verifiedDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(m.dataColumnSidecars))
|
||||
for _, dataColumnSidecar := range m.dataColumnSidecars {
|
||||
verifiedDataColumnSidecar := blocks.NewVerifiedRODataColumn(dataColumnSidecar)
|
||||
verifiedDataColumnSidecars = append(verifiedDataColumnSidecars, verifiedDataColumnSidecar)
|
||||
}
|
||||
|
||||
return verifiedDataColumnSidecars, nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SatisfyRequirement(verification.Requirement) {}
|
||||
|
||||
func (m *mockDataColumnsVerifier) ValidFields() error {
|
||||
m.validCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) CorrectSubnet(dataColumnSidecarSubTopic string, expectedTopics []string) error {
|
||||
return nil
|
||||
}
|
||||
func (m *mockDataColumnsVerifier) NotFromFutureSlot() error { return nil }
|
||||
func (m *mockDataColumnsVerifier) SlotAboveFinalized() error { return nil }
|
||||
func (m *mockDataColumnsVerifier) ValidProposerSignature(ctx context.Context) error { return nil }
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarParentSeen(parentSeen func([fieldparams.RootLength]byte) bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarParentValid(badParent func([fieldparams.RootLength]byte) bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarParentSlotLower() error { return nil }
|
||||
func (m *mockDataColumnsVerifier) SidecarDescendsFromFinalized() error { return nil }
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarInclusionProven() error {
|
||||
m.SidecarInclusionProvenCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarKzgProofVerified() error {
|
||||
m.SidecarKzgProofVerifiedCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarProposerExpected(ctx context.Context) error { return nil }
|
||||
@@ -99,20 +99,26 @@ func (e *blobCacheEntry) filter(root [32]byte, kc [][]byte, slot primitives.Slot
|
||||
if e.diskSummary.HasIndex(i) {
|
||||
continue
|
||||
}
|
||||
// Check if e.scs has this index before accessing
|
||||
var sidecar *blocks.ROBlob
|
||||
if i < uint64(len(e.scs)) {
|
||||
sidecar = e.scs[i]
|
||||
}
|
||||
|
||||
if kc[i] == nil {
|
||||
if e.scs[i] != nil {
|
||||
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, no block commitment", root, i, e.scs[i].KzgCommitment)
|
||||
if sidecar != nil {
|
||||
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, no block commitment", root, i, sidecar.KzgCommitment)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if e.scs[i] == nil {
|
||||
if sidecar == nil {
|
||||
return nil, errors.Wrapf(errMissingSidecar, "root=%#x, index=%#x", root, i)
|
||||
}
|
||||
if !bytes.Equal(kc[i], e.scs[i].KzgCommitment) {
|
||||
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.scs[i].KzgCommitment, kc[i])
|
||||
if !bytes.Equal(kc[i], sidecar.KzgCommitment) {
|
||||
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, sidecar.KzgCommitment, kc[i])
|
||||
}
|
||||
scs = append(scs, *e.scs[i])
|
||||
scs = append(scs, *sidecar)
|
||||
}
|
||||
|
||||
return scs, nil
|
||||
|
||||
@@ -155,6 +155,41 @@ func TestFilter(t *testing.T) {
|
||||
},
|
||||
err: errCommitmentMismatch,
|
||||
},
|
||||
{
|
||||
name: "empty scs array with commitments",
|
||||
setup: func(t *testing.T) (*blobCacheEntry, [][]byte, []blocks.ROBlob) {
|
||||
// This reproduces the panic condition where entry.scs is empty or nil
|
||||
// but we have commitments to check
|
||||
entry := &blobCacheEntry{
|
||||
scs: nil, // Empty/nil array that caused the panic
|
||||
}
|
||||
// Create a commitment that would trigger the check at index 0
|
||||
commits := [][]byte{
|
||||
bytesutil.PadTo([]byte("commitment1"), 48),
|
||||
}
|
||||
return entry, commits, nil
|
||||
},
|
||||
err: errMissingSidecar,
|
||||
},
|
||||
{
|
||||
name: "scs array shorter than commitments",
|
||||
setup: func(t *testing.T) (*blobCacheEntry, [][]byte, []blocks.ROBlob) {
|
||||
// This reproduces the condition where entry.scs exists but is shorter
|
||||
// than the number of commitments we're checking
|
||||
entry := &blobCacheEntry{
|
||||
scs: make([]*blocks.ROBlob, 2), // Only 2 slots
|
||||
}
|
||||
// Create 4 commitments, accessing index 2 and 3 would have panicked
|
||||
commits := [][]byte{
|
||||
nil,
|
||||
nil,
|
||||
bytesutil.PadTo([]byte("commitment3"), 48),
|
||||
bytesutil.PadTo([]byte("commitment4"), 48),
|
||||
}
|
||||
return entry, commits, nil
|
||||
},
|
||||
err: errMissingSidecar,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
|
||||
@@ -15,5 +15,5 @@ import (
// durably persisted before returning a non-error value.
type AvailabilityStore interface {
    IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
    Persist(current primitives.Slot, sc ...blocks.ROSidecar) error
    Persist(current primitives.Slot, blobSidecar ...blocks.ROBlob) error
}
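With the interface narrowed to concrete blob sidecars, a caller on the sync path would look roughly like the hedged sketch below (imports omitted; the function name and error wrapping are illustrative only):

func importWithDA(ctx context.Context, avs das.AvailabilityStore, current primitives.Slot, blk blocks.ROBlock, sidecars []blocks.ROBlob) error {
    // Stash the sidecars received alongside the block; nothing is written to disk yet.
    if err := avs.Persist(current, sidecars...); err != nil {
        return errors.Wrap(err, "persist blob sidecars")
    }
    // Only returns nil once every commitment in blk is backed by a verified, durably stored sidecar.
    return avs.IsDataAvailable(ctx, current, blk)
}
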
@@ -5,13 +5,12 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
errors "github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// MockAvailabilityStore is an implementation of AvailabilityStore that can be used by other packages in tests.
|
||||
type MockAvailabilityStore struct {
|
||||
VerifyAvailabilityCallback func(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
|
||||
PersistBlobsCallback func(current primitives.Slot, sc ...blocks.ROBlob) error
|
||||
PersistBlobsCallback func(current primitives.Slot, blobSidecar ...blocks.ROBlob) error
|
||||
}
|
||||
|
||||
var _ AvailabilityStore = &MockAvailabilityStore{}
|
||||
@@ -25,13 +24,9 @@ func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, current pri
|
||||
}
|
||||
|
||||
// Persist satisfies the corresponding method of the AvailabilityStore interface in a way that is useful for tests.
|
||||
func (m *MockAvailabilityStore) Persist(current primitives.Slot, sc ...blocks.ROSidecar) error {
|
||||
blobSidecars, err := blocks.BlobSidecarsFromSidecars(sc)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "blob sidecars from sidecars")
|
||||
}
|
||||
func (m *MockAvailabilityStore) Persist(current primitives.Slot, blobSidecar ...blocks.ROBlob) error {
|
||||
if m.PersistBlobsCallback != nil {
|
||||
return m.PersistBlobsCallback(current, blobSidecars...)
|
||||
return m.PersistBlobsCallback(current, blobSidecar...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -13,6 +13,7 @@ go_library(
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//cmd/beacon-chain:__subpackages__",
|
||||
"//genesis:__subpackages__",
|
||||
"//testing/slasher/simulator:__pkg__",
|
||||
"//tools:__subpackages__",
|
||||
],
|
||||
|
||||
@@ -10,6 +10,11 @@ type ReadOnlyDatabase = iface.ReadOnlyDatabase
|
||||
// about head info. For head info, use github.com/prysmaticlabs/prysm/blockchain.HeadFetcher.
|
||||
type NoHeadAccessDatabase = iface.NoHeadAccessDatabase
|
||||
|
||||
// ReadOnlyDatabaseWithSeqNum exposes Prysm's Ethereum data backend for read access only, no information about
|
||||
// head info, but with read/write access to the p2p metadata sequence number.
|
||||
// This is used for the p2p service.
|
||||
type ReadOnlyDatabaseWithSeqNum = iface.ReadOnlyDatabaseWithSeqNum
|
||||
|
||||
// HeadAccessDatabase exposes Prysm's Ethereum backend for read/write access with information about
|
||||
// chain head information. This interface should be used sparingly as the HeadFetcher is the source
|
||||
// of truth around chain head information while this interface serves as persistent storage for the
|
||||
|
||||
@@ -100,6 +100,14 @@ type (
|
||||
}
|
||||
)
|
||||
|
||||
// DataColumnStorageReader is an interface to read data column sidecars from the filesystem.
type DataColumnStorageReader interface {
    Summary(root [fieldparams.RootLength]byte) DataColumnStorageSummary
    Get(root [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error)
}

var _ DataColumnStorageReader = &DataColumnStorage{}
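A hedged usage sketch for the new reader interface (imports omitted; wantIndices and the Stored() filtering are illustrative, only Summary and Get belong to the interface):

func readStoredColumns(r filesystem.DataColumnStorageReader, root [fieldparams.RootLength]byte, wantIndices []uint64) ([]blocks.VerifiedRODataColumn, error) {
    // Consult the summary first so Get is only asked for indices that are actually on disk.
    stored := r.Summary(root).Stored()
    available := make([]uint64, 0, len(wantIndices))
    for _, idx := range wantIndices {
        if stored[idx] {
            available = append(available, idx)
        }
    }
    return r.Get(root, available)
}
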
// WithDataColumnBasePath is a required option that sets the base path of data column storage.
|
||||
func WithDataColumnBasePath(base string) DataColumnStorageOption {
|
||||
return func(b *DataColumnStorage) error {
|
||||
|
||||
@@ -84,12 +84,6 @@ func (s DataColumnStorageSummary) Stored() map[uint64]bool {
|
||||
return stored
|
||||
}
|
||||
|
||||
// DataColumnStorageSummarizer can be used to receive a summary of metadata about data columns on disk for a given root.
|
||||
// The DataColumnStorageSummary can be used to check which indices (if any) are available for a given block by root.
|
||||
type DataColumnStorageSummarizer interface {
|
||||
Summary(root [fieldparams.RootLength]byte) DataColumnStorageSummary
|
||||
}
|
||||
|
||||
type dataColumnStorageSummaryCache struct {
|
||||
mu sync.RWMutex
|
||||
dataColumnCount float64
|
||||
@@ -98,8 +92,6 @@ type dataColumnStorageSummaryCache struct {
|
||||
cache map[[fieldparams.RootLength]byte]DataColumnStorageSummary
|
||||
}
|
||||
|
||||
var _ DataColumnStorageSummarizer = &dataColumnStorageSummaryCache{}
|
||||
|
||||
func newDataColumnStorageSummaryCache() *dataColumnStorageSummaryCache {
|
||||
return &dataColumnStorageSummaryCache{
|
||||
cache: make(map[[fieldparams.RootLength]byte]DataColumnStorageSummary),
|
||||
|
||||
@@ -144,14 +144,3 @@ func NewEphemeralDataColumnStorageWithMocker(t testing.TB) (*DataColumnMocker, *
|
||||
fs, dcs := NewEphemeralDataColumnStorageAndFs(t)
|
||||
return &DataColumnMocker{fs: fs, dcs: dcs}, dcs
|
||||
}
|
||||
|
||||
func NewMockDataColumnStorageSummarizer(t *testing.T, set map[[fieldparams.RootLength]byte][]uint64) DataColumnStorageSummarizer {
|
||||
c := newDataColumnStorageSummaryCache()
|
||||
for root, indices := range set {
|
||||
if err := c.set(DataColumnsIdent{Root: root, Epoch: 0, Indices: indices}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
@@ -33,6 +33,7 @@ type ReadOnlyDatabase interface {
|
||||
IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool
|
||||
FinalizedChildBlock(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error)
|
||||
HighestRootsBelowSlot(ctx context.Context, slot primitives.Slot) (primitives.Slot, [][32]byte, error)
|
||||
EarliestSlot(ctx context.Context) (primitives.Slot, error)
|
||||
// State related methods.
|
||||
State(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
|
||||
StateOrError(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
|
||||
@@ -56,14 +57,25 @@ type ReadOnlyDatabase interface {
|
||||
// Fee recipients operations.
|
||||
FeeRecipientByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (common.Address, error)
|
||||
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
|
||||
// light client operations
|
||||
// Light client operations
|
||||
LightClientUpdates(ctx context.Context, startPeriod, endPeriod uint64) (map[uint64]interfaces.LightClientUpdate, error)
|
||||
LightClientUpdate(ctx context.Context, period uint64) (interfaces.LightClientUpdate, error)
|
||||
LightClientBootstrap(ctx context.Context, blockRoot []byte) (interfaces.LightClientBootstrap, error)
|
||||
|
||||
// origin checkpoint sync support
|
||||
// Origin checkpoint sync support
|
||||
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
|
||||
BackfillStatus(context.Context) (*dbval.BackfillStatus, error)
|
||||
|
||||
// P2P Metadata operations.
|
||||
MetadataSeqNum(ctx context.Context) (uint64, error)
|
||||
}
|
||||
|
||||
// ReadOnlyDatabaseWithSeqNum defines an interface which has read access to database methods
// and also has read/write access to the p2p metadata sequence number.
// Only used for the p2p service.
type ReadOnlyDatabaseWithSeqNum interface {
    ReadOnlyDatabase

    SaveMetadataSeqNum(ctx context.Context, seqNum uint64) error
}
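For the p2p service this split means it can read everything it needs while only ever writing the sequence number. A hedged sketch of the intended usage (imports omitted; the increment-on-change policy is illustrative, not taken from this diff):

func bumpSeqNum(ctx context.Context, db iface.ReadOnlyDatabaseWithSeqNum) (uint64, error) {
    seq, err := db.MetadataSeqNum(ctx)
    if err != nil {
        return 0, errors.Wrap(err, "read metadata sequence number")
    }
    seq++ // a node typically bumps its metadata seqnum when its ENR/metadata changes
    if err := db.SaveMetadataSeqNum(ctx, seq); err != nil {
        return 0, errors.Wrap(err, "save metadata sequence number")
    }
    return seq, nil
}
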
// NoHeadAccessDatabase defines a struct without access to chain head data.
|
||||
@@ -102,6 +114,24 @@ type NoHeadAccessDatabase interface {
|
||||
|
||||
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
|
||||
DeleteHistoricalDataBeforeSlot(ctx context.Context, slot primitives.Slot, batchSize int) (int, error)
|
||||
|
||||
// Genesis operations.
|
||||
LoadGenesis(ctx context.Context, stateBytes []byte) error
|
||||
SaveGenesisData(ctx context.Context, state state.BeaconState) error
|
||||
EnsureEmbeddedGenesis(ctx context.Context) error
|
||||
|
||||
// Support for checkpoint sync and backfill.
|
||||
SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32]byte) error
|
||||
SaveOrigin(ctx context.Context, serState, serBlock []byte) error
|
||||
SaveBackfillStatus(context.Context, *dbval.BackfillStatus) error
|
||||
BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBlock, finalizedChildRoot [32]byte) error
|
||||
|
||||
// Custody operations.
|
||||
UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error)
|
||||
UpdateCustodyInfo(ctx context.Context, earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error)
|
||||
|
||||
// P2P Metadata operations.
|
||||
SaveMetadataSeqNum(ctx context.Context, seqNum uint64) error
|
||||
}
|
||||
|
||||
// HeadAccessDatabase defines a struct with access to reading chain head data.
|
||||
@@ -112,16 +142,6 @@ type HeadAccessDatabase interface {
|
||||
HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error)
|
||||
HeadBlockRoot() ([32]byte, error)
|
||||
SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error
|
||||
|
||||
// Genesis operations.
|
||||
LoadGenesis(ctx context.Context, stateBytes []byte) error
|
||||
SaveGenesisData(ctx context.Context, state state.BeaconState) error
|
||||
EnsureEmbeddedGenesis(ctx context.Context) error
|
||||
|
||||
// Support for checkpoint sync and backfill.
|
||||
SaveOrigin(ctx context.Context, serState, serBlock []byte) error
|
||||
SaveBackfillStatus(context.Context, *dbval.BackfillStatus) error
|
||||
BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBlock, finalizedChildRoot [32]byte) error
|
||||
}
|
||||
|
||||
// SlasherDatabase interface for persisting data related to detecting slashable offenses on Ethereum.
|
||||
|
||||
@@ -8,6 +8,7 @@ go_library(
|
||||
"backup.go",
|
||||
"blocks.go",
|
||||
"checkpoint.go",
|
||||
"custody.go",
|
||||
"deposit_contract.go",
|
||||
"encoding.go",
|
||||
"error.go",
|
||||
@@ -23,6 +24,7 @@ go_library(
|
||||
"migration_block_slot_index.go",
|
||||
"migration_finalized_parent.go",
|
||||
"migration_state_validators.go",
|
||||
"p2p.go",
|
||||
"schema.go",
|
||||
"state.go",
|
||||
"state_summary.go",
|
||||
@@ -38,7 +40,6 @@ go_library(
|
||||
"//beacon-chain/db/filters:go_default_library",
|
||||
"//beacon-chain/db/iface:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/genesis:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
@@ -50,6 +51,7 @@ go_library(
|
||||
"//container/slice:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz/detect:go_default_library",
|
||||
"//genesis:go_default_library",
|
||||
"//io/file:go_default_library",
|
||||
"//monitoring/progress:go_default_library",
|
||||
"//monitoring/tracing:go_default_library",
|
||||
@@ -83,6 +85,7 @@ go_test(
|
||||
"backup_test.go",
|
||||
"blocks_test.go",
|
||||
"checkpoint_test.go",
|
||||
"custody_test.go",
|
||||
"deposit_contract_test.go",
|
||||
"encoding_test.go",
|
||||
"execution_chain_test.go",
|
||||
@@ -94,6 +97,7 @@ go_test(
|
||||
"migration_archived_index_test.go",
|
||||
"migration_block_slot_index_test.go",
|
||||
"migration_state_validators_test.go",
|
||||
"p2p_test.go",
|
||||
"state_summary_test.go",
|
||||
"state_test.go",
|
||||
"utils_test.go",
|
||||
@@ -106,7 +110,6 @@ go_test(
|
||||
"//beacon-chain/db/filters:go_default_library",
|
||||
"//beacon-chain/db/iface:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/genesis:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
@@ -116,6 +119,7 @@ go_test(
|
||||
"//consensus-types/light-client:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//genesis:go_default_library",
|
||||
"//proto/dbval:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
|
||||
@@ -860,6 +860,47 @@ func (s *Store) SaveRegistrationsByValidatorIDs(ctx context.Context, ids []primi
|
||||
})
|
||||
}
|
||||
|
||||
// EarliestSlot returns the earliest slot stored in the database.
func (s *Store) EarliestSlot(ctx context.Context) (primitives.Slot, error) {
    slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
    _, span := trace.StartSpan(ctx, "BeaconDB.EarliestSlot")
    defer span.End()

    earliestAvailableSlot := primitives.Slot(0)
    err := s.db.View(func(tx *bolt.Tx) error {
        // Retrieve the root corresponding to the earliest available block.
        c := tx.Bucket(blockSlotIndicesBucket).Cursor()
        k, v := c.First()
        if k == nil || v == nil {
            return ErrNotFound
        }
        slot := bytesutil.BytesToSlotBigEndian(k)

        // The genesis block may be indexed in this bucket, even if we started from a checkpoint.
        // Because of this, we check the next block. If the next block is still in the genesis epoch,
        // then we assume we have the whole chain.
        if slot != 0 {
            earliestAvailableSlot = slot
        }

        k, v = c.Next()
        if k == nil || v == nil {
            // Only the genesis block is available.
            return nil
        }
        slot = bytesutil.BytesToSlotBigEndian(k)
        if slot < slotsPerEpoch {
            // We are still in the genesis epoch, so we assume we have the whole chain.
            return nil
        }

        earliestAvailableSlot = slot
        return nil
    })

    return earliestAvailableSlot, err
}
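Callers can treat ErrNotFound from an empty database as "nothing indexed yet" rather than a hard failure. A hedged sketch written as if inside the kv package (helper name illustrative, imports omitted):

func earliestOrGenesis(ctx context.Context, s *Store) (primitives.Slot, error) {
    slot, err := s.EarliestSlot(ctx)
    if errors.Is(err, ErrNotFound) {
        // No block slot index entries at all: fall back to the genesis slot.
        return 0, nil
    }
    return slot, err
}
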
type slotRoot struct {
|
||||
slot primitives.Slot
|
||||
root [32]byte
|
||||
@@ -883,7 +924,7 @@ func (s *Store) slotRootsInRange(ctx context.Context, start, end primitives.Slot
|
||||
c := bkt.Cursor()
|
||||
for k, v := c.Seek(key); ; /* rely on internal checks to exit */ k, v = c.Prev() {
|
||||
if len(k) == 0 && len(v) == 0 {
|
||||
// The `edge`` variable and this `if` deal with 2 edge cases:
|
||||
// The `edge` variable and this `if` deal with 2 edge cases:
|
||||
// - Seeking past the end of the bucket (the `end` param is higher than the highest slot).
|
||||
// - Seeking before the beginning of the bucket (the `start` param is lower than the lowest slot).
|
||||
// In both of these cases k,v will be nil and we can handle the same way using `edge` to
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -1327,3 +1328,86 @@ func TestStore_RegistrationsByValidatorID(t *testing.T) {
|
||||
want := errors.Wrap(ErrNotFoundFeeRecipient, "validator id 3")
|
||||
require.Equal(t, want.Error(), err.Error())
|
||||
}
|
||||
|
||||
// createAndSaveBlock creates a phase0 beacon block at the specified slot and saves it to the database.
func createAndSaveBlock(t *testing.T, ctx context.Context, db *Store, slot primitives.Slot) {
|
||||
block := util.NewBeaconBlock()
|
||||
block.Block.Slot = slot
|
||||
|
||||
wrappedBlock, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, wrappedBlock))
|
||||
}
|
||||
|
||||
func TestStore_EarliestSlot(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
t.Run("empty database returns ErrNotFound", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
slot, err := db.EarliestSlot(ctx)
|
||||
require.ErrorIs(t, err, ErrNotFound)
|
||||
assert.Equal(t, primitives.Slot(0), slot)
|
||||
})
|
||||
|
||||
t.Run("database with only genesis block", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
// Create and save genesis block (slot 0)
|
||||
createAndSaveBlock(t, ctx, db, 0)
|
||||
|
||||
slot, err := db.EarliestSlot(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, primitives.Slot(0), slot)
|
||||
})
|
||||
|
||||
t.Run("database with genesis and blocks in genesis epoch", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
// Create and save genesis block (slot 0)
|
||||
createAndSaveBlock(t, ctx, db, 0)
|
||||
|
||||
// Create and save a block in the genesis epoch
|
||||
createAndSaveBlock(t, ctx, db, primitives.Slot(slotsPerEpoch-1))
|
||||
|
||||
slot, err := db.EarliestSlot(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, primitives.Slot(0), slot)
|
||||
})
|
||||
|
||||
t.Run("database with genesis and blocks beyond genesis epoch", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
// Create and save genesis block (slot 0)
|
||||
createAndSaveBlock(t, ctx, db, 0)
|
||||
|
||||
// Create and save a block beyond the genesis epoch
|
||||
nextEpochSlot := primitives.Slot(slotsPerEpoch)
|
||||
createAndSaveBlock(t, ctx, db, nextEpochSlot)
|
||||
|
||||
slot, err := db.EarliestSlot(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, nextEpochSlot, slot)
|
||||
})
|
||||
|
||||
t.Run("database starting from checkpoint (non-zero earliest slot)", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
// Simulate starting from a checkpoint by creating blocks starting from a later slot
|
||||
checkpointSlot := primitives.Slot(slotsPerEpoch * 10) // 10 epochs later
|
||||
nextEpochSlot := checkpointSlot + slotsPerEpoch
|
||||
|
||||
// Create and save first block at checkpoint slot
|
||||
createAndSaveBlock(t, ctx, db, checkpointSlot)
|
||||
|
||||
// Create and save another block in the next epoch
|
||||
createAndSaveBlock(t, ctx, db, nextEpochSlot)
|
||||
|
||||
slot, err := db.EarliestSlot(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, nextEpochSlot, slot)
|
||||
})
|
||||
}
|
||||
|
||||
129
beacon-chain/db/kv/custody.go
Normal file
@@ -0,0 +1,129 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
// UpdateCustodyInfo atomically updates the custody group count only if it is greater than the stored one.
|
||||
// In this case, it also updates the earliest available slot with the provided value.
|
||||
// It returns the (potentially updated) custody group count and earliest available slot.
|
||||
func (s *Store) UpdateCustodyInfo(ctx context.Context, earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.UpdateCustodyInfo")
|
||||
defer span.End()
|
||||
|
||||
storedGroupCount, storedEarliestAvailableSlot := uint64(0), primitives.Slot(0)
|
||||
if err := s.db.Update(func(tx *bolt.Tx) error {
|
||||
// Retrieve the custody bucket.
|
||||
bucket, err := tx.CreateBucketIfNotExists(custodyBucket)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "create custody bucket")
|
||||
}
|
||||
|
||||
// Retrieve the stored custody group count.
|
||||
storedGroupCountBytes := bucket.Get(groupCountKey)
|
||||
if len(storedGroupCountBytes) != 0 {
|
||||
storedGroupCount = bytesutil.BytesToUint64BigEndian(storedGroupCountBytes)
|
||||
}
|
||||
|
||||
// Retrieve the stored earliest available slot.
|
||||
storedEarliestAvailableSlotBytes := bucket.Get(earliestAvailableSlotKey)
|
||||
if len(storedEarliestAvailableSlotBytes) != 0 {
|
||||
storedEarliestAvailableSlot = primitives.Slot(bytesutil.BytesToUint64BigEndian(storedEarliestAvailableSlotBytes))
|
||||
}
|
||||
|
||||
// Exit early if the new custody group count is lower than or equal to the stored one.
|
||||
if custodyGroupCount <= storedGroupCount {
|
||||
return nil
|
||||
}
|
||||
|
||||
storedGroupCount, storedEarliestAvailableSlot = custodyGroupCount, earliestAvailableSlot
|
||||
|
||||
// Store the earliest available slot.
|
||||
bytes := bytesutil.Uint64ToBytesBigEndian(uint64(earliestAvailableSlot))
|
||||
if err := bucket.Put(earliestAvailableSlotKey, bytes); err != nil {
|
||||
return errors.Wrap(err, "put earliest available slot")
|
||||
}
|
||||
|
||||
// Store the custody group count.
|
||||
bytes = bytesutil.Uint64ToBytesBigEndian(custodyGroupCount)
|
||||
if err := bucket.Put(groupCountKey, bytes); err != nil {
|
||||
return errors.Wrap(err, "put custody group count")
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"earliestAvailableSlot": storedEarliestAvailableSlot,
|
||||
"groupCount": storedGroupCount,
|
||||
}).Debug("Custody info")
|
||||
|
||||
return storedEarliestAvailableSlot, storedGroupCount, nil
|
||||
}
|
||||
|
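A hedged usage sketch of the monotonic update above: the caller hands in its local custody view and adopts whatever the database returns, since a lower group count never overwrites a higher one. The interface and function names below are illustrative, not Prysm's actual wiring:

package custodysketch

import (
	"context"

	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/pkg/errors"
)

// custodyStore is the slice of the Store API used in this sketch.
type custodyStore interface {
	UpdateCustodyInfo(ctx context.Context, earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error)
}

// reconcileCustody writes the node's local view and adopts whatever the database
// returns: if the stored group count was already higher, the stored values win and
// the caller should keep using them.
func reconcileCustody(ctx context.Context, db custodyStore, localSlot primitives.Slot, localCount uint64) (primitives.Slot, uint64, error) {
	slot, count, err := db.UpdateCustodyInfo(ctx, localSlot, localCount)
	if err != nil {
		return 0, 0, errors.Wrap(err, "update custody info")
	}
	return slot, count, nil
}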
||||
// UpdateSubscribedToAllDataSubnets updates the "subscribed to all data subnets" status in the database
|
||||
// only if `subscribed` is `true`.
|
||||
// It returns the previous subscription status.
|
||||
func (s *Store) UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.UpdateSubscribedToAllDataSubnets")
|
||||
defer span.End()
|
||||
|
||||
result := false
|
||||
if !subscribed {
|
||||
if err := s.db.View(func(tx *bolt.Tx) error {
|
||||
// Retrieve the custody bucket.
|
||||
bucket := tx.Bucket(custodyBucket)
|
||||
if bucket == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Retrieve the subscribe all data subnets flag.
|
||||
bytes := bucket.Get(subscribeAllDataSubnetsKey)
|
||||
if len(bytes) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if bytes[0] == 1 {
|
||||
result = true
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
if err := s.db.Update(func(tx *bolt.Tx) error {
|
||||
// Retrieve the custody bucket.
|
||||
bucket, err := tx.CreateBucketIfNotExists(custodyBucket)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "create custody bucket")
|
||||
}
|
||||
|
||||
bytes := bucket.Get(subscribeAllDataSubnetsKey)
|
||||
if len(bytes) != 0 && bytes[0] == 1 {
|
||||
result = true
|
||||
}
|
||||
|
||||
if err := bucket.Put(subscribeAllDataSubnetsKey, []byte{1}); err != nil {
|
||||
return errors.Wrap(err, "put subscribe all data subnets")
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
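The flag behaves as a sticky bit: once recorded as true it is never flipped back to false. A small illustrative caller (names assumed, not Prysm's actual wiring) that persists the flag and warns when a previous run had it set:

package custodysketch

import (
	"context"

	log "github.com/sirupsen/logrus"
)

// subnetFlagStore is the slice of the Store API used in this sketch.
type subnetFlagStore interface {
	UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error)
}

// recordSubnetSubscription persists the "all data subnets" flag and warns when the
// flag was set by a previous run but the current run no longer subscribes to all
// subnets; the stored flag intentionally never flips back to false.
func recordSubnetSubscription(ctx context.Context, db subnetFlagStore, subscribesToAll bool) error {
	wasSubscribed, err := db.UpdateSubscribedToAllDataSubnets(ctx, subscribesToAll)
	if err != nil {
		return err
	}
	if wasSubscribed && !subscribesToAll {
		log.Warn("Previous run subscribed to all data subnets; keeping the stored flag")
	}
	return nil
}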
||||
176
beacon-chain/db/kv/custody_test.go
Normal file
@@ -0,0 +1,176 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
// getCustodyInfoFromDB reads the custody info directly from the database for testing purposes.
|
||||
func getCustodyInfoFromDB(t *testing.T, db *Store) (primitives.Slot, uint64) {
|
||||
t.Helper()
|
||||
var earliestSlot primitives.Slot
|
||||
var groupCount uint64
|
||||
|
||||
err := db.db.View(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(custodyBucket)
|
||||
if bucket == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read group count
|
||||
groupCountBytes := bucket.Get(groupCountKey)
|
||||
if len(groupCountBytes) != 0 {
|
||||
groupCount = bytesutil.BytesToUint64BigEndian(groupCountBytes)
|
||||
}
|
||||
|
||||
// Read earliest available slot
|
||||
earliestSlotBytes := bucket.Get(earliestAvailableSlotKey)
|
||||
if len(earliestSlotBytes) != 0 {
|
||||
earliestSlot = primitives.Slot(bytesutil.BytesToUint64BigEndian(earliestSlotBytes))
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
return earliestSlot, groupCount
|
||||
}
|
||||
|
||||
// getSubscriptionStatusFromDB reads the subscription status directly from the database for testing purposes.
|
||||
func getSubscriptionStatusFromDB(t *testing.T, db *Store) bool {
|
||||
t.Helper()
|
||||
var subscribed bool
|
||||
|
||||
err := db.db.View(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(custodyBucket)
|
||||
if bucket == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
bytes := bucket.Get(subscribeAllDataSubnetsKey)
|
||||
if len(bytes) != 0 && bytes[0] == 1 {
|
||||
subscribed = true
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
return subscribed
|
||||
}
|
||||
|
||||
func TestUpdateCustodyInfo(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
t.Run("initial update with empty database", func(t *testing.T) {
|
||||
const (
|
||||
earliestSlot = primitives.Slot(100)
|
||||
groupCount = uint64(5)
|
||||
)
|
||||
|
||||
db := setupDB(t)
|
||||
|
||||
slot, count, err := db.UpdateCustodyInfo(ctx, earliestSlot, groupCount)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, earliestSlot, slot)
|
||||
require.Equal(t, groupCount, count)
|
||||
|
||||
storedSlot, storedCount := getCustodyInfoFromDB(t, db)
|
||||
require.Equal(t, earliestSlot, storedSlot)
|
||||
require.Equal(t, groupCount, storedCount)
|
||||
})
|
||||
|
||||
t.Run("update with higher group count", func(t *testing.T) {
|
||||
const (
|
||||
initialSlot = primitives.Slot(100)
|
||||
initialCount = uint64(5)
|
||||
earliestSlot = primitives.Slot(200)
|
||||
groupCount = uint64(10)
|
||||
)
|
||||
|
||||
db := setupDB(t)
|
||||
|
||||
_, _, err := db.UpdateCustodyInfo(ctx, initialSlot, initialCount)
|
||||
require.NoError(t, err)
|
||||
|
||||
slot, count, err := db.UpdateCustodyInfo(ctx, earliestSlot, groupCount)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, earliestSlot, slot)
|
||||
require.Equal(t, groupCount, count)
|
||||
|
||||
storedSlot, storedCount := getCustodyInfoFromDB(t, db)
|
||||
require.Equal(t, earliestSlot, storedSlot)
|
||||
require.Equal(t, groupCount, storedCount)
|
||||
})
|
||||
|
||||
t.Run("update with lower group count should not update", func(t *testing.T) {
|
||||
const (
|
||||
initialSlot = primitives.Slot(200)
|
||||
initialCount = uint64(10)
|
||||
earliestSlot = primitives.Slot(300)
|
||||
groupCount = uint64(8)
|
||||
)
|
||||
|
||||
db := setupDB(t)
|
||||
|
||||
_, _, err := db.UpdateCustodyInfo(ctx, initialSlot, initialCount)
|
||||
require.NoError(t, err)
|
||||
|
||||
slot, count, err := db.UpdateCustodyInfo(ctx, earliestSlot, groupCount)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, initialSlot, slot)
|
||||
require.Equal(t, initialCount, count)
|
||||
|
||||
storedSlot, storedCount := getCustodyInfoFromDB(t, db)
|
||||
require.Equal(t, initialSlot, storedSlot)
|
||||
require.Equal(t, initialCount, storedCount)
|
||||
})
|
||||
}
|
||||
|
||||
func TestUpdateSubscribedToAllDataSubnets(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("initial update with empty database - set to false", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
prev, err := db.UpdateSubscribedToAllDataSubnets(ctx, false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, prev)
|
||||
|
||||
stored := getSubscriptionStatusFromDB(t, db)
|
||||
require.Equal(t, false, stored)
|
||||
})
|
||||
|
||||
t.Run("attempt to update from true to false (should not change)", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
_, err := db.UpdateSubscribedToAllDataSubnets(ctx, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
prev, err := db.UpdateSubscribedToAllDataSubnets(ctx, false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, prev)
|
||||
|
||||
stored := getSubscriptionStatusFromDB(t, db)
|
||||
require.Equal(t, true, stored)
|
||||
})
|
||||
|
||||
t.Run("attempt to update from true to true (should remain true)", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
_, err := db.UpdateSubscribedToAllDataSubnets(ctx, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
prev, err := db.UpdateSubscribedToAllDataSubnets(ctx, true)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, prev)
|
||||
|
||||
stored := getSubscriptionStatusFromDB(t, db)
|
||||
require.Equal(t, true, stored)
|
||||
})
|
||||
}
|
||||
@@ -19,6 +19,9 @@ var ErrNotFoundGenesisBlockRoot = errors.Wrap(ErrNotFound, "OriginGenesisRoot")
|
||||
// ErrNotFoundFeeRecipient is a not found error specifically for the fee recipient getter
|
||||
var ErrNotFoundFeeRecipient = errors.Wrap(ErrNotFound, "fee recipient")
|
||||
|
||||
// ErrNotFoundMetadataSeqNum is a not found error specifically for the metadata sequence number getter
|
||||
var ErrNotFoundMetadataSeqNum = errors.Wrap(ErrNotFound, "metadata sequence number")
|
||||
|
||||
var errEmptyBlockSlice = errors.New("[]blocks.ROBlock is empty")
|
||||
var errIncorrectBlockParent = errors.New("unexpected missing or forked blocks in a []ROBlock")
|
||||
var errFinalizedChildNotFound = errors.New("unable to find finalized root descending from backfill batch")
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
dbIface "github.com/OffchainLabs/prysm/v6/beacon-chain/db/iface"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/ssz/detect"
|
||||
"github.com/OffchainLabs/prysm/v6/genesis"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -97,8 +98,22 @@ func (s *Store) EnsureEmbeddedGenesis(ctx context.Context) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if gs != nil && !gs.IsNil() {
|
||||
if !state.IsNil(gs) {
|
||||
return s.SaveGenesisData(ctx, gs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type LegacyGenesisProvider struct {
|
||||
store *Store
|
||||
}
|
||||
|
||||
func NewLegacyGenesisProvider(store *Store) *LegacyGenesisProvider {
|
||||
return &LegacyGenesisProvider{store: store}
|
||||
}
|
||||
|
||||
var _ genesis.Provider = &LegacyGenesisProvider{}
|
||||
|
||||
func (p *LegacyGenesisProvider) Genesis(ctx context.Context) (state.BeaconState, error) {
|
||||
return p.store.LegacyGenesisState(ctx)
|
||||
}
|
||||
|
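Any type exposing this single Genesis method can be supplied as an additional provider alongside the legacy one. An illustrative alternative provider (hypothetical, for demonstration only):

package gensketch

import (
	"context"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	"github.com/pkg/errors"
)

// staticGenesisProvider serves an in-memory state; anything with this Genesis method
// can be appended to BeaconNode.GenesisProviders before genesis initialization runs.
type staticGenesisProvider struct {
	st state.BeaconState
}

func (p *staticGenesisProvider) Genesis(_ context.Context) (state.BeaconState, error) {
	if p.st == nil || p.st.IsNil() {
		return nil, errors.New("no genesis state configured")
	}
	return p.st, nil
}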
||||
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/iface"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/genesis"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
@@ -152,6 +153,7 @@ func TestEnsureEmbeddedGenesis(t *testing.T) {
|
||||
require.NoError(t, undo())
|
||||
}()
|
||||
|
||||
genesis.StoreEmbeddedDuringTest(t, params.BeaconConfig().ConfigName)
|
||||
ctx := t.Context()
|
||||
db := setupDB(t)
|
||||
|
||||
|
||||
@@ -123,6 +123,7 @@ var Buckets = [][]byte{
|
||||
|
||||
feeRecipientBucket,
|
||||
registrationBucket,
|
||||
custodyBucket,
|
||||
}
|
||||
|
||||
// KVStoreOption is a functional option that modifies a kv.Store.
|
||||
|
||||
42
beacon-chain/db/kv/p2p.go
Normal file
@@ -0,0 +1,42 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
// MetadataSeqNum retrieves the p2p metadata sequence number from the database.
|
||||
// It returns 0 and ErrNotFoundMetadataSeqNum if the key does not exist.
|
||||
func (s *Store) MetadataSeqNum(ctx context.Context) (uint64, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.MetadataSeqNum")
|
||||
defer span.End()
|
||||
|
||||
var seqNum uint64
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(chainMetadataBucket)
|
||||
val := bkt.Get(metadataSequenceNumberKey)
|
||||
if val == nil {
|
||||
return ErrNotFoundMetadataSeqNum
|
||||
}
|
||||
|
||||
seqNum = bytesutil.BytesToUint64BigEndian(val)
|
||||
return nil
|
||||
})
|
||||
|
||||
return seqNum, err
|
||||
}
|
||||
|
||||
// SaveMetadataSeqNum saves the p2p metadata sequence number to the database.
|
||||
func (s *Store) SaveMetadataSeqNum(ctx context.Context, seqNum uint64) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.SaveMetadataSeqNum")
|
||||
defer span.End()
|
||||
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(chainMetadataBucket)
|
||||
val := bytesutil.Uint64ToBytesBigEndian(seqNum)
|
||||
return bkt.Put(metadataSequenceNumberKey, val)
|
||||
})
|
||||
}
|
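A hedged sketch of how a caller might use the two accessors above at startup: treat a missing key as zero, then persist the incremented value so the sequence number keeps growing across restarts. The helper and interface names are assumptions for illustration:

package p2psketch

import (
	"context"
	"errors"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
)

// seqNumStore is the slice of the Store API used in this sketch.
type seqNumStore interface {
	MetadataSeqNum(ctx context.Context) (uint64, error)
	SaveMetadataSeqNum(ctx context.Context, seqNum uint64) error
}

// restoreAndBumpSeqNum restores the persisted p2p metadata sequence number, treating a
// missing key as zero, then persists the incremented value so the sequence number keeps
// growing across restarts.
func restoreAndBumpSeqNum(ctx context.Context, db seqNumStore) (uint64, error) {
	seq, err := db.MetadataSeqNum(ctx)
	if err != nil && !errors.Is(err, kv.ErrNotFoundMetadataSeqNum) {
		return 0, err
	}
	seq++ // a fresh database starts at 1
	if err := db.SaveMetadataSeqNum(ctx, seq); err != nil {
		return 0, err
	}
	return seq, nil
}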
||||
33
beacon-chain/db/kv/p2p_test.go
Normal file
@@ -0,0 +1,33 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
)
|
||||
|
||||
func TestStore_MetadataSeqNum(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
db := setupDB(t)
|
||||
|
||||
seqNum, err := db.MetadataSeqNum(ctx)
|
||||
require.ErrorIs(t, err, ErrNotFoundMetadataSeqNum)
|
||||
assert.Equal(t, uint64(0), seqNum)
|
||||
|
||||
initialSeqNum := uint64(42)
|
||||
err = db.SaveMetadataSeqNum(ctx, initialSeqNum)
|
||||
require.NoError(t, err)
|
||||
|
||||
retrievedSeqNum, err := db.MetadataSeqNum(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, initialSeqNum, retrievedSeqNum)
|
||||
|
||||
updatedSeqNum := uint64(43)
|
||||
err = db.SaveMetadataSeqNum(ctx, updatedSeqNum)
|
||||
require.NoError(t, err)
|
||||
|
||||
retrievedSeqNum, err = db.MetadataSeqNum(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, updatedSeqNum, retrievedSeqNum)
|
||||
}
|
||||
@@ -42,6 +42,7 @@ var (
|
||||
finalizedCheckpointKey = []byte("finalized-checkpoint")
|
||||
powchainDataKey = []byte("powchain-data")
|
||||
lastValidatedCheckpointKey = []byte("last-validated-checkpoint")
|
||||
metadataSequenceNumberKey = []byte("metadata-seq-number")
|
||||
|
||||
// Below keys are used to identify objects are to be fork compatible.
|
||||
// Objects that are only compatible with specific forks should be prefixed with such keys.
|
||||
@@ -70,4 +71,10 @@ var (
|
||||
|
||||
// Migrations
|
||||
migrationsBucket = []byte("migrations")
|
||||
|
||||
// Custody
|
||||
custodyBucket = []byte("custody")
|
||||
groupCountKey = []byte("group-count")
|
||||
earliestAvailableSlotKey = []byte("earliest-available-slot")
|
||||
subscribeAllDataSubnetsKey = []byte("subscribe-all-data-subnets")
|
||||
)
|
||||
|
||||
@@ -6,14 +6,12 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/genesis"
|
||||
statenative "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v6/config/features"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
|
||||
"github.com/OffchainLabs/prysm/v6/genesis"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
@@ -65,21 +63,21 @@ func (s *Store) StateOrError(ctx context.Context, blockRoot [32]byte) (state.Bea
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// GenesisState returns the genesis state in beacon chain.
|
||||
func (s *Store) GenesisState(ctx context.Context) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.GenesisState")
|
||||
st, err := genesis.State()
|
||||
if errors.Is(err, genesis.ErrGenesisStateNotInitialized) {
|
||||
log.WithError(err).Error("Genesis state not initialized, returning nil state. This should only happen in tests")
|
||||
return nil, nil
|
||||
}
|
||||
return st, err
|
||||
}
|
||||
|
||||
// LegacyGenesisState returns the genesis state stored in the beacon chain database.
|
||||
func (s *Store) LegacyGenesisState(ctx context.Context) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.LegacyGenesisState")
|
||||
defer span.End()
|
||||
|
||||
cached, err := genesis.State(params.BeaconConfig().ConfigName)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, err
|
||||
}
|
||||
span.SetAttributes(trace.BoolAttribute("cache_hit", cached != nil))
|
||||
if cached != nil {
|
||||
return cached, nil
|
||||
}
|
||||
|
||||
var err error
|
||||
var st state.BeaconState
|
||||
err = s.db.View(func(tx *bolt.Tx) error {
|
||||
// Retrieve genesis block's signing root from blocks bucket,
|
||||
@@ -253,6 +251,10 @@ func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, bl
|
||||
if err := s.processElectra(ctx, rawType, rt[:], bucket, valIdxBkt, validatorKeys[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
case *ethpb.BeaconStateFulu:
|
||||
if err := s.processFulu(ctx, rawType, rt[:], bucket, valIdxBkt, validatorKeys[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return errors.New("invalid state type")
|
||||
}
|
||||
@@ -368,6 +370,24 @@ func (s *Store) processElectra(ctx context.Context, pbState *ethpb.BeaconStateEl
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Store) processFulu(ctx context.Context, pbState *ethpb.BeaconStateFulu, rootHash []byte, bucket, valIdxBkt *bolt.Bucket, validatorKey []byte) error {
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
rawObj, err := pbState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
encodedState := snappy.Encode(nil, append(fuluKey, rawObj...))
|
||||
if err := bucket.Put(rootHash, encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rootHash, validatorKey); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
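processFulu follows the same pattern as the earlier fork handlers: strip the validator entries, SSZ-marshal the remainder, prefix it with a fork key, and snappy-compress before writing. A generic, self-contained sketch of that encode/decode pattern (names assumed; these are not Prysm's exact helpers):

package encsketch

import (
	"bytes"
	"errors"

	"github.com/golang/snappy"
)

// encodeWithForkPrefix mirrors the storage pattern above: prepend a fork-identifying
// key to the SSZ bytes, then snappy-compress the whole value before writing it.
func encodeWithForkPrefix(prefix, ssz []byte) []byte {
	return snappy.Encode(nil, append(prefix, ssz...))
}

// decodeWithForkPrefix reverses it, checking that the stored value carries the
// expected fork prefix before handing back the raw SSZ bytes.
func decodeWithForkPrefix(prefix, enc []byte) ([]byte, error) {
	raw, err := snappy.Decode(nil, enc)
	if err != nil {
		return nil, err
	}
	if !bytes.HasPrefix(raw, prefix) {
		return nil, errors.New("unexpected fork prefix")
	}
	return raw[len(prefix):], nil
}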
||||
func (s *Store) storeValidatorEntriesSeparately(ctx context.Context, tx *bolt.Tx, validatorsEntries map[string]*ethpb.Validator) error {
|
||||
valBkt := tx.Bucket(stateValidatorsBucket)
|
||||
for hashStr, validatorEntry := range validatorsEntries {
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/genesis"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
@@ -488,7 +489,7 @@ func TestGenesisState_CanSaveRetrieve(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetSlot(1))
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), headRoot))
|
||||
require.NoError(t, db.SaveState(t.Context(), st, headRoot))
|
||||
genesis.StoreStateDuringTest(t, st)
|
||||
|
||||
savedGenesisS, err := db.GenesisState(t.Context())
|
||||
require.NoError(t, err)
|
||||
@@ -661,7 +662,7 @@ func TestStore_GenesisState_CanGetHighestBelow(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
genesisRoot := [32]byte{'a'}
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), genesisRoot))
|
||||
require.NoError(t, db.SaveState(t.Context(), genesisState, genesisRoot))
|
||||
genesis.StoreStateDuringTest(t, genesisState)
|
||||
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 1
|
||||
|
||||
@@ -43,6 +43,7 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
|
||||
return errors.Wrap(err, "failed to initialize origin block w/ bytes + config+fork")
|
||||
}
|
||||
blk := wblk.Block()
|
||||
slot := blk.Slot()
|
||||
|
||||
blockRoot, err := blk.HashTreeRoot()
|
||||
if err != nil {
|
||||
@@ -51,43 +52,43 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
|
||||
|
||||
pr := blk.ParentRoot()
|
||||
bf := &dbval.BackfillStatus{
|
||||
LowSlot: uint64(wblk.Block().Slot()),
|
||||
LowSlot: uint64(slot),
|
||||
LowRoot: blockRoot[:],
|
||||
LowParentRoot: pr[:],
|
||||
OriginRoot: blockRoot[:],
|
||||
OriginSlot: uint64(wblk.Block().Slot()),
|
||||
OriginSlot: uint64(slot),
|
||||
}
|
||||
|
||||
if err = s.SaveBackfillStatus(ctx, bf); err != nil {
|
||||
return errors.Wrap(err, "unable to save backfill status data to db for checkpoint sync")
|
||||
}
|
||||
|
||||
log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Info("Saving checkpoint block to db")
|
||||
log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Info("Saving checkpoint data into database")
|
||||
if err := s.SaveBlock(ctx, wblk); err != nil {
|
||||
return errors.Wrap(err, "could not save checkpoint block")
|
||||
return errors.Wrap(err, "save block")
|
||||
}
|
||||
|
||||
// save state
|
||||
log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Info("Calling SaveState")
|
||||
if err = s.SaveState(ctx, state, blockRoot); err != nil {
|
||||
return errors.Wrap(err, "could not save state")
|
||||
return errors.Wrap(err, "save state")
|
||||
}
|
||||
|
||||
if err = s.SaveStateSummary(ctx, ðpb.StateSummary{
|
||||
Slot: state.Slot(),
|
||||
Root: blockRoot[:],
|
||||
}); err != nil {
|
||||
return errors.Wrap(err, "could not save state summary")
|
||||
return errors.Wrap(err, "save state summary")
|
||||
}
|
||||
|
||||
// mark block as head of chain, so that processing will pick up from this point
|
||||
if err = s.SaveHeadBlockRoot(ctx, blockRoot); err != nil {
|
||||
return errors.Wrap(err, "could not save head block root")
|
||||
return errors.Wrap(err, "save head block root")
|
||||
}
|
||||
|
||||
// save origin block root in a special key, to be used when the canonical
|
||||
// origin (start of chain, ie alternative to genesis) block or state is needed
|
||||
if err = s.SaveOriginCheckpointBlockRoot(ctx, blockRoot); err != nil {
|
||||
return errors.Wrap(err, "could not save origin block root")
|
||||
return errors.Wrap(err, "save origin checkpoint block root")
|
||||
}
|
||||
|
||||
// rebuild the checkpoint from the block
|
||||
@@ -96,15 +97,18 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
chkpt := ðpb.Checkpoint{
|
||||
Epoch: primitives.Epoch(slotEpoch),
|
||||
Root: blockRoot[:],
|
||||
}
|
||||
|
||||
if err = s.SaveJustifiedCheckpoint(ctx, chkpt); err != nil {
|
||||
return errors.Wrap(err, "could not mark checkpoint sync block as justified")
|
||||
return errors.Wrap(err, "save justified checkpoint")
|
||||
}
|
||||
|
||||
if err = s.SaveFinalizedCheckpoint(ctx, chkpt); err != nil {
|
||||
return errors.Wrap(err, "could not mark checkpoint sync block as finalized")
|
||||
return errors.Wrap(err, "save finalized checkpoint")
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -3,9 +3,9 @@ package kv
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/genesis"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/genesis"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
)
|
||||
@@ -18,7 +18,11 @@ func TestSaveOrigin(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
db := setupDB(t)
|
||||
|
||||
st, err := genesis.State(params.MainnetName)
|
||||
// Initialize genesis with mainnet config - this will load the embedded mainnet state
|
||||
require.NoError(t, genesis.Initialize(ctx, t.TempDir()))
|
||||
|
||||
// Get the initialized genesis state
|
||||
st, err := genesis.State()
|
||||
require.NoError(t, err)
|
||||
|
||||
sb, err := st.MarshalSSZ()
|
||||
|
||||
@@ -25,6 +25,7 @@ go_library(
|
||||
"//testing/spectest:__subpackages__",
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
@@ -103,6 +104,7 @@ go_test(
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
@@ -125,6 +127,7 @@ go_test(
|
||||
"//contracts/deposit/mock:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//genesis:go_default_library",
|
||||
"//monitoring/clientstats:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
@@ -123,7 +124,7 @@ type Reconstructor interface {
|
||||
ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock,
|
||||
) ([]interfaces.SignedBeaconBlock, error)
|
||||
ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte, hi func(uint64) bool) ([]blocks.VerifiedROBlob, error)
|
||||
ReconstructDataColumnSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error)
|
||||
ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error)
|
||||
}
|
||||
|
||||
// EngineCaller defines a client that can interact with an Ethereum
|
||||
@@ -651,22 +652,41 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
|
||||
return verifiedBlobs, nil
|
||||
}
|
||||
|
||||
// ReconstructDataColumnSidecars reconstructs the verified data column sidecars for a given beacon block.
|
||||
// It retrieves the KZG commitments from the block body, fetches the associated blobs and cell proofs from the EL,
|
||||
// and constructs the corresponding verified read-only data column sidecars.
|
||||
func (s *Service) ReconstructDataColumnSidecars(ctx context.Context, signedROBlock interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error) {
|
||||
block := signedROBlock.Block()
|
||||
|
||||
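// ConstructDataColumnSidecars fetches the blobs and cell proofs matching the populator's
// KZG commitments from the execution client and builds verified data column sidecars from them.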
func (s *Service) ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error) {
|
||||
root := populator.Root()
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", blockRoot),
|
||||
"slot": block.Slot(),
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"slot": populator.Slot(),
|
||||
})
|
||||
|
||||
kzgCommitments, err := block.Body().BlobKzgCommitments()
|
||||
// Fetch cells and proofs from the execution client using the KZG commitments provided by the populator.
|
||||
cellsAndProofs, err := s.cellsAndProofsForCommitments(ctx, populator.Commitments())
|
||||
if err != nil {
|
||||
return nil, wrapWithBlockRoot(err, blockRoot, "blob KZG commitments")
|
||||
return nil, wrapWithBlockRoot(err, root, "fetch cells and proofs from execution client")
|
||||
}
|
||||
|
||||
// Return early if nothing is returned from the EL.
|
||||
if len(cellsAndProofs) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Construct data column sidecars from the populator and the cells and proofs.
|
||||
roSidecars, err := peerdas.ConstructDataColumnSidecar(cellsAndProofs, populator)
|
||||
if err != nil {
|
||||
return nil, wrapWithBlockRoot(err, populator.Root(), "construct data column sidecars")
|
||||
}
|
||||
|
||||
// Upgrade the sidecars to verified sidecars.
|
||||
// We trust the execution layer we are connected to, so we can upgrade the sidecar into a verified one.
|
||||
verifiedROSidecars := upgradeSidecarsToVerifiedSidecars(roSidecars)
|
||||
|
||||
log.WithField("sourceType", populator.Type()).Debug("Data column sidecars constructed from the execution client")
|
||||
|
||||
return verifiedROSidecars, nil
|
||||
}
|
||||
|
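A hedged usage sketch of the populator-based entry point above, mirroring the two paths exercised in the tests further down; the preference/fallback policy shown here is illustrative, not Prysm's actual behavior:

package elsketch

import (
	"context"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
)

// columnConstructor is the slice of the engine API used in this sketch (see the
// Reconstructor interface above).
type columnConstructor interface {
	ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error)
}

// columnsFromEL asks the EL for the blobs backing a block and rebuilds its data column
// sidecars, preferring the full block as the source of commitments and falling back to
// a sidecar received over gossip (illustrative policy only).
func columnsFromEL(ctx context.Context, engine columnConstructor, roBlock blocks.ROBlock, roColumn blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error) {
	if cols, err := engine.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock)); err == nil && len(cols) > 0 {
		return cols, nil
	}
	return engine.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromSidecar(roColumn))
}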
||||
// cellsAndProofsForCommitments fetches cells and proofs from the execution client (using the engine_getBlobsV2 execution API method).
|
||||
func (s *Service) cellsAndProofsForCommitments(ctx context.Context, kzgCommitments [][]byte) ([]kzg.CellsAndProofs, error) {
|
||||
// Collect KZG hashes for all blobs.
|
||||
versionedHashes := make([]common.Hash, 0, len(kzgCommitments))
|
||||
for _, commitment := range kzgCommitments {
|
||||
@@ -677,12 +697,11 @@ func (s *Service) ReconstructDataColumnSidecars(ctx context.Context, signedROBlo
|
||||
// Fetch all blobsAndCellsProofs from the execution client.
|
||||
blobAndProofV2s, err := s.GetBlobsV2(ctx, versionedHashes)
|
||||
if err != nil {
|
||||
return nil, wrapWithBlockRoot(err, blockRoot, "get blobs V2")
|
||||
return nil, errors.Wrapf(err, "get blobs V2")
|
||||
}
|
||||
|
||||
// Return early if nothing is returned from the EL.
|
||||
if len(blobAndProofV2s) == 0 {
|
||||
log.Debug("No blobs returned from execution client")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@@ -690,34 +709,31 @@ func (s *Service) ReconstructDataColumnSidecars(ctx context.Context, signedROBlo
|
||||
blobs, cellProofs := make([][]byte, 0, len(blobAndProofV2s)), make([][]byte, 0, len(blobAndProofV2s))
|
||||
for _, blobsAndProofs := range blobAndProofV2s {
|
||||
if blobsAndProofs == nil {
|
||||
return nil, wrapWithBlockRoot(errMissingBlobsAndProofsFromEL, blockRoot, "")
|
||||
return nil, errMissingBlobsAndProofsFromEL
|
||||
}
|
||||
|
||||
blobs, cellProofs = append(blobs, blobsAndProofs.Blob), append(cellProofs, blobsAndProofs.KzgProofs...)
|
||||
blobs = append(blobs, blobsAndProofs.Blob)
|
||||
cellProofs = append(cellProofs, blobsAndProofs.KzgProofs...)
|
||||
}
|
||||
|
||||
// Construct the data column sidcars from the blobs and cell proofs provided by the execution client.
|
||||
dataColumnSidecars, err := peerdas.ConstructDataColumnSidecars(signedROBlock, blobs, cellProofs)
|
||||
// Compute cells and proofs from the blobs and cell proofs.
|
||||
cellsAndProofs, err := peerdas.ComputeCellsAndProofs(blobs, cellProofs)
|
||||
if err != nil {
|
||||
return nil, wrapWithBlockRoot(err, blockRoot, "construct data column sidecars")
|
||||
return nil, errors.Wrap(err, "compute cells and proofs")
|
||||
}
|
||||
|
||||
// Finally, construct verified RO data column sidecars.
|
||||
// We trust the execution layer we are connected to, so we can upgrade the read only data column sidecar into a verified one.
|
||||
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnSidecars))
|
||||
for _, dataColumnSidecar := range dataColumnSidecars {
|
||||
roDataColumn, err := blocks.NewRODataColumnWithRoot(dataColumnSidecar, blockRoot)
|
||||
if err != nil {
|
||||
return nil, wrapWithBlockRoot(err, blockRoot, "new read-only data column with root")
|
||||
}
|
||||
return cellsAndProofs, nil
|
||||
}
|
||||
|
||||
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
|
||||
// upgradeSidecarsToVerifiedSidecars upgrades a list of data column sidecars into verified data column sidecars.
|
||||
func upgradeSidecarsToVerifiedSidecars(roSidecars []blocks.RODataColumn) []blocks.VerifiedRODataColumn {
|
||||
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(roSidecars))
|
||||
for _, roSidecar := range roSidecars {
|
||||
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roSidecar)
|
||||
verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
|
||||
}
|
||||
|
||||
log.Debug("Data columns successfully reconstructed from the execution client")
|
||||
|
||||
return verifiedRODataColumns, nil
|
||||
return verifiedRODataColumns
|
||||
}
|
||||
|
||||
func fullPayloadFromPayloadBody(
|
||||
@@ -1009,6 +1025,6 @@ func toBlockNumArg(number *big.Int) string {
|
||||
}
|
||||
|
||||
// wrapWithBlockRoot returns a new error with the given block root.
|
||||
func wrapWithBlockRoot(err error, blockRoot [32]byte, message string) error {
|
||||
func wrapWithBlockRoot(err error, blockRoot [fieldparams.RootLength]byte, message string) error {
|
||||
return errors.Wrap(err, fmt.Sprintf("%s for block %#x", message, blockRoot))
|
||||
}
|
||||
|
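The commitment-to-versioned-hash conversion used when building the engine_getBlobsV2 request is the standard EIP-4844 mapping: SHA-256 of the commitment with the first byte replaced by the KZG version tag. A standalone sketch (Prysm uses its own helper for this):

package elsketch

import (
	"crypto/sha256"

	"github.com/ethereum/go-ethereum/common"
)

// kzgToVersionedHash is the EIP-4844 mapping from a KZG commitment to the versioned
// hash that engine_getBlobsV2 expects.
func kzgToVersionedHash(commitment []byte) common.Hash {
	h := sha256.Sum256(commitment)
	h[0] = 0x01 // VERSIONED_HASH_VERSION_KZG
	return common.BytesToHash(h[:])
}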
||||
@@ -14,6 +14,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
mocks "github.com/OffchainLabs/prysm/v6/beacon-chain/execution/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
@@ -2556,7 +2557,7 @@ func TestReconstructBlobSidecars(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestReconstructDataColumnSidecars(t *testing.T) {
|
||||
func TestConstructDataColumnSidecarsFromBlock(t *testing.T) {
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
@@ -2580,11 +2581,14 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
roBlock, err := blocks.NewROBlockWithRoot(sb, r)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("GetBlobsV2 is not supported", func(t *testing.T) {
|
||||
_, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
|
||||
require.ErrorContains(t, "get blobs V2 for block", err)
|
||||
_, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
|
||||
require.ErrorContains(t, "engine_getBlobsV2 is not supported", err)
|
||||
})
|
||||
|
||||
t.Run("nothing received", func(t *testing.T) {
|
||||
@@ -2594,7 +2598,7 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
|
||||
dataColumns, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(dataColumns))
|
||||
})
|
||||
@@ -2607,7 +2611,7 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
|
||||
dataColumns, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 128, len(dataColumns))
|
||||
})
|
||||
@@ -2620,10 +2624,134 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
|
||||
_, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
|
||||
require.ErrorContains(t, errMissingBlobsAndProofsFromEL.Error(), err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestConstructDataColumnSidecarsFromSidecar(t *testing.T) {
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Setup right fork epoch
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.CapellaForkEpoch = 1
|
||||
cfg.DenebForkEpoch = 2
|
||||
cfg.ElectraForkEpoch = 3
|
||||
cfg.FuluForkEpoch = 4
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
client := &Service{capabilityCache: &capabilityCache{}}
|
||||
|
||||
// Create KZG commitments for the sidecar
|
||||
kzgCommitments := createRandomKzgCommitments(t, 3)
|
||||
|
||||
// Create a test verified data column sidecar
|
||||
columnData := make([][]byte, 3)
|
||||
for i := range columnData {
|
||||
columnData[i] = make([]byte, kzg.BytesPerCell)
|
||||
columnData[i][0] = byte(i + 0x10)
|
||||
}
|
||||
|
||||
kzgProofs := make([][]byte, 3)
|
||||
for i := range kzgProofs {
|
||||
kzgProofs[i] = make([]byte, 48)
|
||||
kzgProofs[i][0] = byte(i + 0x20)
|
||||
}
|
||||
|
||||
inclusionProof := make([][]byte, 4)
|
||||
for i := range inclusionProof {
|
||||
inclusionProof[i] = make([]byte, 32)
|
||||
inclusionProof[i][0] = byte(i + 0x30)
|
||||
}
|
||||
|
||||
// Create the input VerifiedRODataColumn sidecar using test utility
|
||||
_, verifiedSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{
|
||||
{
|
||||
Index: 7, // Column index 7
|
||||
Column: columnData,
|
||||
KzgCommitments: kzgCommitments,
|
||||
KzgProofs: kzgProofs,
|
||||
KzgCommitmentsInclusionProof: inclusionProof,
|
||||
Slot: 4 * params.BeaconConfig().SlotsPerEpoch, // Fulu epoch
|
||||
ProposerIndex: 9,
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
},
|
||||
})
|
||||
require.Equal(t, 1, len(verifiedSidecars))
|
||||
sidecar := verifiedSidecars[0]
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("GetBlobsV2 is not supported", func(t *testing.T) {
|
||||
_, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromSidecar(sidecar.RODataColumn))
|
||||
require.NotNil(t, err)
|
||||
require.ErrorContains(t, "engine_getBlobsV2 is not supported", err)
|
||||
})
|
||||
|
||||
t.Run("nothing received", func(t *testing.T) {
|
||||
srv := createBlobServerV2(t, 0, []bool{})
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
dataColumns, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromSidecar(sidecar.RODataColumn))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(dataColumns))
|
||||
})
|
||||
|
||||
t.Run("receiving all blobs", func(t *testing.T) {
|
||||
blobMasks := []bool{true, true, true}
|
||||
srv := createBlobServerV2(t, 3, blobMasks)
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
dataColumns, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromSidecar(sidecar.RODataColumn))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 128, len(dataColumns)) // NumberOfColumns
|
||||
|
||||
// Verify each sidecar has expected structure
|
||||
for i, sidecar := range dataColumns {
|
||||
require.Equal(t, uint64(i), sidecar.Index)
|
||||
require.Equal(t, 3, len(sidecar.Column)) // 3 blobs
|
||||
require.Equal(t, 3, len(sidecar.KzgCommitments)) // 3 commitments
|
||||
require.Equal(t, 3, len(sidecar.KzgProofs)) // 3 proofs per column
|
||||
|
||||
// Verify commitments are preserved
|
||||
for j, commitment := range sidecar.KzgCommitments {
|
||||
require.DeepEqual(t, kzgCommitments[j], commitment)
|
||||
}
|
||||
|
||||
// Verify inclusion proof is preserved
|
||||
require.Equal(t, len(inclusionProof), len(sidecar.KzgCommitmentsInclusionProof))
|
||||
for j, proof := range sidecar.KzgCommitmentsInclusionProof {
|
||||
require.Equal(t, byte(j+0x30), proof[0])
|
||||
}
|
||||
|
||||
// Verify signed block header is preserved
|
||||
require.Equal(t, primitives.Slot(4*params.BeaconConfig().SlotsPerEpoch), sidecar.SignedBlockHeader.Header.Slot)
|
||||
require.Equal(t, primitives.ValidatorIndex(9), sidecar.SignedBlockHeader.Header.ProposerIndex)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("missing some blobs", func(t *testing.T) {
|
||||
blobMasks := []bool{false, true, true}
|
||||
srv := createBlobServerV2(t, 3, blobMasks)
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
_, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromSidecar(sidecar.RODataColumn))
|
||||
require.ErrorContains(t, errMissingBlobsAndProofsFromEL.Error(), err)
|
||||
})
|
||||
}
|
||||
|
||||
func createRandomKzgCommitments(t *testing.T, num int) [][]byte {
|
||||
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
contracts "github.com/OffchainLabs/prysm/v6/contracts/deposit"
|
||||
"github.com/OffchainLabs/prysm/v6/contracts/deposit/mock"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/genesis"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/clientstats"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
@@ -381,6 +382,7 @@ func TestInitDepositCache_OK(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.cfg.beaconDB.SaveGenesisBlockRoot(t.Context(), blockRootA))
|
||||
require.NoError(t, s.cfg.beaconDB.SaveState(t.Context(), emptyState, blockRootA))
|
||||
genesis.StoreStateDuringTest(t, emptyState)
|
||||
s.chainStartData.Chainstarted = true
|
||||
require.NoError(t, s.initDepositCaches(t.Context(), ctrs))
|
||||
require.Equal(t, 3, len(s.cfg.depositCache.PendingContainers(t.Context(), nil)))
|
||||
@@ -446,6 +448,7 @@ func TestInitDepositCacheWithFinalization_OK(t *testing.T) {
|
||||
require.NoError(t, s.cfg.beaconDB.SaveGenesisBlockRoot(t.Context(), headRoot))
|
||||
require.NoError(t, s.cfg.beaconDB.SaveState(t.Context(), emptyState, headRoot))
|
||||
require.NoError(t, stateGen.SaveState(t.Context(), headRoot, emptyState))
|
||||
genesis.StoreStateDuringTest(t, emptyState)
|
||||
s.cfg.stateGen = stateGen
|
||||
require.NoError(t, emptyState.SetEth1DepositIndex(3))
|
||||
|
||||
@@ -594,6 +597,7 @@ func TestService_EnsureConsistentPowchainData(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, genState.SetSlot(1000))
|
||||
|
||||
genesis.StoreStateDuringTest(t, genState)
|
||||
require.NoError(t, s1.cfg.beaconDB.SaveGenesisData(t.Context(), genState))
|
||||
_, err = s1.validPowchainData(t.Context())
|
||||
require.NoError(t, err)
|
||||
@@ -655,6 +659,7 @@ func TestService_EnsureValidPowchainData(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, genState.SetSlot(1000))
|
||||
|
||||
genesis.StoreStateDuringTest(t, genState)
|
||||
require.NoError(t, s1.cfg.beaconDB.SaveGenesisData(t.Context(), genState))
|
||||
|
||||
err = s1.cfg.beaconDB.SaveExecutionChainData(t.Context(), ðpb.ETH1ChainData{
|
||||
|
||||
@@ -14,6 +14,7 @@ go_library(
|
||||
],
|
||||
deps = [
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/execution/types:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"math/big"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
@@ -116,7 +117,8 @@ func (e *EngineClient) ReconstructBlobSidecars(context.Context, interfaces.ReadO
|
||||
return e.BlobSidecars, e.ErrorBlobSidecars
|
||||
}
|
||||
|
||||
func (e *EngineClient) ReconstructDataColumnSidecars(context.Context, interfaces.ReadOnlySignedBeaconBlock, [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error) {
|
||||
// ConstructDataColumnSidecars is a mock implementation of the ConstructDataColumnSidecars method.
|
||||
func (e *EngineClient) ConstructDataColumnSidecars(context.Context, peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error) {
|
||||
return e.DataColumnSidecars, e.ErrorDataColumnSidecars
|
||||
}
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"clear_db.go",
|
||||
"config.go",
|
||||
"log.go",
|
||||
"node.go",
|
||||
@@ -23,7 +24,6 @@ go_library(
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
"//beacon-chain/core/light-client:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/db/kv:go_default_library",
|
||||
@@ -50,7 +50,6 @@ go_library(
|
||||
"//beacon-chain/sync/backfill:go_default_library",
|
||||
"//beacon-chain/sync/backfill/coverage:go_default_library",
|
||||
"//beacon-chain/sync/checkpoint:go_default_library",
|
||||
"//beacon-chain/sync/genesis:go_default_library",
|
||||
"//beacon-chain/sync/initial-sync:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//cmd:go_default_library",
|
||||
@@ -60,6 +59,7 @@ go_library(
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//genesis:go_default_library",
|
||||
"//monitoring/prometheus:go_default_library",
|
||||
"//monitoring/tracing:go_default_library",
|
||||
"//runtime:go_default_library",
|
||||
|
||||
101
beacon-chain/node/clear_db.go
Normal file
@@ -0,0 +1,101 @@
|
||||
package node
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/slasherkv"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
type dbClearer struct {
|
||||
shouldClear bool
|
||||
force bool
|
||||
confirmed bool
|
||||
}
|
||||
|
||||
const (
|
||||
clearConfirmation = "This will delete your beacon chain database stored in your data directory. " +
|
||||
"Your database backups will not be removed - do you want to proceed? (Y/N)"
|
||||
|
||||
clearDeclined = "Database will not be deleted. No changes have been made."
|
||||
)
|
||||
|
||||
func (c *dbClearer) clearKV(ctx context.Context, db *kv.Store) (*kv.Store, error) {
|
||||
if !c.shouldProceed() {
|
||||
return db, nil
|
||||
}
|
||||
|
||||
log.Warning("Removing database")
|
||||
if err := db.ClearDB(); err != nil {
|
||||
return nil, errors.Wrap(err, "could not clear database")
|
||||
}
|
||||
return kv.NewKVStore(ctx, db.DatabasePath())
|
||||
}
|
||||
|
||||
func (c *dbClearer) clearBlobs(bs *filesystem.BlobStorage) error {
|
||||
if !c.shouldProceed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Warning("Removing blob storage")
|
||||
if err := bs.Clear(); err != nil {
|
||||
return errors.Wrap(err, "could not clear blob storage")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *dbClearer) clearColumns(cs *filesystem.DataColumnStorage) error {
|
||||
if !c.shouldProceed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Warning("Removing data columns storage")
|
||||
if err := cs.Clear(); err != nil {
|
||||
return errors.Wrap(err, "could not clear data columns storage")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *dbClearer) clearSlasher(ctx context.Context, db *slasherkv.Store) (*slasherkv.Store, error) {
|
||||
if !c.shouldProceed() {
|
||||
return db, nil
|
||||
}
|
||||
|
||||
log.Warning("Removing slasher database")
|
||||
if err := db.ClearDB(); err != nil {
|
||||
return nil, errors.Wrap(err, "could not clear slasher database")
|
||||
}
|
||||
return slasherkv.NewKVStore(ctx, db.DatabasePath())
|
||||
}
|
||||
|
||||
func (c *dbClearer) shouldProceed() bool {
|
||||
if !c.shouldClear {
|
||||
return false
|
||||
}
|
||||
if c.force {
|
||||
return true
|
||||
}
|
||||
if !c.confirmed {
|
||||
confirmed, err := cmd.ConfirmAction(clearConfirmation, clearDeclined)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Not clearing db due to confirmation error")
|
||||
return false
|
||||
}
|
||||
c.confirmed = confirmed
|
||||
}
|
||||
return c.confirmed
|
||||
}
|
||||
|
||||
func newDbClearer(cliCtx *cli.Context) *dbClearer {
|
||||
force := cliCtx.Bool(cmd.ForceClearDB.Name)
|
||||
return &dbClearer{
|
||||
shouldClear: cliCtx.Bool(cmd.ClearDB.Name) || force,
|
||||
force: force,
|
||||
}
|
||||
}
|
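The decision logic concentrated in shouldProceed can be summarized as a small pure function. This is a sketch for reference, not a replacement for the method above: nothing is cleared unless requested, the force flag skips the prompt, and a confirmation, once given, is cached so later clear* calls do not prompt again.

package clearsketch

// shouldProceed mirrors the decision implemented by (*dbClearer).shouldProceed above.
func shouldProceed(shouldClear, force bool, confirmed *bool, prompt func() (bool, error)) bool {
	if !shouldClear {
		return false
	}
	if force {
		return true
	}
	if !*confirmed {
		ok, err := prompt()
		if err != nil {
			return false // confirmation failed; leave the databases untouched
		}
		*confirmed = ok
	}
	return *confirmed
}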
||||
@@ -26,7 +26,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
|
||||
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
|
||||
@@ -53,7 +52,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/backfill"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/backfill/coverage"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/checkpoint"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/genesis"
|
||||
initialsync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync/initial-sync"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd"
|
||||
@@ -63,6 +61,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/container/slice"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/genesis"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/prometheus"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/prereqs"
|
||||
@@ -114,7 +113,7 @@ type BeaconNode struct {
|
||||
slasherAttestationsFeed *event.Feed
|
||||
finalizedStateAtStartUp state.BeaconState
|
||||
serviceFlagOpts *serviceFlagOpts
|
||||
GenesisInitializer genesis.Initializer
|
||||
GenesisProviders []genesis.Provider
|
||||
CheckpointInitializer checkpoint.Initializer
|
||||
forkChoicer forkchoice.ForkChoicer
|
||||
clockWaiter startup.ClockWaiter
|
||||
@@ -124,11 +123,11 @@ type BeaconNode struct {
|
||||
BlobStorageOptions []filesystem.BlobStorageOption
|
||||
DataColumnStorage *filesystem.DataColumnStorage
|
||||
DataColumnStorageOptions []filesystem.DataColumnStorageOption
|
||||
custodyInfo *peerdas.CustodyInfo
|
||||
verifyInitWaiter *verification.InitializerWaiter
|
||||
syncChecker *initialsync.SyncChecker
|
||||
slasherEnabled bool
|
||||
lcStore *lightclient.Store
|
||||
ConfigOptions []params.Option
|
||||
}
|
||||
|
||||
// New creates a new node instance, sets up configuration options, and registers
|
||||
@@ -137,18 +136,13 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
if err := configureBeacon(cliCtx); err != nil {
|
||||
return nil, errors.Wrap(err, "could not set beacon configuration options")
|
||||
}
|
||||
|
||||
// Initializes any forks here.
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
registry := runtime.NewServiceRegistry()
|
||||
ctx := cliCtx.Context
|
||||
|
||||
beacon := &BeaconNode{
|
||||
cliCtx: cliCtx,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
services: registry,
|
||||
services: runtime.NewServiceRegistry(),
|
||||
stop: make(chan struct{}),
|
||||
stateFeed: new(event.Feed),
|
||||
blockFeed: new(event.Feed),
|
||||
@@ -166,7 +160,6 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
serviceFlagOpts: &serviceFlagOpts{},
|
||||
initialSyncComplete: make(chan struct{}),
|
||||
syncChecker: &initialsync.SyncChecker{},
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
slasherEnabled: cliCtx.Bool(flags.SlasherFlag.Name),
|
||||
}
|
||||
|
||||
@@ -176,6 +169,25 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
}
}

dbClearer := newDbClearer(cliCtx)
dataDir := cliCtx.String(cmd.DataDirFlag.Name)
boltFname := filepath.Join(dataDir, kv.BeaconNodeDbDirName)
kvdb, err := openDB(ctx, boltFname, dbClearer)
if err != nil {
return nil, errors.Wrap(err, "could not open database")
}
beacon.db = kvdb

providers := append(beacon.GenesisProviders, kv.NewLegacyGenesisProvider(kvdb))
if err := genesis.Initialize(ctx, dataDir, providers...); err != nil {
return nil, errors.Wrap(err, "could not initialize genesis state")
}

beacon.ConfigOptions = append([]params.Option{params.WithGenesisValidatorsRoot(genesis.ValidatorsRoot())}, beacon.ConfigOptions...)
params.BeaconConfig().ApplyOptions(beacon.ConfigOptions...)
params.BeaconConfig().InitializeForkSchedule()
params.LogDigests(params.BeaconConfig())

synchronizer := startup.NewClockSynchronizer()
beacon.clockWaiter = synchronizer
beacon.forkChoicer = doublylinkedtree.New()
@@ -194,6 +206,9 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
}
beacon.BlobStorage = blobs
}
if err := dbClearer.clearBlobs(beacon.BlobStorage); err != nil {
return nil, errors.Wrap(err, "could not clear blob storage")
}

if beacon.DataColumnStorage == nil {
dataColumnStorage, err := filesystem.NewDataColumnStorage(cliCtx.Context, beacon.DataColumnStorageOptions...)
@@ -203,8 +218,11 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco

beacon.DataColumnStorage = dataColumnStorage
}
if err := dbClearer.clearColumns(beacon.DataColumnStorage); err != nil {
return nil, errors.Wrap(err, "could not clear data column storage")
}

bfs, err := startBaseServices(cliCtx, beacon, depositAddress)
bfs, err := startBaseServices(cliCtx, beacon, depositAddress, dbClearer)
if err != nil {
return nil, errors.Wrap(err, "could not start modules")
}
@@ -292,7 +310,7 @@ func configureBeacon(cliCtx *cli.Context) error {
return nil
}

func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress string) (*backfill.Store, error) {
func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress string, clearer *dbClearer) (*backfill.Store, error) {
ctx := cliCtx.Context
log.Debugln("Starting DB")
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
@@ -300,9 +318,10 @@ func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress s
}

beacon.BlobStorage.WarmCache()
beacon.DataColumnStorage.WarmCache()

log.Debugln("Starting Slashing DB")
if err := beacon.startSlasherDB(cliCtx); err != nil {
if err := beacon.startSlasherDB(cliCtx, clearer); err != nil {
return nil, errors.Wrap(err, "could not start slashing DB")
}

@@ -482,43 +501,6 @@ func (b *BeaconNode) Close() {
close(b.stop)
}

func (b *BeaconNode) clearDB(clearDB, forceClearDB bool, d *kv.Store, dbPath string) (*kv.Store, error) {
var err error
clearDBConfirmed := false

if clearDB && !forceClearDB {
const (
actionText = "This will delete your beacon chain database stored in your data directory. " +
"Your database backups will not be removed - do you want to proceed? (Y/N)"

deniedText = "Database will not be deleted. No changes have been made."
)

clearDBConfirmed, err = cmd.ConfirmAction(actionText, deniedText)
if err != nil {
return nil, errors.Wrapf(err, "could not confirm action")
}
}

if clearDBConfirmed || forceClearDB {
log.Warning("Removing database")
if err := d.ClearDB(); err != nil {
return nil, errors.Wrap(err, "could not clear database")
}

if err := b.BlobStorage.Clear(); err != nil {
return nil, errors.Wrap(err, "could not clear blob storage")
}

d, err = kv.NewKVStore(b.ctx, dbPath)
if err != nil {
return nil, errors.Wrap(err, "could not create new database")
}
}

return d, nil
}

func (b *BeaconNode) checkAndSaveDepositContract(depositAddress string) error {
knownContract, err := b.db.DepositContractAddress(b.ctx)
if err != nil {
@@ -542,60 +524,36 @@ func (b *BeaconNode) checkAndSaveDepositContract(depositAddress string) error {
return nil
}

func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
var depositCache cache.DepositCache

baseDir := cliCtx.String(cmd.DataDirFlag.Name)
dbPath := filepath.Join(baseDir, kv.BeaconNodeDbDirName)
clearDBRequired := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDBRequired := cliCtx.Bool(cmd.ForceClearDB.Name)

func openDB(ctx context.Context, dbPath string, clearer *dbClearer) (*kv.Store, error) {
log.WithField("databasePath", dbPath).Info("Checking DB")

d, err := kv.NewKVStore(b.ctx, dbPath)
d, err := kv.NewKVStore(ctx, dbPath)
if err != nil {
return errors.Wrapf(err, "could not create database at %s", dbPath)
return nil, errors.Wrapf(err, "could not create database at %s", dbPath)
}

if clearDBRequired || forceClearDBRequired {
d, err = b.clearDB(clearDBRequired, forceClearDBRequired, d, dbPath)
if err != nil {
return errors.Wrap(err, "could not clear database")
}
d, err = clearer.clearKV(ctx, d)
if err != nil {
return nil, errors.Wrap(err, "could not clear database")
}

if err := d.RunMigrations(b.ctx); err != nil {
return err
}
return d, d.RunMigrations(ctx)
}

b.db = d

depositCache, err = depositsnapshot.New()
func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
depositCache, err := depositsnapshot.New()
if err != nil {
return errors.Wrap(err, "could not create deposit cache")
}

b.depositCache = depositCache

if b.GenesisInitializer != nil {
if err := b.GenesisInitializer.Initialize(b.ctx, d); err != nil {
if errors.Is(err, db.ErrExistingGenesisState) {
return errors.Errorf("Genesis state flag specified but a genesis state "+
"exists already. Run again with --%s and/or ensure you are using the "+
"appropriate testnet flag to load the given genesis state.", cmd.ClearDB.Name)
}

return errors.Wrap(err, "could not load genesis from file")
}
}

if err := b.db.EnsureEmbeddedGenesis(b.ctx); err != nil {
return errors.Wrap(err, "could not ensure embedded genesis")
}

if b.CheckpointInitializer != nil {
log.Info("Checkpoint sync - Downloading origin state and block")
if err := b.CheckpointInitializer.Initialize(b.ctx, d); err != nil {
if err := b.CheckpointInitializer.Initialize(b.ctx, b.db); err != nil {
return err
}
}
@@ -607,49 +565,25 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
log.WithField("address", depositAddress).Info("Deposit contract")
return nil
}

func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context, clearer *dbClearer) error {
if !b.slasherEnabled {
return nil
}
baseDir := cliCtx.String(cmd.DataDirFlag.Name)

if cliCtx.IsSet(flags.SlasherDirFlag.Name) {
baseDir = cliCtx.String(flags.SlasherDirFlag.Name)
}

dbPath := filepath.Join(baseDir, kv.BeaconNodeDbDirName)
clearDB := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDB := cliCtx.Bool(cmd.ForceClearDB.Name)

log.WithField("databasePath", dbPath).Info("Checking DB")

d, err := slasherkv.NewKVStore(b.ctx, dbPath)
if err != nil {
return err
}
clearDBConfirmed := false
if clearDB && !forceClearDB {
actionText := "This will delete your beacon chain database stored in your data directory. " +
"Your database backups will not be removed - do you want to proceed? (Y/N)"
deniedText := "Database will not be deleted. No changes have been made."
clearDBConfirmed, err = cmd.ConfirmAction(actionText, deniedText)
if err != nil {
return err
}
d, err = clearer.clearSlasher(b.ctx, d)
if err != nil {
return errors.Wrap(err, "could not clear slasher database")
}
if clearDBConfirmed || forceClearDB {
log.Warning("Removing database")
if err := d.ClearDB(); err != nil {
return errors.Wrap(err, "could not clear database")
}

d, err = slasherkv.NewKVStore(b.ctx, dbPath)
if err != nil {
return errors.Wrap(err, "could not create new database")
}
}

b.slasherDB = d
return nil
}
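The database-clearing flow above is now funneled through a single dbClearer built from the clear-db / force-clear-db flags. The concrete type is added in a file that is not shown in this excerpt; the sketch below is only an inferred, hypothetical shape (names and signatures deduced from the call sites clearKV, clearBlobs, clearColumns and clearSlasher visible above) and may differ from the real implementation.

	// beaconDBClearer is an illustrative name only; the change itself introduces a concrete dbClearer type.
	type beaconDBClearer interface {
		// clearKV wipes and reopens the beacon KV store when clearing was requested (and confirmed).
		clearKV(ctx context.Context, d *kv.Store) (*kv.Store, error)
		// clearBlobs clears blob storage under the same conditions.
		clearBlobs(bs *filesystem.BlobStorage) error
		// clearColumns clears data column storage under the same conditions.
		clearColumns(cs *filesystem.DataColumnStorage) error
		// clearSlasher wipes and reopens the slasher DB under the same conditions.
		clearSlasher(ctx context.Context, d *slasherkv.Store) (*slasherkv.Store, error)
	}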
@@ -705,7 +639,6 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
HostDNS: cliCtx.String(cmd.P2PHostDNS.Name),
PrivateKey: cliCtx.String(cmd.P2PPrivKey.Name),
StaticPeerID: cliCtx.Bool(cmd.P2PStaticID.Name),
MetaDataDir: cliCtx.String(cmd.P2PMetadata.Name),
QUICPort: cliCtx.Uint(cmd.P2PQUICPort.Name),
TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
@@ -717,7 +650,6 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
StateNotifier: b,
DB: b.db,
ClockWaiter: b.clockWaiter,
CustodyInfo: b.custodyInfo,
})
if err != nil {
return err
@@ -800,7 +732,6 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
blockchain.WithTrackedValidatorsCache(b.trackedValidatorsCache),
blockchain.WithPayloadIDCache(b.payloadIDCache),
blockchain.WithSyncChecker(b.syncChecker),
blockchain.WithCustodyInfo(b.custodyInfo),
blockchain.WithSlasherEnabled(b.slasherEnabled),
blockchain.WithLightClientStore(b.lcStore),
)
@@ -888,7 +819,7 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
regularsync.WithDataColumnStorage(b.DataColumnStorage),
regularsync.WithVerifierWaiter(b.verifyInitWaiter),
regularsync.WithAvailableBlocker(bFillStore),
regularsync.WithCustodyInfo(b.custodyInfo),
regularsync.WithTrackedValidatorsCache(b.trackedValidatorsCache),
regularsync.WithSlasherEnabled(b.slasherEnabled),
regularsync.WithLightClientStore(b.lcStore),
regularsync.WithBatchVerifierLimit(b.cliCtx.Int(flags.BatchVerifierLimit.Name)),
@@ -915,6 +846,7 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
ClockWaiter: b.clockWaiter,
InitialSyncComplete: complete,
BlobStorage: b.BlobStorage,
DataColumnStorage: b.DataColumnStorage,
}, opts...)
return b.services.RegisterService(is)
}
@@ -1009,6 +941,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
FinalizationFetcher: chainService,
BlockReceiver: chainService,
BlobReceiver: chainService,
DataColumnReceiver: chainService,
AttestationReceiver: chainService,
GenesisTimeFetcher: chainService,
GenesisFetcher: chainService,
@@ -1036,6 +969,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
Router: router,
ClockWaiter: b.clockWaiter,
BlobStorage: b.BlobStorage,
DataColumnStorage: b.DataColumnStorage,
TrackedValidatorsCache: b.trackedValidatorsCache,
PayloadIDCache: b.payloadIDCache,
LCStore: b.lcStore,

@@ -5,6 +5,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/builder"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
"github.com/OffchainLabs/prysm/v6/config/params"
)

// Option for beacon node configuration.
@@ -51,6 +52,13 @@ func WithBlobStorageOptions(opt ...filesystem.BlobStorageOption) Option {
}
}

func WithConfigOptions(opt ...params.Option) Option {
return func(bn *BeaconNode) error {
bn.ConfigOptions = append(bn.ConfigOptions, opt...)
return nil
}
}

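The new WithConfigOptions functional option lets callers thread params.Option values into the node before the fork schedule is applied in New() (see the node.go changes above). A minimal usage sketch, assuming the usual node package import and a previously computed genesis validators root (both assumptions, not part of this diff):

	// Hypothetical call site: pass extra config options when constructing the beacon node.
	beacon, err := node.New(cliCtx, cancel,
		node.WithConfigOptions(params.WithGenesisValidatorsRoot(genesisValidatorsRoot)),
	)
	if err != nil {
		log.Fatal(err)
	}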
// WithDataColumnStorage sets the DataColumnStorage backend for the BeaconNode
func WithDataColumnStorage(bs *filesystem.DataColumnStorage) Option {
return func(bn *BeaconNode) error {

@@ -49,6 +49,7 @@ go_library(
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"//beacon-chain/p2p/encoder:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/peers/peerdata:go_default_library",
@@ -59,6 +60,7 @@ go_library(
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",
@@ -71,7 +73,6 @@ go_library(
"//monitoring/tracing:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//network:go_default_library",
"//network/forks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/metadata:go_default_library",
"//runtime:go_default_library",
@@ -168,7 +169,6 @@ go_test(
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network:go_default_library",
"//network/forks:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/metadata:go_default_library",
@@ -178,6 +178,7 @@ go_test(
"//testing/util:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//crypto:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
@@ -195,7 +196,6 @@ go_test(
"@com_github_multiformats_go_multiaddr//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],

@@ -11,11 +11,11 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/crypto/hash"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/pkg/errors"
@@ -274,14 +274,8 @@ func (s *Service) BroadcastLightClientOptimisticUpdate(ctx context.Context, upda
return errors.New("attempted to broadcast nil light client optimistic update")
}

forkDigest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(update.AttestedHeader().Beacon().Slot), s.genesisValidatorsRoot)
if err != nil {
err := errors.Wrap(err, "could not retrieve fork digest")
tracing.AnnotateError(span, err)
return err
}

if err := s.broadcastObject(ctx, update, lcOptimisticToTopic(forkDigest)); err != nil {
digest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
if err := s.broadcastObject(ctx, update, lcOptimisticToTopic(digest)); err != nil {
log.WithError(err).Debug("Failed to broadcast light client optimistic update")
err := errors.Wrap(err, "could not publish message")
tracing.AnnotateError(span, err)
@@ -300,13 +294,7 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
return errors.New("attempted to broadcast nil light client finality update")
}

forkDigest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(update.AttestedHeader().Beacon().Slot), s.genesisValidatorsRoot)
if err != nil {
err := errors.Wrap(err, "could not retrieve fork digest")
tracing.AnnotateError(span, err)
return err
}

forkDigest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
if err := s.broadcastObject(ctx, update, lcFinalityToTopic(forkDigest)); err != nil {
log.WithError(err).Debug("Failed to broadcast light client finality update")
err := errors.Wrap(err, "could not publish message")
@@ -318,22 +306,16 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
return nil
}

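The two light client broadcast paths above drop the error-returning fork digest lookup in favor of the params helper, which is safe because New() now applies the config options and calls InitializeForkSchedule() before any service starts. A before/after sketch of the call shape (variable names are illustrative only):

	// Before: every call site had to handle a possible lookup error.
	digest, err := forks.ForkDigestFromEpoch(epoch, genesisValidatorsRoot)
	if err != nil {
		return err
	}
	// After: the digest is read directly from the initialized fork schedule.
	digest := params.ForkDigest(epoch)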
// BroadcastDataColumn broadcasts a data column to the p2p network, the message is assumed to be
// BroadcastDataColumnSidecar broadcasts a data column to the p2p network, the message is assumed to be
// broadcasted to the current fork and to the input column subnet.
func (s *Service) BroadcastDataColumn(
root [fieldparams.RootLength]byte,
func (s *Service) BroadcastDataColumnSidecar(
dataColumnSubnet uint64,
dataColumnSidecar *ethpb.DataColumnSidecar,
dataColumnSidecar blocks.VerifiedRODataColumn,
) error {
// Add tracing to the function.
ctx, span := trace.StartSpan(s.ctx, "p2p.BroadcastDataColumn")
ctx, span := trace.StartSpan(s.ctx, "p2p.BroadcastDataColumnSidecar")
defer span.End()

// Ensure the data column sidecar is not nil.
if dataColumnSidecar == nil {
return errors.Errorf("attempted to broadcast nil data column sidecar at subnet %d", dataColumnSubnet)
}

// Retrieve the current fork digest.
forkDigest, err := s.currentForkDigest()
if err != nil {
@@ -343,20 +325,19 @@ func (s *Service) BroadcastDataColumn(
}

// Non-blocking broadcast, with attempts to discover a column subnet peer if none available.
go s.internalBroadcastDataColumn(ctx, root, dataColumnSubnet, dataColumnSidecar, forkDigest)
go s.internalBroadcastDataColumnSidecar(ctx, dataColumnSubnet, dataColumnSidecar, forkDigest)

return nil
}

func (s *Service) internalBroadcastDataColumn(
func (s *Service) internalBroadcastDataColumnSidecar(
ctx context.Context,
root [fieldparams.RootLength]byte,
columnSubnet uint64,
dataColumnSidecar *ethpb.DataColumnSidecar,
dataColumnSidecar blocks.VerifiedRODataColumn,
forkDigest [fieldparams.VersionLength]byte,
) {
// Add tracing to the function.
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastDataColumn")
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastDataColumnSidecar")
defer span.End()

// Increase the number of broadcast attempts.
@@ -398,7 +379,7 @@ func (s *Service) internalBroadcastDataColumn(
log.WithFields(logrus.Fields{
"slot": slot,
"timeSinceSlotStart": time.Since(slotStartTime),
"root": fmt.Sprintf("%#x", root),
"root": fmt.Sprintf("%#x", dataColumnSidecar.BlockRoot()),
"columnSubnet": columnSubnet,
}).Debug("Broadcasted data column sidecar")

@@ -15,12 +15,13 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
testpb "github.com/OffchainLabs/prysm/v6/proto/testing"
"github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -59,6 +60,7 @@ func TestService_Broadcast(t *testing.T) {
topic := "/eth2/%x/testing"
// Set a test gossip mapping for testpb.TestSimpleMessage.
GossipTypeMapping[reflect.TypeOf(msg)] = topic
p.clock = startup.NewClock(p.genesisTime, bytesutil.ToBytes32(p.genesisValidatorsRoot))
digest, err := p.currentForkDigest()
require.NoError(t, err)
topic = fmt.Sprintf(topic, digest)
@@ -227,6 +229,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
cfg: cfg,
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
custodyInfo: &custodyInfo{},
}
bootListener, err := s.createListener(ipAddr, pkey)
require.NoError(t, err)
@@ -255,6 +258,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
cfg: cfg,
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
custodyInfo: &custodyInfo{},
}
listener, err := s.startDiscoveryV5(ipAddr, pkey)
// Set for 2nd peer
@@ -263,7 +267,8 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
bitV := bitfield.NewBitvector64()
bitV.SetBitAt(subnet, true)
s.updateSubnetRecordWithMetadata(bitV)
err := s.updateSubnetRecordWithMetadata(bitV)
require.NoError(t, err)
}
assert.NoError(t, err, "Could not start discovery for node")
listeners = append(listeners, listener)
@@ -548,9 +553,7 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
require.NoError(t, err)

GossipTypeMapping[reflect.TypeOf(msg)] = LightClientOptimisticUpdateTopicFormat
digest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot), p.genesisValidatorsRoot)
require.NoError(t, err)
topic := fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, digest)
topic := fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))

// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()
@@ -614,9 +617,7 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
require.NoError(t, err)

GossipTypeMapping[reflect.TypeOf(msg)] = LightClientFinalityUpdateTopicFormat
digest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot), p.genesisValidatorsRoot)
require.NoError(t, err)
topic := fmt.Sprintf(LightClientFinalityUpdateTopicFormat, digest)
topic := fmt.Sprintf(LightClientFinalityUpdateTopicFormat, params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))

// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()
@@ -695,6 +696,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
subnetsLock: make(map[uint64]*sync.RWMutex),
subnetsLockLock: sync.Mutex{},
peers: peers.NewStatus(t.Context(), &peers.StatusConfig{ScorerParams: &scorers.Config{}}),
custodyInfo: &custodyInfo{},
}

// Create a listener.
@@ -709,13 +711,8 @@ func TestService_BroadcastDataColumn(t *testing.T) {
subnet := peerdas.ComputeSubnetForDataColumnSidecar(columnIndex)
topic := fmt.Sprintf(topicFormat, digest, subnet) + service.Encoding().ProtocolSuffix()

roSidecars, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: columnIndex}})
sidecar := roSidecars[0].DataColumnSidecar

// Attempt to broadcast nil object should fail.
var emptyRoot [fieldparams.RootLength]byte
err = service.BroadcastDataColumn(emptyRoot, subnet, nil)
require.ErrorContains(t, "attempted to broadcast nil", err)
_, verifiedRoSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: columnIndex}})
verifiedRoSidecar := verifiedRoSidecars[0]

// Subscribe to the topic.
sub, err := p2.SubscribeToTopic(topic)
@@ -725,7 +722,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
time.Sleep(50 * time.Millisecond)

// Broadcast to peers and wait.
err = service.BroadcastDataColumn(emptyRoot, subnet, sidecar)
err = service.BroadcastDataColumnSidecar(subnet, verifiedRoSidecar)
require.NoError(t, err)

// Receive the message.
@@ -737,5 +734,5 @@ func TestService_BroadcastDataColumn(t *testing.T) {

var result ethpb.DataColumnSidecar
require.NoError(t, service.Encoding().DecodeGossip(msg.Data, &result))
require.DeepEqual(t, &result, sidecar)
require.DeepEqual(t, &result, verifiedRoSidecar)
}

@@ -4,7 +4,6 @@ import (
"time"

statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
)
@@ -28,7 +27,6 @@ type Config struct {
PrivateKey string
DataDir string
DiscoveryDir string
MetaDataDir string
QUICPort uint
TCPPort uint
UDPPort uint
@@ -38,9 +36,8 @@ type Config struct {
AllowListCIDR string
DenyListCIDR []string
StateNotifier statefeed.Notifier
DB db.ReadOnlyDatabase
DB db.ReadOnlyDatabaseWithSeqNum
ClockWaiter startup.ClockWaiter
CustodyInfo *peerdas.CustodyInfo
}

// validateConfig validates whether the values provided are accurate and will set

@@ -3,11 +3,114 @@ package p2p
import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

var _ DataColumnsHandler = (*Service)(nil)
var _ CustodyManager = (*Service)(nil)

// EarliestAvailableSlot returns the earliest available slot.
func (s *Service) EarliestAvailableSlot() (primitives.Slot, error) {
s.custodyInfoLock.RLock()
defer s.custodyInfoLock.RUnlock()

if s.custodyInfo == nil {
return 0, errors.New("no custody info available")
}

return s.custodyInfo.earliestAvailableSlot, nil
}

// CustodyGroupCount returns the custody group count.
func (s *Service) CustodyGroupCount() (uint64, error) {
s.custodyInfoLock.Lock()
defer s.custodyInfoLock.Unlock()

if s.custodyInfo == nil {
return 0, errors.New("no custody info available")
}

return s.custodyInfo.groupCount, nil
}

// UpdateCustodyInfo updates the stored custody group count to the incoming one
// if the incoming one is greater than the stored one. In this case, the
// incoming earliest available slot must be greater than or equal to the
// stored one, or an error is returned.
//
// - If there is no stored custody info, or
// - If the incoming earliest available slot is greater than or equal to the
// fulu fork slot and the incoming custody group count is greater than the
// number of samples per slot
//
// then the stored earliest available slot is updated to the incoming one.
//
// This function returns the (possibly updated) earliest available slot and
// custody group count.
//
// Rationale:
// - The custody group count can only be increased (specification)
// - If the custody group count is increased before Fulu, we can still serve
// all the data, since there is no sharding before Fulu. As a consequence,
// we do not need to update the earliest available slot in this case.
// - If the custody group count is increased after Fulu, but to a value less
// than or equal to the number of samples per slot, we can still serve all
// the data, since we store all sampled data column sidecars in all cases.
// As a consequence, we do not need to update the earliest available slot.
// - If the custody group count is increased after Fulu to a value higher than
// the number of samples per slot, then, until the backfill is complete, we
// are unable to serve the data column sidecars corresponding to the new
// custody groups. As a consequence, we need to update the earliest
// available slot to inform the peers that we are not able to serve data
// column sidecars before this point.
func (s *Service) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
samplesPerSlot := params.BeaconConfig().SamplesPerSlot

s.custodyInfoLock.Lock()
defer s.custodyInfoLock.Unlock()

if s.custodyInfo == nil {
s.custodyInfo = &custodyInfo{
earliestAvailableSlot: earliestAvailableSlot,
groupCount: custodyGroupCount,
}
return earliestAvailableSlot, custodyGroupCount, nil
}

inMemory := s.custodyInfo
if custodyGroupCount <= inMemory.groupCount {
return inMemory.earliestAvailableSlot, inMemory.groupCount, nil
}

if earliestAvailableSlot < inMemory.earliestAvailableSlot {
return 0, 0, errors.Errorf(
"earliest available slot %d is less than the current one %d. (custody group count: %d, current one: %d)",
earliestAvailableSlot, inMemory.earliestAvailableSlot, custodyGroupCount, inMemory.groupCount,
)
}

if custodyGroupCount <= samplesPerSlot {
inMemory.groupCount = custodyGroupCount
return inMemory.earliestAvailableSlot, custodyGroupCount, nil
}

fuluForkSlot, err := fuluForkSlot()
if err != nil {
return 0, 0, errors.Wrap(err, "fulu fork slot")
}

if earliestAvailableSlot < fuluForkSlot {
inMemory.groupCount = custodyGroupCount
return inMemory.earliestAvailableSlot, custodyGroupCount, nil
}

inMemory.earliestAvailableSlot = earliestAvailableSlot
inMemory.groupCount = custodyGroupCount
return earliestAvailableSlot, custodyGroupCount, nil
}

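To make the update rules concrete, here is how they resolve with the configuration used by TestUpdateCustodyInfo later in this diff (SamplesPerSlot = 8, FuluForkEpoch = 10, so with a 32-slot epoch the Fulu fork slot is 320). The slot and group values are illustrative only:

	// stored {slot 50, groups 10}, incoming {slot 60, groups 8}   -> unchanged (the group count never decreases)
	// stored {slot 100, groups 5}, incoming {slot 50, groups 10}  -> error (the earliest slot never decreases)
	// stored {slot 50, groups 5},  incoming {slot 60, groups 8}   -> groups 8, slot stays 50 (count <= SamplesPerSlot)
	// stored {slot 50, groups 5},  incoming {slot 60, groups 15}  -> groups 15, slot stays 50 (60 is before Fulu fork slot 320)
	// stored {slot 50, groups 5},  incoming {slot 500, groups 15} -> groups 15, slot 500 (after Fulu and > SamplesPerSlot)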
// CustodyGroupCountFromPeer retrieves custody group count from a peer.
// It first tries to get the custody group count from the peer's metadata,
@@ -52,6 +155,7 @@ func (s *Service) custodyGroupCountFromPeerENR(pid peer.ID) uint64 {
log := log.WithFields(logrus.Fields{
"peerID": pid,
"defaultValue": custodyRequirement,
"agent": agentString(pid, s.Host()),
})

// Retrieve the ENR of the peer.
@@ -72,3 +176,19 @@ func (s *Service) custodyGroupCountFromPeerENR(pid peer.ID) uint64 {

return custodyGroupCount
}

func fuluForkSlot() (primitives.Slot, error) {
beaconConfig := params.BeaconConfig()

fuluForkEpoch := beaconConfig.FuluForkEpoch
if fuluForkEpoch == beaconConfig.FarFutureEpoch {
return beaconConfig.FarFutureSlot, nil
}

forkFuluSlot, err := slots.EpochStart(fuluForkEpoch)
if err != nil {
return 0, errors.Wrap(err, "epoch start")
}

return forkFuluSlot, nil
}

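A quick worked example for the helper above, assuming the mainnet preset of 32 slots per epoch: with FuluForkEpoch = 10, slots.EpochStart(10) returns slot 10 * 32 = 320, so UpdateCustodyInfo treats an incoming earliest available slot of 320 or later as "after Fulu". When FuluForkEpoch is still FarFutureEpoch, the helper returns FarFutureSlot and the backfill-sensitive branch is effectively never taken.

	// Example (mainnet preset assumed): FuluForkEpoch = 10 -> slots.EpochStart(10) = 10 * 32 = slot 320.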
@@ -1,12 +1,16 @@
package p2p

import (
"context"
"strings"
"testing"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
testp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/metadata"
@@ -15,6 +19,174 @@ import (
"github.com/libp2p/go-libp2p/core/network"
)

func TestEarliestAvailableSlot(t *testing.T) {
t.Run("No custody info available", func(t *testing.T) {
service := &Service{
custodyInfo: nil,
}

_, err := service.EarliestAvailableSlot()

require.NotNil(t, err)
})

t.Run("Valid custody info", func(t *testing.T) {
const expected primitives.Slot = 100

service := &Service{
custodyInfo: &custodyInfo{
earliestAvailableSlot: expected,
},
}

slot, err := service.EarliestAvailableSlot()

require.NoError(t, err)
require.Equal(t, expected, slot)
})
}

func TestCustodyGroupCount(t *testing.T) {
t.Run("No custody info available", func(t *testing.T) {
service := &Service{
custodyInfo: nil,
}

_, err := service.CustodyGroupCount()

require.NotNil(t, err)
require.Equal(t, true, strings.Contains(err.Error(), "no custody info available"))
})

t.Run("Valid custody info", func(t *testing.T) {
const expected uint64 = 5

service := &Service{
custodyInfo: &custodyInfo{
groupCount: expected,
},
}

count, err := service.CustodyGroupCount()

require.NoError(t, err)
require.Equal(t, expected, count)
})
}

func TestUpdateCustodyInfo(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.SamplesPerSlot = 8
config.FuluForkEpoch = 10
params.OverrideBeaconConfig(config)

testCases := []struct {
name string
initialCustodyInfo *custodyInfo
inputSlot primitives.Slot
inputGroupCount uint64
expectedUpdated bool
expectedSlot primitives.Slot
expectedGroupCount uint64
expectedErr string
}{
{
name: "First time setting custody info",
initialCustodyInfo: nil,
inputSlot: 100,
inputGroupCount: 5,
expectedUpdated: true,
expectedSlot: 100,
expectedGroupCount: 5,
},
{
name: "Group count decrease - no update",
initialCustodyInfo: &custodyInfo{
earliestAvailableSlot: 50,
groupCount: 10,
},
inputSlot: 60,
inputGroupCount: 8,
expectedUpdated: false,
expectedSlot: 50,
expectedGroupCount: 10,
},
{
name: "Earliest slot decrease - error",
initialCustodyInfo: &custodyInfo{
earliestAvailableSlot: 100,
groupCount: 5,
},
inputSlot: 50,
inputGroupCount: 10,
expectedErr: "earliest available slot 50 is less than the current one 100",
},
{
name: "Group count increase but <= samples per slot",
initialCustodyInfo: &custodyInfo{
earliestAvailableSlot: 50,
groupCount: 5,
},
inputSlot: 60,
inputGroupCount: 8,
expectedUpdated: true,
expectedSlot: 50,
expectedGroupCount: 8,
},
{
name: "Group count increase > samples per slot, before Fulu fork",
initialCustodyInfo: &custodyInfo{
earliestAvailableSlot: 50,
groupCount: 5,
},
inputSlot: 60,
inputGroupCount: 15,
expectedUpdated: true,
expectedSlot: 50,
expectedGroupCount: 15,
},
{
name: "Group count increase > samples per slot, after Fulu fork",
initialCustodyInfo: &custodyInfo{
earliestAvailableSlot: 50,
groupCount: 5,
},
inputSlot: 500,
inputGroupCount: 15,
expectedUpdated: true,
expectedSlot: 500,
expectedGroupCount: 15,
},
}

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
service := &Service{
custodyInfo: tc.initialCustodyInfo,
}

slot, groupCount, err := service.UpdateCustodyInfo(tc.inputSlot, tc.inputGroupCount)

if tc.expectedErr != "" {
require.NotNil(t, err)
require.Equal(t, true, strings.Contains(err.Error(), tc.expectedErr))
return
}

require.NoError(t, err)
require.Equal(t, tc.expectedSlot, slot)
require.Equal(t, tc.expectedGroupCount, groupCount)

if tc.expectedUpdated {
require.NotNil(t, service.custodyInfo)
require.Equal(t, tc.expectedSlot, service.custodyInfo.earliestAvailableSlot)
require.Equal(t, tc.expectedGroupCount, service.custodyInfo.groupCount)
}
})
}
}

func TestCustodyGroupCountFromPeer(t *testing.T) {
const (
expectedENR uint64 = 7
@@ -98,6 +270,7 @@ func TestCustodyGroupCountFromPeer(t *testing.T) {
service := &Service{
peers: peers,
metaData: tc.metadata,
host: testp2p.NewTestP2P(t).Host(),
}

// Retrieve the custody count from the remote peer.
@@ -109,3 +282,60 @@ func TestCustodyGroupCountFromPeer(t *testing.T) {
}

}

func TestCustodyGroupCountFromPeerENR(t *testing.T) {
const (
expectedENR uint64 = 7
pid = "test-id"
)

cgc := peerdas.Cgc(expectedENR)
custodyRequirement := params.BeaconConfig().CustodyRequirement

testCases := []struct {
name string
record *enr.Record
expected uint64
wantErr bool
}{
{
name: "No ENR record",
record: nil,
expected: custodyRequirement,
},
{
name: "Empty ENR record",
record: &enr.Record{},
expected: custodyRequirement,
},
{
name: "Valid ENR with custody group count",
record: func() *enr.Record {
record := &enr.Record{}
record.Set(cgc)
return record
}(),
expected: expectedENR,
},
}

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
peers := peers.NewStatus(context.Background(), &peers.StatusConfig{
ScorerParams: &scorers.Config{},
})

if tc.record != nil {
peers.Add(tc.record, pid, nil, network.DirOutbound)
}

service := &Service{
peers: peers,
host: testp2p.NewTestP2P(t).Host(),
}

actual := service.custodyGroupCountFromPeerENR(pid)
require.Equal(t, tc.expected, actual)
})
}
}