Compare commits

..

43 Commits

Author SHA1 Message Date
rkapka
16dcc4bf5e zip only json 2025-07-07 18:10:09 +02:00
Radosław Kapka
46afec9cd2 Merge branch 'develop' into develop2 2025-07-07 15:10:47 +02:00
terence
2d2507b907 Attest timely by default (#15410)
* Attest timely by default

* Fix deprecated flag naming
2025-07-04 18:28:48 +00:00
terence
a701f07f3a Increase mainnet default builder gas limit to 45M (#15455) 2025-07-04 15:58:32 +00:00
terence
f4bbe5ca40 Add batch verifier limit (#15467) 2025-07-04 15:57:33 +00:00
Manu NALEPA
4be8de2476 PeerDAS: Implement reconstruction of data column sidecars retrieved from the execution client. (#15469)
* `BestFinalized`: No functional change. Improve comments and reduce scope.

* PeerDAS execution: Implement engine method `GetBlobsV2` and `ReconstructDataColumnSidecars`.

* Fix James' comment.

* Fix James' comment.
2025-07-03 22:35:28 +00:00
james-prysm
fac509a3e6 duties v2 fix no assignment panic (#15466)
* adding fix

* preston's suggestion
2025-07-03 15:44:51 +00:00
Manu NALEPA
b1ac8209b2 PeerDAS: Implement reconstruct. (#15454)
* PeerDAS: Implement reconstruct.

* Fix Preston's comment.

* Fix Preston's comment.
2025-07-02 19:02:55 +00:00
Bastin
74c9586c66 refactor lc kv tests (#15465)
* refactor lc kv tests

* clean up

* Update lightclient_test.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2025-07-02 13:24:32 +00:00
Bastin
f0ad3dfaeb Refactor lc bootstrap tests (#15462)
* fix versioning

* changelog

* fix blockchain tests

* fix linter issue

* fix spec tests

* fix default lc update version

* fix lc header version

* gzl

* clean up the code

* Update testing/spectest/shared/common/light_client/update_ranking.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* add fulu set up in update ranking

* pass att block to createDefaultLCUpdate

* address comments

* linter

* Update lightclient.go

* refactor lc bootstrap tests

* changelog

* sort imports

* refactor lc bootstrap tests

* changelog

* Implement the new Fulu Metadata. (#15440)

* clean up

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2025-07-02 13:23:39 +00:00
Bastin
2540196747 Put the initialization of LC Store behind the enable-light-client flag (#15464)
* put lc store behind flag

* lint

* remove extra line
2025-07-02 11:09:37 +00:00
Manu NALEPA
f133751cce --chain-config-file: Do not use any more mainnet boot nodes. (#15460) 2025-07-02 09:36:31 +00:00
Bastin
bddcc158e4 Fix LC versioning bug (#15400)
* fix versioning

* changelog

* fix blockchain tests

* fix linter issue

* fix spec tests

* fix default lc update version

* fix lc header version

* gzl

* clean up the code

* Update testing/spectest/shared/common/light_client/update_ranking.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* add fulu set up in update ranking

* pass att block to createDefaultLCUpdate

* address comments

* linter

* Update lightclient.go

* sort imports

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2025-07-01 21:40:18 +00:00
Manu NALEPA
bc7664321b Implement the new Fulu Metadata. (#15440) 2025-07-01 07:07:32 +00:00
Manu NALEPA
97f416b3a7 dataColumnSidecarByRootRPCHandler: Do not rely any more on map iteration, which does not produce reproducible output order. (#15441) 2025-06-26 13:07:19 +00:00
Manu NALEPA
1c1e0f38bb PeerDAS: Implement beacon API blob sidecar endpoint for Fulu (#15436)
* `TestGetBlob`: Refactor (no functional change).

* `GenerateTestFuluBlockWithSidecars`: Generate commitments consistent with blobs.

This is actually not needed for this PR, but it does not hurt.

* Beacon API: Implement blob sidecars endpoint for Fulu.

* Fix Radek's comment.

* Update beacon-chain/rpc/lookup/blocker.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Fix Radek's comment.

* Update beacon-chain/rpc/lookup/blocker.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2025-06-26 10:05:22 +00:00
Manu NALEPA
121914d0d7 Implement SendDataColumnSidecarsByRangeRequest and SendDataColumnSidecarsByRootRequest. (#15430)
* Implement `SendDataColumnSidecarsByRangeRequest` and `SendDataColumnSidecarsByRootRequest`.

* Fix James's comment.

* Fix James's comment.

* Fix James' comment.

* Fix marshaller.
2025-06-24 21:40:16 +00:00
Manu NALEPA
e8625cd89d Peerdas various (#15423)
* Topic mapping: Group `const` and `var`.

Cosmetic change, no functional change.

* `TopicFromMessage`: Do not assume anymore that all Fulu-specific topics are V3 only.

* Proto: Remove unused `DataColumnIdentifier` and add new `StatusV2`.

`DataColumnIdentifier` was removed from the spec in https://github.com/ethereum/consensus-specs/pull/4284. Eventually we stopped using it in Prysm, but never removed the corresponding proto message.

The new `StatusV2` is introduced in the spec in https://github.com/ethereum/consensus-specs/pull/4374; a hedged sketch of its shape follows this commit entry.

* `readChunkedDataColumnSideCar` ==> `readChunkedDataColumnSidecar`.

* `rpc_send_request.go`: Reorganize file (no functional change).

* `readChunkedDataColumnSidecar`: Add `validationFunctions` parameter and add tests.
2025-06-23 14:47:02 +00:00
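
For context on the `StatusV2` message referenced above, a minimal sketch of the fields it carries, assuming the shape described in consensus-specs PR 4374; the Go field names here are illustrative, not Prysm's generated proto:

	// Sketch only: StatusV2 extends the v1 status handshake with the earliest
	// slot for which the node still serves data (relevant to PeerDAS custody).
	type StatusV2 struct {
		ForkDigest            [4]byte
		FinalizedRoot         [32]byte
		FinalizedEpoch        uint64
		HeadRoot              [32]byte
		HeadSlot              uint64
		EarliestAvailableSlot uint64 // new relative to Status v1
	}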
terence
667aaf1564 Remove invalid from logs for blob sidecars missing parent or out of range (#15428) 2025-06-22 21:55:44 +00:00
Bastin
e020907d2a Revert "SSZ support for submitAttestationsV2 (#15422)" (#15426)
This reverts commit 9927cea35a.
2025-06-20 21:21:16 +00:00
Bastin
9927cea35a SSZ support for submitAttestationsV2 (#15422)
* ssz support for submitAttestationsV2

* changelog

* lowercase errors

* save 1 line

* Update beacon-chain/rpc/eth/beacon/handlers_pool.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* add comment

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2025-06-20 17:21:48 +00:00
Radosław Kapka
6023a0d45f Merge branch 'develop' into develop2 2025-06-20 13:12:25 +02:00
Rose Jethani
a8a2866798 Merge branch 'develop' into develop2 2025-06-17 04:56:44 +05:30
Rose Jethani
011150e93e Merge branch 'develop' into develop2 2025-05-30 21:18:59 +05:30
rose2221
3bd52c706a fixed e2e_test 2025-05-30 17:52:03 +05:30
Rose Jethani
f58019ba17 Merge branch 'OffchainLabs:develop' into develop2 2025-05-30 00:36:36 +05:30
Rose Jethani
230d2af015 Merge branch 'develop' into develop2 2025-05-27 12:57:04 +05:30
Rose Jethani
3f6c6e935f Merge branch 'prysmaticlabs:develop' into develop 2025-03-24 17:46:27 +05:30
Rose Jethani
1d50bc0ebf Merge branch 'prysmaticlabs:develop' into develop 2025-03-21 12:05:04 +05:30
rkapka
281bdd84b4 formatting 2025-03-18 12:34:51 +01:00
Radosław Kapka
c0368681d3 Merge branch 'develop' into develop 2025-03-18 11:58:15 +01:00
rose2221
7d6afc3412 updated bazel 2025-03-14 23:35:48 +05:30
Rose Jethani
a168bc256d Merge branch 'prysmaticlabs:develop' into develop 2025-03-14 23:32:47 +05:30
Radosław Kapka
c8187616d8 Merge branch 'develop' into develop 2025-03-14 13:22:14 +01:00
Radosław Kapka
4bc6df1f50 Update api/server/middleware/middleware_test.go 2025-03-14 13:21:54 +01:00
james-prysm
643962029a Merge branch 'develop' into develop 2025-03-12 10:20:24 -05:00
Rose Jethani
a95f7b4867 Merge branch 'develop' into develop 2025-03-05 15:04:37 +05:30
rose2221
574108644c Added tests 2025-02-26 14:50:19 +05:30
Radosław Kapka
670b2b8291 Update changelog/rose2221-develop.md 2025-02-25 13:46:35 +01:00
rose2221
3198d64a7c fixed AcceptEncodingHeaderHandler 2025-02-25 18:04:55 +05:30
Rose Jethani
26645ed724 Merge branch 'develop' into develop 2025-02-24 22:22:42 +05:30
Radosław Kapka
ee3541534c Update api/server/middleware/middleware.go 2025-02-24 12:41:49 +01:00
rose2221
3e429b7d55 files changed 2025-02-24 15:30:44 +05:30
287 changed files with 6810 additions and 10739 deletions

View File

@@ -16,6 +16,7 @@ go_library(
"//api/server/structs:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network/forks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",

View File

@@ -9,6 +9,7 @@ import (
"net/url"
"path"
"regexp"
"sort"
"strconv"
"github.com/OffchainLabs/prysm/v6/api/client"
@@ -16,6 +17,7 @@ import (
"github.com/OffchainLabs/prysm/v6/api/server/structs"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
@@ -135,6 +137,24 @@ func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fo
return fr.ToConsensus()
}
// GetForkSchedule retrieves all forks, past, present, and future, of which this node is aware.
func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, error) {
body, err := c.Get(ctx, getForkSchedulePath)
if err != nil {
return nil, errors.Wrap(err, "error requesting fork schedule")
}
fsr := &forkScheduleResponse{}
err = json.Unmarshal(body, fsr)
if err != nil {
return nil, err
}
ofs, err := fsr.OrderedForkSchedule()
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("problem unmarshaling %s response", getForkSchedulePath))
}
return ofs, nil
}
// GetConfigSpec retrieves the current configs of the network used by the beacon node.
func (c *Client) GetConfigSpec(ctx context.Context) (*structs.GetSpecResponse, error) {
body, err := c.Get(ctx, getConfigSpecPath)
@@ -314,3 +334,31 @@ func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*structs.BLSToEx
}
return poolResponse, nil
}
type forkScheduleResponse struct {
Data []structs.Fork
}
func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
ofs := make(forks.OrderedSchedule, 0)
for _, d := range fsr.Data {
epoch, err := strconv.ParseUint(d.Epoch, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "error parsing epoch %s", d.Epoch)
}
vSlice, err := hexutil.Decode(d.CurrentVersion)
if err != nil {
return nil, err
}
if len(vSlice) != 4 {
return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(vSlice), d.CurrentVersion)
}
version := bytesutil.ToBytes4(vSlice)
ofs = append(ofs, forks.ForkScheduleEntry{
Version: version,
Epoch: primitives.Epoch(epoch),
})
}
sort.Sort(ofs)
return ofs, nil
}
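
A hedged usage sketch for the new `GetForkSchedule` method, assuming it lives in the same package as `Client`; the printing helper is illustrative:

	// Sketch: fetch the fork schedule and walk its entries, which arrive
	// sorted by activation epoch (guaranteed by the sort.Sort call above).
	func printForkSchedule(ctx context.Context, c *Client) error {
		ofs, err := c.GetForkSchedule(ctx)
		if err != nil {
			return errors.Wrap(err, "could not fetch fork schedule")
		}
		for _, entry := range ofs {
			fmt.Printf("fork %#x activates at epoch %d\n", entry.Version, entry.Epoch)
		}
		return nil
	}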

View File

@@ -8,7 +8,11 @@ go_library(
],
importpath = "github.com/OffchainLabs/prysm/v6/api/server/middleware",
visibility = ["//visibility:public"],
deps = ["@com_github_rs_cors//:go_default_library"],
deps = [
"//api:go_default_library",
"@com_github_rs_cors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
go_test(
@@ -22,5 +26,6 @@ go_test(
"//api:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -1,11 +1,14 @@
package middleware
import (
"compress/gzip"
"fmt"
"net/http"
"strings"
"github.com/OffchainLabs/prysm/v6/api"
"github.com/rs/cors"
log "github.com/sirupsen/logrus"
)
type Middleware func(http.Handler) http.Handler
@@ -112,6 +115,46 @@ func AcceptHeaderHandler(serverAcceptedTypes []string) Middleware {
}
}
// AcceptEncodingHeaderHandler compresses the response before sending it back to the client, if gzip is supported.
func AcceptEncodingHeaderHandler() Middleware {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
next.ServeHTTP(w, r)
return
}
gz := gzip.NewWriter(w)
gzipRW := &gzipResponseWriter{gz: gz, ResponseWriter: w}
defer func() {
if !gzipRW.zipped {
return
}
if err := gz.Close(); err != nil {
log.WithError(err).Error("Failed to close gzip writer")
}
}()
next.ServeHTTP(gzipRW, r)
})
}
}
type gzipResponseWriter struct {
gz *gzip.Writer
http.ResponseWriter
zipped bool
}
func (g *gzipResponseWriter) Write(b []byte) (int, error) {
if strings.Contains(g.Header().Get("Content-Type"), api.JsonMediaType) {
g.zipped = true
g.Header().Set("Content-Encoding", "gzip")
return g.gz.Write(b)
}
return g.ResponseWriter.Write(b)
}
func MiddlewareChain(h http.Handler, mw []Middleware) http.Handler {
if len(mw) < 1 {
return h
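
A hedged wiring sketch for the new middleware, using the `MiddlewareChain` helper shown above; the route and response body are illustrative. Note the design choice in `gzipResponseWriter.Write`: compression is gated on the JSON Content-Type, so SSZ (octet-stream) responses bypass gzip even when the client advertises support.

	// Sketch: JSON responses are gzipped when the client sends
	// Accept-Encoding: gzip; everything else passes through untouched.
	func newGzippedMux() http.Handler {
		mux := http.NewServeMux()
		mux.HandleFunc("/status", func(w http.ResponseWriter, r *http.Request) {
			w.Header().Set("Content-Type", api.JsonMediaType)
			_, _ = w.Write([]byte(`{"ok":true}`))
		})
		return MiddlewareChain(mux, []Middleware{AcceptEncodingHeaderHandler()})
	}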

View File

@@ -1,12 +1,16 @@
package middleware
import (
"bytes"
"compress/gzip"
"io"
"net/http"
"net/http/httptest"
"testing"
"github.com/OffchainLabs/prysm/v6/api"
"github.com/OffchainLabs/prysm/v6/testing/require"
log "github.com/sirupsen/logrus"
)
func TestNormalizeQueryValuesHandler(t *testing.T) {
@@ -124,6 +128,89 @@ func TestContentTypeHandler(t *testing.T) {
}
}
func TestAcceptEncodingHeaderHandler(t *testing.T) {
dummyContent := "Test gzip middleware content"
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", r.Header.Get("Accept"))
_, err := w.Write([]byte(dummyContent))
require.NoError(t, err)
})
handler := AcceptEncodingHeaderHandler()(nextHandler)
tests := []struct {
name string
accept string
acceptEncoding string
expectCompressed bool
}{
{
name: "Gzip supported",
accept: api.JsonMediaType,
acceptEncoding: "gzip",
expectCompressed: true,
},
{
name: "Multiple encodings supported",
accept: api.JsonMediaType,
acceptEncoding: "deflate, gzip",
expectCompressed: true,
},
{
name: "Gzip not supported",
accept: api.JsonMediaType,
acceptEncoding: "deflate",
expectCompressed: false,
},
{
name: "No accept encoding header",
accept: api.JsonMediaType,
acceptEncoding: "",
expectCompressed: false,
},
{
name: "SSZ",
accept: api.OctetStreamMediaType,
acceptEncoding: "gzip",
expectCompressed: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
req := httptest.NewRequest("GET", "/", nil)
req.Header.Set("Accept", tt.accept)
if tt.acceptEncoding != "" {
req.Header.Set("Accept-Encoding", tt.acceptEncoding)
}
rr := httptest.NewRecorder()
handler.ServeHTTP(rr, req)
if tt.expectCompressed {
require.Equal(t, "gzip", rr.Header().Get("Content-Encoding"), "Expected Content-Encoding header to be 'gzip'")
compressedBody := rr.Body.Bytes()
require.NotEqual(t, dummyContent, string(compressedBody), "Response body should be compressed and differ from the original")
gzReader, err := gzip.NewReader(bytes.NewReader(compressedBody))
require.NoError(t, err, "Failed to create gzipReader")
defer func() {
if err := gzReader.Close(); err != nil {
log.WithError(err).Error("Failed to close gzip reader")
}
}()
decompressedBody, err := io.ReadAll(gzReader)
require.NoError(t, err, "Failed to decompress response body")
require.Equal(t, dummyContent, string(decompressedBody), "Decompressed content should match the original")
} else {
require.Equal(t, dummyContent, rr.Body.String(), "Response body should be uncompressed and match the original")
}
})
}
}
func TestAcceptHeaderHandler(t *testing.T) {
acceptedTypes := []string{"application/json", "application/octet-stream"}

View File

@@ -12,12 +12,16 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
consensusblocks "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
payloadattribute "github.com/OffchainLabs/prysm/v6/consensus-types/payload-attribute"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -32,7 +36,152 @@ var defaultLatestValidHash = bytesutil.PadTo([]byte{0xff}, 32)
// 1. Re-organizes the execution payload chain and corresponding state to make head_block_hash the head.
// 2. Applies finality to the execution state: it irreversibly persists the chain of all execution payloads and corresponding state, up to and including finalized_block_hash.
func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*enginev1.PayloadIDBytes, error) {
return nil, nil
ctx, span := trace.StartSpan(ctx, "blockChain.notifyForkchoiceUpdate")
defer span.End()
if arg.headBlock == nil || arg.headBlock.IsNil() {
log.Error("Head block is nil")
return nil, nil
}
headBlk := arg.headBlock.Block()
if headBlk == nil || headBlk.IsNil() || headBlk.Body().IsNil() {
log.Error("Head block is nil")
return nil, nil
}
// Must not call fork choice updated until the transition conditions are met on the PoW network.
isExecutionBlk, err := blocks.IsExecutionBlock(headBlk.Body())
if err != nil {
log.WithError(err).Error("Could not determine if head block is execution block")
return nil, nil
}
if !isExecutionBlk {
return nil, nil
}
headPayload, err := headBlk.Body().Execution()
if err != nil {
log.WithError(err).Error("Could not get execution payload for head block")
return nil, nil
}
finalizedHash := s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
justifiedHash := s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
fcs := &enginev1.ForkchoiceState{
HeadBlockHash: headPayload.BlockHash(),
SafeBlockHash: justifiedHash[:],
FinalizedBlockHash: finalizedHash[:],
}
if len(fcs.HeadBlockHash) != 32 || [32]byte(fcs.HeadBlockHash) == [32]byte{} {
// check if we are sending FCU at genesis
hash, err := s.hashForGenesisBlock(ctx, arg.headRoot)
if errors.Is(err, errNotGenesisRoot) {
log.Error("Sending nil head block hash to execution engine")
return nil, nil
}
if err != nil {
return nil, errors.Wrap(err, "could not get head block hash")
}
fcs.HeadBlockHash = hash
}
if arg.attributes == nil {
arg.attributes = payloadattribute.EmptyWithVersion(headBlk.Version())
}
payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, arg.attributes)
if err != nil {
switch {
case errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus):
forkchoiceUpdatedOptimisticNodeCount.Inc()
log.WithFields(logrus.Fields{
"headSlot": headBlk.Slot(),
"headPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash())),
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash[:])),
}).Info("Called fork choice updated with optimistic block")
return payloadID, nil
case errors.Is(err, execution.ErrInvalidPayloadStatus):
forkchoiceUpdatedInvalidNodeCount.Inc()
headRoot := arg.headRoot
if len(lastValidHash) == 0 {
lastValidHash = defaultLatestValidHash
}
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, headRoot, headBlk.ParentRoot(), bytesutil.ToBytes32(lastValidHash))
if err != nil {
log.WithError(err).Error("Could not set head root to invalid")
return nil, nil
}
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
log.WithError(err).Error("Could not remove invalid block and state")
return nil, nil
}
r, err := s.cfg.ForkChoiceStore.Head(ctx)
if err != nil {
log.WithFields(logrus.Fields{
"slot": headBlk.Slot(),
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(headRoot[:])),
"invalidChildrenCount": len(invalidRoots),
}).Warn("Pruned invalid blocks, could not update head root")
return nil, invalidBlock{error: ErrInvalidPayload, root: arg.headRoot, invalidAncestorRoots: invalidRoots}
}
b, err := s.getBlock(ctx, r)
if err != nil {
log.WithError(err).Error("Could not get head block")
return nil, nil
}
st, err := s.cfg.StateGen.StateByRoot(ctx, r)
if err != nil {
log.WithError(err).Error("Could not get head state")
return nil, nil
}
pid, err := s.notifyForkchoiceUpdate(ctx, &fcuConfig{
headState: st,
headRoot: r,
headBlock: b,
attributes: arg.attributes,
})
if err != nil {
return nil, err // Returning err because it's recursive here.
}
if err := s.saveHead(ctx, r, b, st); err != nil {
log.WithError(err).Error("could not save head after pruning invalid blocks")
}
log.WithFields(logrus.Fields{
"slot": headBlk.Slot(),
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(headRoot[:])),
"invalidChildrenCount": len(invalidRoots),
"newHeadRoot": fmt.Sprintf("%#x", bytesutil.Trunc(r[:])),
}).Warn("Pruned invalid blocks")
return pid, invalidBlock{error: ErrInvalidPayload, root: arg.headRoot, invalidAncestorRoots: invalidRoots}
default:
log.WithError(err).Error(ErrUndefinedExecutionEngineError)
return nil, nil
}
}
forkchoiceUpdatedValidNodeCount.Inc()
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, arg.headRoot); err != nil {
log.WithError(err).Error("Could not set head root to valid")
return nil, nil
}
// If the forkchoice update call has an attribute, update the payload ID cache.
hasAttr := arg.attributes != nil && !arg.attributes.IsEmpty()
nextSlot := s.CurrentSlot() + 1
if hasAttr && payloadID != nil {
var pId [8]byte
copy(pId[:], payloadID[:])
log.WithFields(logrus.Fields{
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(arg.headRoot[:])),
"headSlot": headBlk.Slot(),
"nextSlot": nextSlot,
"payloadID": fmt.Sprintf("%#x", bytesutil.Trunc(payloadID[:])),
}).Info("Forkchoice updated with payload attributes for proposal")
s.cfg.PayloadIDCache.Set(nextSlot, arg.headRoot, pId)
} else if hasAttr && payloadID == nil && !features.Get().PrepareAllPayloads {
log.WithFields(logrus.Fields{
"blockHash": fmt.Sprintf("%#x", headPayload.BlockHash()),
"slot": headBlk.Slot(),
"nextSlot": nextSlot,
}).Error("Received nil payload ID on VALID engine response")
}
return payloadID, nil
}
func (s *Service) firePayloadAttributesEvent(f event.SubscriberSender, block interfaces.ReadOnlySignedBeaconBlock, root [32]byte, nextSlot primitives.Slot) {
@@ -70,7 +219,76 @@ func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, er
// It returns true if the EL has returned VALID for the block
func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
preStateHeader interfaces.ExecutionData, blk interfaces.ReadOnlySignedBeaconBlock) (bool, error) {
return true, nil
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
defer span.End()
// Execution payload is only supported in Bellatrix and beyond. Pre-merge
// blocks are never optimistic.
if blk == nil {
return false, errors.New("signed beacon block can't be nil")
}
if preStateVersion < version.Bellatrix {
return true, nil
}
if err := consensusblocks.BeaconBlockIsNil(blk); err != nil {
return false, err
}
body := blk.Block().Body()
enabled, err := blocks.IsExecutionEnabledUsingHeader(preStateHeader, body)
if err != nil {
return false, errors.Wrap(invalidBlock{error: err}, "could not determine if execution is enabled")
}
if !enabled {
return true, nil
}
payload, err := body.Execution()
if err != nil {
return false, errors.Wrap(invalidBlock{error: err}, "could not get execution payload")
}
var lastValidHash []byte
var parentRoot *common.Hash
var versionedHashes []common.Hash
var requests *enginev1.ExecutionRequests
if blk.Version() >= version.Deneb {
versionedHashes, err = kzgCommitmentsToVersionedHashes(blk.Block().Body())
if err != nil {
return false, errors.Wrap(err, "could not get versioned hashes to feed the engine")
}
prh := common.Hash(blk.Block().ParentRoot())
parentRoot = &prh
}
if blk.Version() >= version.Electra {
requests, err = blk.Block().Body().ExecutionRequests()
if err != nil {
return false, errors.Wrap(err, "could not get execution requests")
}
if requests == nil {
return false, errors.New("nil execution requests")
}
}
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests)
switch {
case err == nil:
newPayloadValidNodeCount.Inc()
return true, nil
case errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus):
newPayloadOptimisticNodeCount.Inc()
log.WithFields(logrus.Fields{
"slot": blk.Block().Slot(),
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
}).Info("Called new payload with optimistic block")
return false, nil
case errors.Is(err, execution.ErrInvalidPayloadStatus):
lvh := bytesutil.ToBytes32(lastValidHash)
return false, invalidBlock{
error: ErrInvalidPayload,
lastValidHash: lvh,
}
default:
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
}
// reportInvalidBlock deals with the event that an invalid block was detected by the execution layer

View File

@@ -2796,7 +2796,7 @@ func TestProcessLightClientUpdate(t *testing.T) {
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
// create and save old update
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
@@ -2848,7 +2848,7 @@ func TestProcessLightClientUpdate(t *testing.T) {
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
// create and save old update
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
require.NoError(t, err)
scb := make([]byte, 64)
@@ -2954,7 +2954,7 @@ func TestProcessLightClientUpdate(t *testing.T) {
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
// create and save old update
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
@@ -3006,7 +3006,7 @@ func TestProcessLightClientUpdate(t *testing.T) {
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
// create and save old update
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
require.NoError(t, err)
scb := make([]byte, 64)
@@ -3112,7 +3112,7 @@ func TestProcessLightClientUpdate(t *testing.T) {
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
// create and save old update
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
require.NoError(t, err)
err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
@@ -3164,7 +3164,7 @@ func TestProcessLightClientUpdate(t *testing.T) {
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
// create and save old update
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
require.NoError(t, err)
scb := make([]byte, 64)
@@ -3647,7 +3647,7 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {
expectedVersion = version.Altair
case 2:
forkEpoch = uint64(params.BeaconConfig().BellatrixForkEpoch)
expectedVersion = version.Altair
expectedVersion = version.Bellatrix
case 3:
forkEpoch = uint64(params.BeaconConfig().CapellaForkEpoch)
expectedVersion = version.Capella
@@ -3656,7 +3656,7 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {
expectedVersion = version.Deneb
case 5:
forkEpoch = uint64(params.BeaconConfig().ElectraForkEpoch)
expectedVersion = version.Deneb
expectedVersion = version.Electra
default:
t.Errorf("Unsupported fork version %s", version.String(testVersion))
}
@@ -3801,7 +3801,7 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {
expectedVersion = version.Altair
case 2:
forkEpoch = uint64(params.BeaconConfig().BellatrixForkEpoch)
expectedVersion = version.Altair
expectedVersion = version.Bellatrix
case 3:
forkEpoch = uint64(params.BeaconConfig().CapellaForkEpoch)
expectedVersion = version.Capella

View File

@@ -31,7 +31,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -111,26 +110,22 @@ var ErrMissingClockSetter = errors.New("blockchain Service initialized without a
type blobNotifierMap struct {
sync.RWMutex
notifiers map[[32]byte]chan uint64
// TODO: Separate blobs from data columns
// seenIndex map[[32]byte][]bool
seenIndex map[[32]byte][fieldparams.NumberOfColumns]bool
seenIndex map[[32]byte][]bool
}
// notifyIndex notifies a blob by its index for a given root.
// It uses internal maps to keep track of seen indices and notifier channels.
func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitives.Slot) {
// TODO: Separate blobs from data columns
// maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
// if idx >= uint64(maxBlobsPerBlock) {
// return
// }
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
if idx >= uint64(maxBlobsPerBlock) {
return
}
bn.Lock()
seen := bn.seenIndex[root]
// TODO: Separate blobs from data columns
// if seen == nil {
// seen = make([]bool, maxBlobsPerBlock)
// }
if seen == nil {
seen = make([]bool, maxBlobsPerBlock)
}
if seen[idx] {
bn.Unlock()
return
@@ -141,9 +136,7 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
// Retrieve or create the notifier channel for the given root.
c, ok := bn.notifiers[root]
if !ok {
// TODO: Separate blobs from data columns
// c = make(chan uint64, maxBlobsPerBlock)
c = make(chan uint64, fieldparams.NumberOfColumns)
c = make(chan uint64, maxBlobsPerBlock)
bn.notifiers[root] = c
}
@@ -153,15 +146,12 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
}
func (bn *blobNotifierMap) forRoot(root [32]byte, slot primitives.Slot) chan uint64 {
// TODO: Separate blobs from data columns
// maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
bn.Lock()
defer bn.Unlock()
c, ok := bn.notifiers[root]
if !ok {
// TODO: Separate blobs from data columns
// c = make(chan uint64, maxBlobsPerBlock)
c = make(chan uint64, fieldparams.NumberOfColumns)
c = make(chan uint64, maxBlobsPerBlock)
bn.notifiers[root] = c
}
return c
@@ -187,9 +177,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
ctx, cancel := context.WithCancel(ctx)
bn := &blobNotifierMap{
notifiers: make(map[[32]byte]chan uint64),
// TODO: Separate blobs from data columns
// seenIndex: make(map[[32]byte][]bool),
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
seenIndex: make(map[[32]byte][]bool),
}
srv := &Service{
ctx: ctx,
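
A hedged consumer sketch for the reverted notifier, based only on the `notifyIndex`/`forRoot` signatures above; it assumes the same package and a distinct index list (repeats are swallowed by `seenIndex`):

	// Sketch: one goroutine publishes blob indices as sidecars arrive; the
	// caller blocks on the per-root channel until each index has been seen.
	func waitForBlobIndices(bn *blobNotifierMap, root [32]byte, slot primitives.Slot, indices []uint64) {
		ch := bn.forRoot(root, slot)
		go func() {
			for _, idx := range indices {
				bn.notifyIndex(root, idx, slot) // duplicate indices are dropped
			}
		}()
		for range indices {
			<-ch
		}
	}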

View File

@@ -587,9 +587,7 @@ func (s *MockClockSetter) SetClock(g *startup.Clock) error {
func TestNotifyIndex(t *testing.T) {
// Initialize a blobNotifierMap
bn := &blobNotifierMap{
// TODO: Separate blobs from data columns
// seenIndex: make(map[[32]byte][]bool),
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
seenIndex: make(map[[32]byte][]bool),
notifiers: make(map[[32]byte]chan uint64),
}

View File

@@ -41,6 +41,7 @@ go_library(
"//encoding/ssz:go_default_library",
"//math:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//network/forks:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",

View File

@@ -11,6 +11,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/attestation"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -95,30 +96,12 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}
func VerifyBlockHeaderSignatureUsingCurrentFork(beaconState state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
currentEpoch := slots.ToEpoch(header.Header.Slot)
fork, err := params.Fork(currentEpoch)
if err != nil {
return err
}
domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
if err != nil {
return err
}
proposer, err := beaconState.ValidatorAtIndex(header.Header.ProposerIndex)
if err != nil {
return err
}
proposerPubKey := proposer.PublicKey
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}
// VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
// from the above method by not using fork data from the state and instead retrieving it
// via the respective epoch.
func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState, blk interfaces.ReadOnlySignedBeaconBlock, blkRoot [32]byte) error {
currentEpoch := slots.ToEpoch(blk.Block().Slot())
fork, err := params.Fork(currentEpoch)
fork, err := forks.Fork(currentEpoch)
if err != nil {
return err
}

View File

@@ -403,7 +403,7 @@ func AssignmentForValidator(
}
}
}
return nil // validator is not scheduled this epoch
return &LiteAssignment{} // validator is not scheduled this epoch
}
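
A hedged caller sketch for the hunk above: returning an empty `LiteAssignment` instead of nil means callers that dereference the result no longer panic for an unscheduled validator (the field name below is hypothetical):

	// Sketch: dereferencing the result is now always safe; an unscheduled
	// validator simply yields zero values.
	a := AssignmentForValidator(bySlot, start, valIdx)
	_ = a.CommitteeIndex // hypothetical field; zero value when unscheduled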
// CommitteeAssignments calculates committee assignments for each validator during the specified epoch.
@@ -669,11 +669,11 @@ func ComputeCommittee(
// InitializeProposerLookahead computes the list of the proposer indices for the next MIN_SEED_LOOKAHEAD + 1 epochs.
func InitializeProposerLookahead(ctx context.Context, state state.ReadOnlyBeaconState, epoch primitives.Epoch) ([]uint64, error) {
lookAhead := make([]uint64, 0, uint64(params.BeaconConfig().MinSeedLookahead+1)*uint64(params.BeaconConfig().SlotsPerEpoch))
indices, err := ActiveValidatorIndices(ctx, state, epoch)
if err != nil {
return nil, errors.Wrap(err, "could not get active indices")
}
for i := range params.BeaconConfig().MinSeedLookahead + 1 {
indices, err := ActiveValidatorIndices(ctx, state, epoch+i)
if err != nil {
return nil, errors.Wrap(err, "could not get active indices")
}
proposerIndices, err := PrecomputeProposerIndices(state, indices, epoch+i)
if err != nil {
return nil, errors.Wrap(err, "could not compute proposer indices")

View File

@@ -912,6 +912,7 @@ func TestAssignmentForValidator(t *testing.T) {
{{4, 5, 6}},
}
got = helpers.AssignmentForValidator(bySlot, start, primitives.ValidatorIndex(99))
require.IsNil(t, got)
// should be empty to be safe
require.DeepEqual(t, &helpers.LiteAssignment{}, got)
})
}

View File

@@ -78,7 +78,6 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
helpers.ClearCache()
params.SetupTestConfigCleanup(t)
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -265,7 +264,6 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
}
func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
params.SetupTestConfigCleanup(t)
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)

View File

@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"helpers.go",
"lightclient.go",
"store.go",
],
@@ -41,7 +42,6 @@ go_test(
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/light-client:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/ssz:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

View File

@@ -0,0 +1,243 @@
package light_client
import (
"context"
"fmt"
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
light_client "github.com/OffchainLabs/prysm/v6/consensus-types/light-client"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/pkg/errors"
"google.golang.org/protobuf/proto"
)
func createDefaultLightClientBootstrap(currentSlot primitives.Slot) (interfaces.LightClientBootstrap, error) {
currentEpoch := slots.ToEpoch(currentSlot)
syncCommitteeSize := params.BeaconConfig().SyncCommitteeSize
pubKeys := make([][]byte, syncCommitteeSize)
for i := uint64(0); i < syncCommitteeSize; i++ {
pubKeys[i] = make([]byte, fieldparams.BLSPubkeyLength)
}
currentSyncCommittee := &pb.SyncCommittee{
Pubkeys: pubKeys,
AggregatePubkey: make([]byte, fieldparams.BLSPubkeyLength),
}
var currentSyncCommitteeBranch [][]byte
if currentEpoch >= params.BeaconConfig().ElectraForkEpoch {
currentSyncCommitteeBranch = make([][]byte, fieldparams.SyncCommitteeBranchDepthElectra)
} else {
currentSyncCommitteeBranch = make([][]byte, fieldparams.SyncCommitteeBranchDepth)
}
for i := 0; i < len(currentSyncCommitteeBranch); i++ {
currentSyncCommitteeBranch[i] = make([]byte, fieldparams.RootLength)
}
executionBranch := make([][]byte, fieldparams.ExecutionBranchDepth)
for i := 0; i < fieldparams.ExecutionBranchDepth; i++ {
executionBranch[i] = make([]byte, 32)
}
var m proto.Message
if currentEpoch < params.BeaconConfig().CapellaForkEpoch {
m = &pb.LightClientBootstrapAltair{
Header: &pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{},
},
CurrentSyncCommittee: currentSyncCommittee,
CurrentSyncCommitteeBranch: currentSyncCommitteeBranch,
}
} else if currentEpoch < params.BeaconConfig().DenebForkEpoch {
m = &pb.LightClientBootstrapCapella{
Header: &pb.LightClientHeaderCapella{
Beacon: &pb.BeaconBlockHeader{},
Execution: &enginev1.ExecutionPayloadHeaderCapella{},
ExecutionBranch: executionBranch,
},
CurrentSyncCommittee: currentSyncCommittee,
CurrentSyncCommitteeBranch: currentSyncCommitteeBranch,
}
} else if currentEpoch < params.BeaconConfig().ElectraForkEpoch {
m = &pb.LightClientBootstrapDeneb{
Header: &pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{},
Execution: &enginev1.ExecutionPayloadHeaderDeneb{},
ExecutionBranch: executionBranch,
},
CurrentSyncCommittee: currentSyncCommittee,
CurrentSyncCommitteeBranch: currentSyncCommitteeBranch,
}
} else {
m = &pb.LightClientBootstrapElectra{
Header: &pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{},
Execution: &enginev1.ExecutionPayloadHeaderDeneb{},
ExecutionBranch: executionBranch,
},
CurrentSyncCommittee: currentSyncCommittee,
CurrentSyncCommitteeBranch: currentSyncCommitteeBranch,
}
}
return light_client.NewWrappedBootstrap(m)
}
func makeExecutionAndProofDeneb(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*enginev1.ExecutionPayloadHeaderDeneb, [][]byte, error) {
if blk.Version() < version.Capella {
p, err := execution.EmptyExecutionPayloadHeader(version.Deneb)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get payload header")
}
payloadHeader, ok := p.(*enginev1.ExecutionPayloadHeaderDeneb)
if !ok {
return nil, nil, fmt.Errorf("payload header type %T is not %T", p, &enginev1.ExecutionPayloadHeaderDeneb{})
}
payloadProof := emptyPayloadProof()
return payloadHeader, payloadProof, nil
}
payload, err := blk.Block().Body().Execution()
if err != nil {
return nil, nil, errors.Wrap(err, "could not get execution payload")
}
transactionsRoot, err := ComputeTransactionsRoot(payload)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get transactions root")
}
withdrawalsRoot, err := ComputeWithdrawalsRoot(payload)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get withdrawals root")
}
payloadHeader := &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: payload.ParentHash(),
FeeRecipient: payload.FeeRecipient(),
StateRoot: payload.StateRoot(),
ReceiptsRoot: payload.ReceiptsRoot(),
LogsBloom: payload.LogsBloom(),
PrevRandao: payload.PrevRandao(),
BlockNumber: payload.BlockNumber(),
GasLimit: payload.GasLimit(),
GasUsed: payload.GasUsed(),
Timestamp: payload.Timestamp(),
ExtraData: payload.ExtraData(),
BaseFeePerGas: payload.BaseFeePerGas(),
BlockHash: payload.BlockHash(),
TransactionsRoot: transactionsRoot,
WithdrawalsRoot: withdrawalsRoot,
BlobGasUsed: 0,
ExcessBlobGas: 0,
}
if blk.Version() >= version.Deneb {
blobGasUsed, err := payload.BlobGasUsed()
if err != nil {
return nil, nil, errors.Wrap(err, "could not get blob gas used")
}
excessBlobGas, err := payload.ExcessBlobGas()
if err != nil {
return nil, nil, errors.Wrap(err, "could not get excess blob gas")
}
payloadHeader.BlobGasUsed = blobGasUsed
payloadHeader.ExcessBlobGas = excessBlobGas
}
payloadProof, err := blocks.PayloadProof(ctx, blk.Block())
if err != nil {
return nil, nil, errors.Wrap(err, "could not get execution payload proof")
}
return payloadHeader, payloadProof, nil
}
func makeExecutionAndProofCapella(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*enginev1.ExecutionPayloadHeaderCapella, [][]byte, error) {
if blk.Version() > version.Capella {
return nil, nil, fmt.Errorf("unsupported block version %s for capella execution payload", version.String(blk.Version()))
}
if blk.Version() < version.Capella {
p, err := execution.EmptyExecutionPayloadHeader(version.Capella)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get payload header")
}
payloadHeader, ok := p.(*enginev1.ExecutionPayloadHeaderCapella)
if !ok {
return nil, nil, fmt.Errorf("payload header type %T is not %T", p, &enginev1.ExecutionPayloadHeaderCapella{})
}
payloadProof := emptyPayloadProof()
return payloadHeader, payloadProof, nil
}
payload, err := blk.Block().Body().Execution()
if err != nil {
return nil, nil, errors.Wrap(err, "could not get execution payload")
}
transactionsRoot, err := ComputeTransactionsRoot(payload)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get transactions root")
}
withdrawalsRoot, err := ComputeWithdrawalsRoot(payload)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get withdrawals root")
}
payloadHeader := &enginev1.ExecutionPayloadHeaderCapella{
ParentHash: payload.ParentHash(),
FeeRecipient: payload.FeeRecipient(),
StateRoot: payload.StateRoot(),
ReceiptsRoot: payload.ReceiptsRoot(),
LogsBloom: payload.LogsBloom(),
PrevRandao: payload.PrevRandao(),
BlockNumber: payload.BlockNumber(),
GasLimit: payload.GasLimit(),
GasUsed: payload.GasUsed(),
Timestamp: payload.Timestamp(),
ExtraData: payload.ExtraData(),
BaseFeePerGas: payload.BaseFeePerGas(),
BlockHash: payload.BlockHash(),
TransactionsRoot: transactionsRoot,
WithdrawalsRoot: withdrawalsRoot,
}
payloadProof, err := blocks.PayloadProof(ctx, blk.Block())
if err != nil {
return nil, nil, errors.Wrap(err, "could not get execution payload proof")
}
return payloadHeader, payloadProof, nil
}
func makeBeaconBlockHeader(blk interfaces.ReadOnlySignedBeaconBlock) (*pb.BeaconBlockHeader, error) {
parentRoot := blk.Block().ParentRoot()
stateRoot := blk.Block().StateRoot()
bodyRoot, err := blk.Block().Body().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "could not get body root")
}
return &pb.BeaconBlockHeader{
Slot: blk.Block().Slot(),
ProposerIndex: blk.Block().ProposerIndex(),
ParentRoot: parentRoot[:],
StateRoot: stateRoot[:],
BodyRoot: bodyRoot[:],
}, nil
}
func emptyPayloadProof() [][]byte {
branch := interfaces.LightClientExecutionBranch{}
proof := make([][]byte, len(branch))
for i, b := range branch {
proof[i] = b[:]
}
return proof
}

View File

@@ -6,12 +6,10 @@ import (
"fmt"
"reflect"
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
consensus_types "github.com/OffchainLabs/prysm/v6/consensus-types"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
light_client "github.com/OffchainLabs/prysm/v6/consensus-types/light-client"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -163,13 +161,13 @@ func NewLightClientUpdateFromBeaconState(
updateAttestedPeriod := slots.SyncCommitteePeriod(slots.ToEpoch(attestedBlock.Block().Slot()))
// update = LightClientUpdate()
result, err := CreateDefaultLightClientUpdate(currentSlot, attestedState)
result, err := CreateDefaultLightClientUpdate(attestedBlock)
if err != nil {
return nil, errors.Wrap(err, "could not create default light client update")
}
// update.attested_header = block_to_light_client_header(attested_block)
attestedLightClientHeader, err := BlockToLightClientHeader(ctx, currentSlot, attestedBlock)
attestedLightClientHeader, err := BlockToLightClientHeader(ctx, attestedBlock.Version(), attestedBlock)
if err != nil {
return nil, errors.Wrap(err, "could not get attested light client header")
}
@@ -210,7 +208,7 @@ func NewLightClientUpdateFromBeaconState(
// if finalized_block.message.slot != GENESIS_SLOT
if finalizedBlock.Block().Slot() != 0 {
// update.finalized_header = block_to_light_client_header(finalized_block)
finalizedLightClientHeader, err := BlockToLightClientHeader(ctx, currentSlot, finalizedBlock)
finalizedLightClientHeader, err := BlockToLightClientHeader(ctx, attestedBlock.Version(), finalizedBlock)
if err != nil {
return nil, errors.Wrap(err, "could not get finalized light client header")
}
@@ -247,9 +245,7 @@ func NewLightClientUpdateFromBeaconState(
return result, nil
}
func CreateDefaultLightClientUpdate(currentSlot primitives.Slot, attestedState state.BeaconState) (interfaces.LightClientUpdate, error) {
currentEpoch := slots.ToEpoch(currentSlot)
func CreateDefaultLightClientUpdate(attestedBlock interfaces.ReadOnlySignedBeaconBlock) (interfaces.LightClientUpdate, error) {
syncCommitteeSize := params.BeaconConfig().SyncCommitteeSize
pubKeys := make([][]byte, syncCommitteeSize)
for i := uint64(0); i < syncCommitteeSize; i++ {
@@ -261,7 +257,7 @@ func CreateDefaultLightClientUpdate(currentSlot primitives.Slot, attestedState s
}
var nextSyncCommitteeBranch [][]byte
if attestedState.Version() >= version.Electra {
if attestedBlock.Version() >= version.Electra {
nextSyncCommitteeBranch = make([][]byte, fieldparams.SyncCommitteeBranchDepthElectra)
} else {
nextSyncCommitteeBranch = make([][]byte, fieldparams.SyncCommitteeBranchDepth)
@@ -276,7 +272,7 @@ func CreateDefaultLightClientUpdate(currentSlot primitives.Slot, attestedState s
}
var finalityBranch [][]byte
if attestedState.Version() >= version.Electra {
if attestedBlock.Version() >= version.Electra {
finalityBranch = make([][]byte, fieldparams.FinalityBranchDepthElectra)
} else {
finalityBranch = make([][]byte, fieldparams.FinalityBranchDepth)
@@ -286,10 +282,12 @@ func CreateDefaultLightClientUpdate(currentSlot primitives.Slot, attestedState s
}
var m proto.Message
if currentEpoch < params.BeaconConfig().CapellaForkEpoch {
switch attestedBlock.Version() {
case version.Altair, version.Bellatrix:
m = &pb.LightClientUpdateAltair{
AttestedHeader: &pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{
Slot: attestedBlock.Block().Slot(),
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
@@ -310,10 +308,11 @@ func CreateDefaultLightClientUpdate(currentSlot primitives.Slot, attestedState s
SyncCommitteeSignature: make([]byte, 96),
},
}
} else if currentEpoch < params.BeaconConfig().DenebForkEpoch {
case version.Capella:
m = &pb.LightClientUpdateCapella{
AttestedHeader: &pb.LightClientHeaderCapella{
Beacon: &pb.BeaconBlockHeader{
Slot: attestedBlock.Block().Slot(),
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
@@ -362,10 +361,11 @@ func CreateDefaultLightClientUpdate(currentSlot primitives.Slot, attestedState s
SyncCommitteeSignature: make([]byte, 96),
},
}
} else if currentEpoch < params.BeaconConfig().ElectraForkEpoch {
case version.Deneb:
m = &pb.LightClientUpdateDeneb{
AttestedHeader: &pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{
Slot: attestedBlock.Block().Slot(),
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
@@ -418,120 +418,65 @@ func CreateDefaultLightClientUpdate(currentSlot primitives.Slot, attestedState s
SyncCommitteeSignature: make([]byte, 96),
},
}
} else {
if attestedState.Version() >= version.Electra {
m = &pb.LightClientUpdateElectra{
AttestedHeader: &pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
Execution: &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
GasLimit: 0,
GasUsed: 0,
},
ExecutionBranch: executionBranch,
case version.Electra, version.Fulu:
m = &pb.LightClientUpdateElectra{
AttestedHeader: &pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{
Slot: attestedBlock.Block().Slot(),
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
NextSyncCommittee: nextSyncCommittee,
NextSyncCommitteeBranch: nextSyncCommitteeBranch,
FinalityBranch: finalityBranch,
FinalizedHeader: &pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
Execution: &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
GasLimit: 0,
GasUsed: 0,
},
ExecutionBranch: executionBranch,
Execution: &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
GasLimit: 0,
GasUsed: 0,
},
SyncAggregate: &pb.SyncAggregate{
SyncCommitteeBits: make([]byte, 64),
SyncCommitteeSignature: make([]byte, 96),
ExecutionBranch: executionBranch,
},
NextSyncCommittee: nextSyncCommittee,
NextSyncCommitteeBranch: nextSyncCommitteeBranch,
FinalityBranch: finalityBranch,
FinalizedHeader: &pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
}
} else {
m = &pb.LightClientUpdateDeneb{
AttestedHeader: &pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
Execution: &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
GasLimit: 0,
GasUsed: 0,
},
ExecutionBranch: executionBranch,
Execution: &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
GasLimit: 0,
GasUsed: 0,
},
NextSyncCommittee: nextSyncCommittee,
NextSyncCommitteeBranch: nextSyncCommitteeBranch,
FinalityBranch: finalityBranch,
FinalizedHeader: &pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
Execution: &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
GasLimit: 0,
GasUsed: 0,
},
ExecutionBranch: executionBranch,
},
SyncAggregate: &pb.SyncAggregate{
SyncCommitteeBits: make([]byte, 64),
SyncCommitteeSignature: make([]byte, 96),
},
}
ExecutionBranch: executionBranch,
},
SyncAggregate: &pb.SyncAggregate{
SyncCommitteeBits: make([]byte, 64),
SyncCommitteeSignature: make([]byte, 96),
},
}
default:
return nil, errors.Errorf("unsupported beacon chain version %s", version.String(attestedBlock.Version()))
}
return light_client.NewWrappedUpdate(m)
@@ -575,189 +520,52 @@ func ComputeWithdrawalsRoot(payload interfaces.ExecutionData) ([]byte, error) {
func BlockToLightClientHeader(
ctx context.Context,
currentSlot primitives.Slot,
block interfaces.ReadOnlySignedBeaconBlock,
attestedBlockVersion int, // this is the version that the light client header should be in, based on the attested block.
block interfaces.ReadOnlySignedBeaconBlock, // this block is either the attested block, or the finalized block.
// in case of the latter, we might need to upgrade it to the attested block's version.
) (interfaces.LightClientHeader, error) {
var m proto.Message
currentEpoch := slots.ToEpoch(currentSlot)
blockEpoch := slots.ToEpoch(block.Block().Slot())
parentRoot := block.Block().ParentRoot()
stateRoot := block.Block().StateRoot()
bodyRoot, err := block.Block().Body().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "could not get body root")
if block.Version() > attestedBlockVersion {
return nil, errors.Errorf("block version %s is greater than attested block version %s", version.String(block.Version()), version.String(attestedBlockVersion))
}
if currentEpoch < params.BeaconConfig().CapellaForkEpoch {
beacon, err := makeBeaconBlockHeader(block)
if err != nil {
return nil, errors.Wrap(err, "could not make beacon block header")
}
var m proto.Message
switch attestedBlockVersion {
case version.Altair, version.Bellatrix:
m = &pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{
Slot: block.Block().Slot(),
ProposerIndex: block.Block().ProposerIndex(),
ParentRoot: parentRoot[:],
StateRoot: stateRoot[:],
BodyRoot: bodyRoot[:],
},
Beacon: beacon,
}
} else if currentEpoch < params.BeaconConfig().DenebForkEpoch {
var payloadHeader *enginev1.ExecutionPayloadHeaderCapella
var payloadProof [][]byte
if blockEpoch < params.BeaconConfig().CapellaForkEpoch {
var ok bool
p, err := execution.EmptyExecutionPayloadHeader(version.Capella)
if err != nil {
return nil, errors.Wrap(err, "could not get payload header")
}
payloadHeader, ok = p.(*enginev1.ExecutionPayloadHeaderCapella)
if !ok {
return nil, fmt.Errorf("payload header type %T is not %T", p, &enginev1.ExecutionPayloadHeaderCapella{})
}
payloadProof = emptyPayloadProof()
} else {
payload, err := block.Block().Body().Execution()
if err != nil {
return nil, errors.Wrap(err, "could not get execution payload")
}
transactionsRoot, err := ComputeTransactionsRoot(payload)
if err != nil {
return nil, errors.Wrap(err, "could not get transactions root")
}
withdrawalsRoot, err := ComputeWithdrawalsRoot(payload)
if err != nil {
return nil, errors.Wrap(err, "could not get withdrawals root")
}
payloadHeader = &enginev1.ExecutionPayloadHeaderCapella{
ParentHash: payload.ParentHash(),
FeeRecipient: payload.FeeRecipient(),
StateRoot: payload.StateRoot(),
ReceiptsRoot: payload.ReceiptsRoot(),
LogsBloom: payload.LogsBloom(),
PrevRandao: payload.PrevRandao(),
BlockNumber: payload.BlockNumber(),
GasLimit: payload.GasLimit(),
GasUsed: payload.GasUsed(),
Timestamp: payload.Timestamp(),
ExtraData: payload.ExtraData(),
BaseFeePerGas: payload.BaseFeePerGas(),
BlockHash: payload.BlockHash(),
TransactionsRoot: transactionsRoot,
WithdrawalsRoot: withdrawalsRoot,
}
payloadProof, err = blocks.PayloadProof(ctx, block.Block())
if err != nil {
return nil, errors.Wrap(err, "could not get execution payload proof")
}
case version.Capella:
payloadHeader, payloadProof, err := makeExecutionAndProofCapella(ctx, block)
if err != nil {
return nil, errors.Wrap(err, "could not make execution payload header and proof")
}
m = &pb.LightClientHeaderCapella{
Beacon: &pb.BeaconBlockHeader{
Slot: block.Block().Slot(),
ProposerIndex: block.Block().ProposerIndex(),
ParentRoot: parentRoot[:],
StateRoot: stateRoot[:],
BodyRoot: bodyRoot[:],
},
Beacon: beacon,
Execution: payloadHeader,
ExecutionBranch: payloadProof,
}
} else {
var payloadHeader *enginev1.ExecutionPayloadHeaderDeneb
var payloadProof [][]byte
if blockEpoch < params.BeaconConfig().CapellaForkEpoch {
var ok bool
p, err := execution.EmptyExecutionPayloadHeader(version.Deneb)
if err != nil {
return nil, errors.Wrap(err, "could not get payload header")
}
payloadHeader, ok = p.(*enginev1.ExecutionPayloadHeaderDeneb)
if !ok {
return nil, fmt.Errorf("payload header type %T is not %T", p, &enginev1.ExecutionPayloadHeaderDeneb{})
}
payloadProof = emptyPayloadProof()
} else {
payload, err := block.Block().Body().Execution()
if err != nil {
return nil, errors.Wrap(err, "could not get execution payload")
}
transactionsRoot, err := ComputeTransactionsRoot(payload)
if err != nil {
return nil, errors.Wrap(err, "could not get transactions root")
}
withdrawalsRoot, err := ComputeWithdrawalsRoot(payload)
if err != nil {
return nil, errors.Wrap(err, "could not get withdrawals root")
}
var blobGasUsed uint64
var excessBlobGas uint64
if blockEpoch >= params.BeaconConfig().DenebForkEpoch {
blobGasUsed, err = payload.BlobGasUsed()
if err != nil {
return nil, errors.Wrap(err, "could not get blob gas used")
}
excessBlobGas, err = payload.ExcessBlobGas()
if err != nil {
return nil, errors.Wrap(err, "could not get excess blob gas")
}
}
payloadHeader = &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: payload.ParentHash(),
FeeRecipient: payload.FeeRecipient(),
StateRoot: payload.StateRoot(),
ReceiptsRoot: payload.ReceiptsRoot(),
LogsBloom: payload.LogsBloom(),
PrevRandao: payload.PrevRandao(),
BlockNumber: payload.BlockNumber(),
GasLimit: payload.GasLimit(),
GasUsed: payload.GasUsed(),
Timestamp: payload.Timestamp(),
ExtraData: payload.ExtraData(),
BaseFeePerGas: payload.BaseFeePerGas(),
BlockHash: payload.BlockHash(),
TransactionsRoot: transactionsRoot,
WithdrawalsRoot: withdrawalsRoot,
BlobGasUsed: blobGasUsed,
ExcessBlobGas: excessBlobGas,
}
payloadProof, err = blocks.PayloadProof(ctx, block.Block())
if err != nil {
return nil, errors.Wrap(err, "could not get execution payload proof")
}
case version.Deneb, version.Electra, version.Fulu:
payloadHeader, payloadProof, err := makeExecutionAndProofDeneb(ctx, block)
if err != nil {
return nil, errors.Wrap(err, "could not make execution payload header and proof")
}
m = &pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{
Slot: block.Block().Slot(),
ProposerIndex: block.Block().ProposerIndex(),
ParentRoot: parentRoot[:],
StateRoot: stateRoot[:],
BodyRoot: bodyRoot[:],
},
Beacon: beacon,
Execution: payloadHeader,
ExecutionBranch: payloadProof,
}
default:
return nil, fmt.Errorf("unsupported attested block version %s", version.String(attestedBlockVersion))
}
return light_client.NewWrappedHeader(m)
}
func emptyPayloadProof() [][]byte {
branch := interfaces.LightClientExecutionBranch{}
proof := make([][]byte, len(branch))
for i, b := range branch {
proof[i] = b[:]
}
return proof
}
func HasRelevantSyncCommittee(update interfaces.LightClientUpdate) (bool, error) {
if update.Version() >= version.Electra {
branch, err := update.NextSyncCommitteeBranchElectra()
@@ -909,7 +717,7 @@ func NewLightClientBootstrapFromBeaconState(
return nil, errors.Wrap(err, "could not create default light client bootstrap")
}
lightClientHeader, err := BlockToLightClientHeader(ctx, currentSlot, block)
lightClientHeader, err := BlockToLightClientHeader(ctx, state.Version(), block)
if err != nil {
return nil, errors.Wrap(err, "could not convert block to light client header")
}
@@ -942,78 +750,6 @@ func NewLightClientBootstrapFromBeaconState(
return bootstrap, nil
}
func createDefaultLightClientBootstrap(currentSlot primitives.Slot) (interfaces.LightClientBootstrap, error) {
currentEpoch := slots.ToEpoch(currentSlot)
syncCommitteeSize := params.BeaconConfig().SyncCommitteeSize
pubKeys := make([][]byte, syncCommitteeSize)
for i := uint64(0); i < syncCommitteeSize; i++ {
pubKeys[i] = make([]byte, fieldparams.BLSPubkeyLength)
}
currentSyncCommittee := &pb.SyncCommittee{
Pubkeys: pubKeys,
AggregatePubkey: make([]byte, fieldparams.BLSPubkeyLength),
}
var currentSyncCommitteeBranch [][]byte
if currentEpoch >= params.BeaconConfig().ElectraForkEpoch {
currentSyncCommitteeBranch = make([][]byte, fieldparams.SyncCommitteeBranchDepthElectra)
} else {
currentSyncCommitteeBranch = make([][]byte, fieldparams.SyncCommitteeBranchDepth)
}
for i := 0; i < len(currentSyncCommitteeBranch); i++ {
currentSyncCommitteeBranch[i] = make([]byte, fieldparams.RootLength)
}
executionBranch := make([][]byte, fieldparams.ExecutionBranchDepth)
for i := 0; i < fieldparams.ExecutionBranchDepth; i++ {
executionBranch[i] = make([]byte, 32)
}
// TODO: can this be based on the current epoch?
var m proto.Message
if currentEpoch < params.BeaconConfig().CapellaForkEpoch {
m = &pb.LightClientBootstrapAltair{
Header: &pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{},
},
CurrentSyncCommittee: currentSyncCommittee,
CurrentSyncCommitteeBranch: currentSyncCommitteeBranch,
}
} else if currentEpoch < params.BeaconConfig().DenebForkEpoch {
m = &pb.LightClientBootstrapCapella{
Header: &pb.LightClientHeaderCapella{
Beacon: &pb.BeaconBlockHeader{},
Execution: &enginev1.ExecutionPayloadHeaderCapella{},
ExecutionBranch: executionBranch,
},
CurrentSyncCommittee: currentSyncCommittee,
CurrentSyncCommitteeBranch: currentSyncCommitteeBranch,
}
} else if currentEpoch < params.BeaconConfig().ElectraForkEpoch {
m = &pb.LightClientBootstrapDeneb{
Header: &pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{},
Execution: &enginev1.ExecutionPayloadHeaderDeneb{},
ExecutionBranch: executionBranch,
},
CurrentSyncCommittee: currentSyncCommittee,
CurrentSyncCommitteeBranch: currentSyncCommitteeBranch,
}
} else {
m = &pb.LightClientBootstrapElectra{
Header: &pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{},
Execution: &enginev1.ExecutionPayloadHeaderDeneb{},
ExecutionBranch: executionBranch,
},
CurrentSyncCommittee: currentSyncCommittee,
CurrentSyncCommitteeBranch: currentSyncCommitteeBranch,
}
}
return light_client.NewWrappedBootstrap(m)
}
func UpdateHasSupermajority(syncAggregate *pb.SyncAggregate) bool {
maxActiveParticipants := syncAggregate.SyncCommitteeBits.Len()
numActiveParticipants := syncAggregate.SyncCommitteeBits.Count()
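The hunk is cut off here; for reference, the light client specs define supermajority as a two-thirds participation threshold, so the remainder of this helper presumably reduces to the comparison below (a sketch assumed from the spec's is_better_update rule, not shown in this diff):
// Sketch of the supermajority condition from the light client specs
// (two-thirds of the committee); assumption: the diff truncates before
// the actual return statement.
return numActiveParticipants*3 >= maxActiveParticipants*2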

View File

@@ -7,7 +7,6 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
light_client "github.com/OffchainLabs/prysm/v6/consensus-types/light-client"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/runtime/version"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
@@ -547,7 +546,7 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
header, err := lightClient.BlockToLightClientHeader(
l.Ctx,
primitives.Slot(params.BeaconConfig().AltairForkEpoch)*params.BeaconConfig().SlotsPerEpoch,
version.Altair,
l.Block,
)
require.NoError(t, err)
@@ -570,7 +569,7 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
header, err := lightClient.BlockToLightClientHeader(
l.Ctx,
primitives.Slot(params.BeaconConfig().BellatrixForkEpoch)*params.BeaconConfig().SlotsPerEpoch,
version.Bellatrix,
l.Block,
)
require.NoError(t, err)
@@ -594,7 +593,7 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
header, err := lightClient.BlockToLightClientHeader(
l.Ctx,
primitives.Slot(params.BeaconConfig().CapellaForkEpoch)*params.BeaconConfig().SlotsPerEpoch,
version.Capella,
l.Block,
)
require.NoError(t, err)
@@ -655,7 +654,7 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
header, err := lightClient.BlockToLightClientHeader(
l.Ctx,
primitives.Slot(params.BeaconConfig().CapellaForkEpoch)*params.BeaconConfig().SlotsPerEpoch,
version.Capella,
l.Block,
)
require.NoError(t, err)
@@ -718,7 +717,7 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
header, err := lightClient.BlockToLightClientHeader(
l.Ctx,
primitives.Slot(params.BeaconConfig().DenebForkEpoch)*params.BeaconConfig().SlotsPerEpoch,
version.Deneb,
l.Block,
)
require.NoError(t, err)
@@ -787,7 +786,7 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
header, err := lightClient.BlockToLightClientHeader(
l.Ctx,
primitives.Slot(params.BeaconConfig().DenebForkEpoch)*params.BeaconConfig().SlotsPerEpoch,
version.Deneb,
l.Block,
)
require.NoError(t, err)
@@ -856,7 +855,7 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
t.Run("Non-Blinded Beacon Block", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Electra)
header, err := lightClient.BlockToLightClientHeader(l.Ctx, l.State.Slot(), l.Block)
header, err := lightClient.BlockToLightClientHeader(l.Ctx, version.Electra, l.Block)
require.NoError(t, err)
require.NotNil(t, header, "header is nil")
@@ -921,7 +920,7 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
t.Run("Blinded Beacon Block", func(t *testing.T) {
l := util.NewTestLightClient(t, version.Electra, util.WithBlinded())
header, err := lightClient.BlockToLightClientHeader(l.Ctx, l.State.Slot(), l.Block)
header, err := lightClient.BlockToLightClientHeader(l.Ctx, version.Electra, l.Block)
require.NoError(t, err)
require.NotNil(t, header, "header is nil")
@@ -989,7 +988,7 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
header, err := lightClient.BlockToLightClientHeader(
l.Ctx,
primitives.Slot(params.BeaconConfig().CapellaForkEpoch)*params.BeaconConfig().SlotsPerEpoch,
version.Capella,
l.Block)
require.NoError(t, err)
require.NotNil(t, header, "header is nil")
@@ -1011,7 +1010,7 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
header, err := lightClient.BlockToLightClientHeader(
l.Ctx,
primitives.Slot(params.BeaconConfig().DenebForkEpoch)*params.BeaconConfig().SlotsPerEpoch,
version.Deneb,
l.Block)
require.NoError(t, err)
require.NotNil(t, header, "header is nil")
@@ -1034,7 +1033,7 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
header, err := lightClient.BlockToLightClientHeader(
l.Ctx,
primitives.Slot(params.BeaconConfig().DenebForkEpoch)*params.BeaconConfig().SlotsPerEpoch,
version.Deneb,
l.Block)
require.NoError(t, err)
require.NotNil(t, header, "header is nil")
@@ -1094,7 +1093,7 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
header, err := lightClient.BlockToLightClientHeader(
l.Ctx,
primitives.Slot(params.BeaconConfig().DenebForkEpoch)*params.BeaconConfig().SlotsPerEpoch,
version.Deneb,
l.Block)
require.NoError(t, err)
require.NotNil(t, header, "header is nil")
@@ -1180,14 +1179,13 @@ func createNonEmptyFinalityBranch() [][]byte {
}
func TestIsBetterUpdate(t *testing.T) {
config := params.BeaconConfig()
st, err := util.NewBeaconStateAltair()
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockAltair())
require.NoError(t, err)
t.Run("new has supermajority but old doesn't", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1203,9 +1201,9 @@ func TestIsBetterUpdate(t *testing.T) {
})
t.Run("old has supermajority but new doesn't", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1221,9 +1219,9 @@ func TestIsBetterUpdate(t *testing.T) {
})
t.Run("new doesn't have supermajority and newNumActiveParticipants is greater than oldNumActiveParticipants", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1239,9 +1237,9 @@ func TestIsBetterUpdate(t *testing.T) {
})
t.Run("new doesn't have supermajority and newNumActiveParticipants is lesser than oldNumActiveParticipants", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1257,9 +1255,9 @@ func TestIsBetterUpdate(t *testing.T) {
})
t.Run("new has relevant sync committee but old doesn't", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1296,9 +1294,9 @@ func TestIsBetterUpdate(t *testing.T) {
})
t.Run("old has relevant sync committee but new doesn't", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1335,9 +1333,9 @@ func TestIsBetterUpdate(t *testing.T) {
})
t.Run("new has finality but old doesn't", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1378,9 +1376,9 @@ func TestIsBetterUpdate(t *testing.T) {
})
t.Run("old has finality but new doesn't", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1421,9 +1419,9 @@ func TestIsBetterUpdate(t *testing.T) {
})
t.Run("new has finality and sync committee finality both but old doesn't have sync committee finality", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1482,9 +1480,9 @@ func TestIsBetterUpdate(t *testing.T) {
})
t.Run("new has finality but doesn't have sync committee finality and old has sync committee finality", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1543,9 +1541,9 @@ func TestIsBetterUpdate(t *testing.T) {
})
t.Run("new has more active participants than old", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1561,9 +1559,9 @@ func TestIsBetterUpdate(t *testing.T) {
})
t.Run("new has less active participants than old", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1579,9 +1577,9 @@ func TestIsBetterUpdate(t *testing.T) {
})
t.Run("new's attested header's slot is lesser than old's attested header's slot", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1640,9 +1638,9 @@ func TestIsBetterUpdate(t *testing.T) {
})
t.Run("new's attested header's slot is greater than old's attested header's slot", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1701,9 +1699,9 @@ func TestIsBetterUpdate(t *testing.T) {
})
t.Run("none of the above conditions are met and new signature's slot is less than old signature's slot", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
@@ -1762,9 +1760,9 @@ func TestIsBetterUpdate(t *testing.T) {
})
t.Run("none of the above conditions are met and new signature's slot is greater than old signature's slot", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(blk)
require.NoError(t, err)
oldUpdate.SetSyncAggregate(&pb.SyncAggregate{

View File

@@ -13,6 +13,10 @@ type Store struct {
lastOptimisticUpdate interfaces.LightClientOptimisticUpdate
}
func NewLightClientStore() *Store {
return &Store{}
}
func (s *Store) SetLastFinalityUpdate(update interfaces.LightClientFinalityUpdate) {
s.mu.Lock()
defer s.mu.Unlock()

View File

@@ -7,7 +7,6 @@ go_library(
"info.go",
"metrics.go",
"p2p_interface.go",
"peer_sampling.go",
"reconstruction.go",
"validator.go",
"verification.go",
@@ -45,7 +44,6 @@ go_test(
"das_core_test.go",
"info_test.go",
"p2p_interface_test.go",
"peer_sampling_test.go",
"reconstruction_test.go",
"utils_test.go",
"validator_test.go",

View File

@@ -10,7 +10,10 @@ import (
"github.com/pkg/errors"
)
const kzgPosition = 11 // The index of the KZG commitment list in the Body
const (
CustodyGroupCountEnrKey = "cgc"
kzgPosition = 11 // The index of the KZG commitment list in the Body
)
var (
ErrIndexTooLarge = errors.New("column index is larger than the specified columns count")
@@ -27,7 +30,7 @@ var (
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#custody-group-count
type Cgc uint64
func (Cgc) ENRKey() string { return params.BeaconNetworkConfig().CustodyGroupCountKey }
func (Cgc) ENRKey() string { return CustodyGroupCountEnrKey }
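For context, cgc is the ENR entry through which a node advertises its custody group count; a minimal sketch of setting and reading such an entry with go-ethereum's p2p/enr package, which Prysm builds on (the count of 8 is a hypothetical example):
// Sketch (assumption, not part of this diff): attach and read back a cgc entry.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/p2p/enr"
)

type Cgc uint64

func (Cgc) ENRKey() string { return "cgc" }

func main() {
	var record enr.Record
	record.Set(Cgc(8)) // advertise custody of 8 groups (hypothetical value)
	var got Cgc
	if err := record.Load(&got); err != nil {
		panic(err) // entry missing or not decodable
	}
	fmt.Println("cgc =", got)
}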
// VerifyDataColumnSidecar verifies if the data column sidecar is valid.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#verify_data_column_sidecar

View File

@@ -1,56 +0,0 @@
package peerdas
import (
"math/big"
"github.com/OffchainLabs/prysm/v6/config/params"
)
// ExtendedSampleCount computes, for a given number of samples per slot and allowed failures, the
// number of samples we should actually query from peers.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/peer-sampling.md#get_extended_sample_count
func ExtendedSampleCount(samplesPerSlot, allowedFailures uint64) uint64 {
// Retrieve the columns count
columnsCount := params.BeaconConfig().NumberOfColumns
// If half of the columns are missing, we can still reconstruct the data.
// If half of the columns + 1 are missing, we cannot.
// Half of the columns + 1 is therefore the smallest number of missing columns that defeats reconstruction (the worst case).
worstCaseMissing := columnsCount/2 + 1
// Compute the false positive threshold.
falsePositiveThreshold := HypergeomCDF(0, columnsCount, worstCaseMissing, samplesPerSlot)
var sampleCount uint64
// Finally, compute the extended sample count.
for sampleCount = samplesPerSlot; sampleCount < columnsCount+1; sampleCount++ {
if HypergeomCDF(allowedFailures, columnsCount, worstCaseMissing, sampleCount) <= falsePositiveThreshold {
break
}
}
return sampleCount
}
// HypergeomCDF computes the hypergeometric cumulative distribution function.
// https://en.wikipedia.org/wiki/Hypergeometric_distribution
func HypergeomCDF(k, M, n, N uint64) float64 {
denominatorInt := new(big.Int).Binomial(int64(M), int64(N)) // lint:ignore uintcast
denominator := new(big.Float).SetInt(denominatorInt)
rBig := big.NewFloat(0)
for i := uint64(0); i < k+1; i++ {
a := new(big.Int).Binomial(int64(n), int64(i)) // lint:ignore uintcast
b := new(big.Int).Binomial(int64(M-n), int64(N-i))
numeratorInt := new(big.Int).Mul(a, b)
numerator := new(big.Float).SetInt(numeratorInt)
item := new(big.Float).Quo(numerator, denominator)
rBig.Add(rBig, item)
}
r, _ := rBig.Float64()
return r
}
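For reference, a sketch of how the two removed helpers composed, assuming the mainnet column count of 128 (so worstCaseMissing = 65) and 16 samples per slot; the expected counts match the removed test table below:
// Sketch against the pre-removal API; values here are assumptions for illustration.
package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas" // pre-removal API
)

func main() {
	const samplesPerSlot = 16
	// False-positive threshold: the chance of drawing 16 columns with zero
	// failures even though 65 of the 128 columns are missing.
	threshold := peerdas.HypergeomCDF(0, 128, 65, samplesPerSlot)
	fmt.Printf("threshold: %.6f\n", threshold)
	// Extended sample counts for 0, 1, and 2 allowed failures: 16, 20, 24.
	for _, allowedFailures := range []uint64{0, 1, 2} {
		fmt.Println(peerdas.ExtendedSampleCount(samplesPerSlot, allowedFailures))
	}
}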

View File

@@ -1,60 +0,0 @@
package peerdas_test
import (
"testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/testing/require"
)
func TestExtendedSampleCount(t *testing.T) {
const samplesPerSlot = 16
testCases := []struct {
name string
allowedMissings uint64
extendedSampleCount uint64
}{
{name: "allowedMissings=0", allowedMissings: 0, extendedSampleCount: 16},
{name: "allowedMissings=1", allowedMissings: 1, extendedSampleCount: 20},
{name: "allowedMissings=2", allowedMissings: 2, extendedSampleCount: 24},
{name: "allowedMissings=3", allowedMissings: 3, extendedSampleCount: 27},
{name: "allowedMissings=4", allowedMissings: 4, extendedSampleCount: 29},
{name: "allowedMissings=5", allowedMissings: 5, extendedSampleCount: 32},
{name: "allowedMissings=6", allowedMissings: 6, extendedSampleCount: 35},
{name: "allowedMissings=7", allowedMissings: 7, extendedSampleCount: 37},
{name: "allowedMissings=8", allowedMissings: 8, extendedSampleCount: 40},
{name: "allowedMissings=9", allowedMissings: 9, extendedSampleCount: 42},
{name: "allowedMissings=10", allowedMissings: 10, extendedSampleCount: 44},
{name: "allowedMissings=11", allowedMissings: 11, extendedSampleCount: 47},
{name: "allowedMissings=12", allowedMissings: 12, extendedSampleCount: 49},
{name: "allowedMissings=13", allowedMissings: 13, extendedSampleCount: 51},
{name: "allowedMissings=14", allowedMissings: 14, extendedSampleCount: 53},
{name: "allowedMissings=15", allowedMissings: 15, extendedSampleCount: 55},
{name: "allowedMissings=16", allowedMissings: 16, extendedSampleCount: 57},
{name: "allowedMissings=17", allowedMissings: 17, extendedSampleCount: 59},
{name: "allowedMissings=18", allowedMissings: 18, extendedSampleCount: 61},
{name: "allowedMissings=19", allowedMissings: 19, extendedSampleCount: 63},
{name: "allowedMissings=20", allowedMissings: 20, extendedSampleCount: 65},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
result := peerdas.ExtendedSampleCount(samplesPerSlot, tc.allowedMissings)
require.Equal(t, tc.extendedSampleCount, result)
})
}
}
func TestHypergeomCDF(t *testing.T) {
// Distribution reference: https://en.wikipedia.org/wiki/Hypergeometric_distribution
// Parameters used here: population size 128, successes in population 65, sample size 16, successes in sample up to 5.
// Expected result: ~0.0797
const (
expected = 0.0796665913283742
margin = 0.000001
)
actual := peerdas.HypergeomCDF(5, 128, 65, 16)
require.Equal(t, true, expected-margin <= actual && actual <= expected+margin)
}

View File

@@ -4,6 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"domain.go",
"signature.go",
"signing_root.go",
],
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing",
@@ -24,6 +25,7 @@ go_test(
name = "go_default_test",
srcs = [
"domain_test.go",
"signature_test.go",
"signing_root_test.go",
],
embed = [":go_default_library"],

View File

@@ -0,0 +1,34 @@
package signing
import (
"github.com/OffchainLabs/prysm/v6/config/params"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/pkg/errors"
)
var ErrNilRegistration = errors.New("nil signed registration")
// VerifyRegistrationSignature verifies the signature of a validator's registration.
func VerifyRegistrationSignature(
sr *ethpb.SignedValidatorRegistrationV1,
) error {
if sr == nil || sr.Message == nil {
return ErrNilRegistration
}
d := params.BeaconConfig().DomainApplicationBuilder
// Per spec, the fork version and genesis validators root must be nil,
// which defaults them to the genesis fork version and a zero root.
sd, err := ComputeDomain(
d,
nil, /* fork version */
nil /* genesis val root */)
if err != nil {
return err
}
if err := VerifySigningRoot(sr.Message, sr.Message.Pubkey, sr.Signature, sd); err != nil {
return ErrSigFailedToVerify
}
return nil
}

View File

@@ -0,0 +1,42 @@
package signing_test
import (
"testing"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
)
func TestVerifyRegistrationSignature(t *testing.T) {
sk, err := bls.RandKey()
require.NoError(t, err)
reg := &ethpb.ValidatorRegistrationV1{
FeeRecipient: bytesutil.PadTo([]byte("fee"), 20),
GasLimit: 123456,
Timestamp: uint64(time.Now().Unix()),
Pubkey: sk.PublicKey().Marshal(),
}
d := params.BeaconConfig().DomainApplicationBuilder
domain, err := signing.ComputeDomain(d, nil, nil)
require.NoError(t, err)
sr, err := signing.ComputeSigningRoot(reg, domain)
require.NoError(t, err)
sReg := &ethpb.SignedValidatorRegistrationV1{
Message: reg,
Signature: sk.Sign(sr[:]).Marshal(),
}
require.NoError(t, signing.VerifyRegistrationSignature(sReg))
sReg.Signature = []byte("bad")
require.ErrorIs(t, signing.VerifyRegistrationSignature(sReg), signing.ErrSigFailedToVerify)
sReg.Message = nil
require.ErrorIs(t, signing.VerifyRegistrationSignature(sReg), signing.ErrNilRegistration)
}

View File

@@ -53,11 +53,6 @@ func HigherEqualThanAltairVersionAndEpoch(s state.BeaconState, e primitives.Epoc
return s.Version() >= version.Altair && e >= params.BeaconConfig().AltairForkEpoch
}
// PeerDASIsActive checks whether peerDAS is active at the provided slot.
func PeerDASIsActive(slot primitives.Slot) bool {
return params.FuluEnabled() && slots.ToEpoch(slot) >= params.BeaconConfig().FuluForkEpoch
}
// CanUpgradeToAltair returns true if the input `slot` can upgrade to Altair.
// Spec code:
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == ALTAIR_FORK_EPOCH

View File

@@ -13,7 +13,6 @@ go_library(
visibility = [
"//beacon-chain:__subpackages__",
"//cmd/beacon-chain:__subpackages__",
"//genesis:__subpackages__",
"//testing/slasher/simulator:__pkg__",
"//tools:__subpackages__",
],

View File

@@ -38,6 +38,7 @@ go_library(
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/db/iface:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/genesis:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
@@ -49,7 +50,6 @@ go_library(
"//container/slice:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz/detect:go_default_library",
"//genesis:go_default_library",
"//io/file:go_default_library",
"//monitoring/progress:go_default_library",
"//monitoring/tracing:go_default_library",
@@ -106,6 +106,7 @@ go_test(
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/db/iface:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/genesis:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
@@ -115,7 +116,6 @@ go_test(
"//consensus-types/light-client:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//genesis/embedded:go_default_library",
"//proto/dbval:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

View File

@@ -8,7 +8,6 @@ import (
dbIface "github.com/OffchainLabs/prysm/v6/beacon-chain/db/iface"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/encoding/ssz/detect"
"github.com/OffchainLabs/prysm/v6/genesis"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/pkg/errors"
)
@@ -98,22 +97,8 @@ func (s *Store) EnsureEmbeddedGenesis(ctx context.Context) error {
if err != nil {
return err
}
if !state.IsNil(gs) {
if gs != nil && !gs.IsNil() {
return s.SaveGenesisData(ctx, gs)
}
return nil
}
type LegacyGenesisProvider struct {
store *Store
}
func NewLegacyGenesisProvider(store *Store) *LegacyGenesisProvider {
return &LegacyGenesisProvider{store: store}
}
var _ genesis.Provider = &LegacyGenesisProvider{}
func (p *LegacyGenesisProvider) Genesis(ctx context.Context) (state.BeaconState, error) {
return p.store.LegacyGenesisState(ctx)
}

View File

@@ -153,6 +153,13 @@ func decodeLightClientBootstrap(enc []byte) (interfaces.LightClientBootstrap, []
}
m = bootstrap
syncCommitteeHash = enc[len(altairKey) : len(altairKey)+32]
case hasBellatrixKey(enc):
bootstrap := &ethpb.LightClientBootstrapAltair{}
if err := bootstrap.UnmarshalSSZ(enc[len(bellatrixKey)+32:]); err != nil {
return nil, nil, errors.Wrap(err, "could not unmarshal Bellatrix light client bootstrap")
}
m = bootstrap
syncCommitteeHash = enc[len(bellatrixKey) : len(bellatrixKey)+32]
case hasCapellaKey(enc):
bootstrap := &ethpb.LightClientBootstrapCapella{}
if err := bootstrap.UnmarshalSSZ(enc[len(capellaKey)+32:]); err != nil {
@@ -265,6 +272,12 @@ func decodeLightClientUpdate(enc []byte) (interfaces.LightClientUpdate, error) {
return nil, errors.Wrap(err, "could not unmarshal Altair light client update")
}
m = update
case hasBellatrixKey(enc):
update := &ethpb.LightClientUpdateAltair{}
if err := update.UnmarshalSSZ(enc[len(bellatrixKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal Bellatrix light client update")
}
m = update
case hasCapellaKey(enc):
update := &ethpb.LightClientUpdateCapella{}
if err := update.UnmarshalSSZ(enc[len(capellaKey):]); err != nil {
@@ -297,6 +310,8 @@ func keyForLightClientUpdate(v int) ([]byte, error) {
return denebKey, nil
case version.Capella:
return capellaKey, nil
case version.Bellatrix:
return bellatrixKey, nil
case version.Altair:
return altairKey, nil
default:

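The decode and key-selection paths above imply the on-disk layout: a fork-version key prefix, then, for bootstraps only, a 32-byte sync committee hash, then the SSZ-encoded object. A minimal sketch of the matching Bellatrix bootstrap encode step (the helper name is hypothetical; bellatrixKey is the bucket prefix used above):
// Sketch mirroring the decoder: prefix || syncCommitteeHash || SSZ bytes.
func encodeBellatrixBootstrap(bellatrixKey []byte, syncCommitteeHash [32]byte, sszBytes []byte) []byte {
	enc := make([]byte, 0, len(bellatrixKey)+32+len(sszBytes))
	enc = append(enc, bellatrixKey...)
	enc = append(enc, syncCommitteeHash[:]...)
	return append(enc, sszBytes...)
}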
View File

@@ -46,7 +46,21 @@ func createUpdate(t *testing.T, v int) (interfaces.LightClientUpdate, error) {
slot = primitives.Slot(config.AltairForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
header, err = light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{
Slot: 1,
Slot: slot,
ProposerIndex: primitives.ValidatorIndex(rand.Int()),
ParentRoot: sampleRoot,
StateRoot: sampleRoot,
BodyRoot: sampleRoot,
},
})
require.NoError(t, err)
st, err = util.NewBeaconState()
require.NoError(t, err)
case version.Bellatrix:
slot = primitives.Slot(config.BellatrixForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
header, err = light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{
Slot: slot,
ProposerIndex: primitives.ValidatorIndex(rand.Int()),
ParentRoot: sampleRoot,
StateRoot: sampleRoot,
@@ -60,7 +74,7 @@ func createUpdate(t *testing.T, v int) (interfaces.LightClientUpdate, error) {
slot = primitives.Slot(config.CapellaForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
header, err = light_client.NewWrappedHeader(&pb.LightClientHeaderCapella{
Beacon: &pb.BeaconBlockHeader{
Slot: 1,
Slot: slot,
ProposerIndex: primitives.ValidatorIndex(rand.Int()),
ParentRoot: sampleRoot,
StateRoot: sampleRoot,
@@ -88,7 +102,7 @@ func createUpdate(t *testing.T, v int) (interfaces.LightClientUpdate, error) {
slot = primitives.Slot(config.DenebForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
header, err = light_client.NewWrappedHeader(&pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{
Slot: 1,
Slot: slot,
ProposerIndex: primitives.ValidatorIndex(rand.Int()),
ParentRoot: sampleRoot,
StateRoot: sampleRoot,
@@ -116,7 +130,7 @@ func createUpdate(t *testing.T, v int) (interfaces.LightClientUpdate, error) {
slot = primitives.Slot(config.ElectraForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
header, err = light_client.NewWrappedHeader(&pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{
Slot: 1,
Slot: slot,
ProposerIndex: primitives.ValidatorIndex(rand.Int()),
ParentRoot: sampleRoot,
StateRoot: sampleRoot,
@@ -144,7 +158,7 @@ func createUpdate(t *testing.T, v int) (interfaces.LightClientUpdate, error) {
slot = primitives.Slot(config.FuluForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
header, err = light_client.NewWrappedHeader(&pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{
Slot: 1,
Slot: slot,
ProposerIndex: primitives.ValidatorIndex(rand.Int()),
ParentRoot: sampleRoot,
StateRoot: sampleRoot,
@@ -192,71 +206,30 @@ func TestStore_LightClientUpdate_CanSaveRetrieve(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.AltairForkEpoch = 0
cfg.CapellaForkEpoch = 1
cfg.DenebForkEpoch = 2
cfg.ElectraForkEpoch = 3
cfg.FuluForkEpoch = 3
cfg.BellatrixForkEpoch = 1
cfg.CapellaForkEpoch = 2
cfg.DenebForkEpoch = 3
cfg.ElectraForkEpoch = 4
cfg.FuluForkEpoch = 5
params.OverrideBeaconConfig(cfg)
db := setupDB(t)
ctx := t.Context()
t.Run("Altair", func(t *testing.T) {
update, err := createUpdate(t, version.Altair)
require.NoError(t, err)
period := uint64(1)
for testVersion := version.Altair; testVersion <= version.Electra; testVersion++ {
t.Run(version.String(testVersion), func(t *testing.T) {
update, err := createUpdate(t, testVersion)
require.NoError(t, err)
period := uint64(1)
err = db.SaveLightClientUpdate(ctx, period, update)
require.NoError(t, err)
err = db.SaveLightClientUpdate(ctx, period, update)
require.NoError(t, err)
retrievedUpdate, err := db.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.DeepEqual(t, update, retrievedUpdate, "retrieved update does not match saved update")
})
t.Run("Capella", func(t *testing.T) {
update, err := createUpdate(t, version.Capella)
require.NoError(t, err)
period := uint64(1)
err = db.SaveLightClientUpdate(ctx, period, update)
require.NoError(t, err)
retrievedUpdate, err := db.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.DeepEqual(t, update, retrievedUpdate, "retrieved update does not match saved update")
})
t.Run("Deneb", func(t *testing.T) {
update, err := createUpdate(t, version.Deneb)
require.NoError(t, err)
period := uint64(1)
err = db.SaveLightClientUpdate(ctx, period, update)
require.NoError(t, err)
retrievedUpdate, err := db.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.DeepEqual(t, update, retrievedUpdate, "retrieved update does not match saved update")
})
t.Run("Electra", func(t *testing.T) {
update, err := createUpdate(t, version.Electra)
require.NoError(t, err)
period := uint64(1)
err = db.SaveLightClientUpdate(ctx, period, update)
require.NoError(t, err)
retrievedUpdate, err := db.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.DeepEqual(t, update, retrievedUpdate, "retrieved update does not match saved update")
})
t.Run("Fulu", func(t *testing.T) {
update, err := createUpdate(t, version.Fulu)
require.NoError(t, err)
period := uint64(1)
err = db.SaveLightClientUpdate(ctx, period, update)
require.NoError(t, err)
retrievedUpdate, err := db.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.DeepEqual(t, update, retrievedUpdate, "retrieved update does not match saved update")
})
retrievedUpdate, err := db.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.DeepEqual(t, update, retrievedUpdate, "retrieved update does not match saved update")
})
}
}
func TestStore_LightClientUpdates_canRetrieveRange(t *testing.T) {
@@ -584,12 +557,21 @@ func TestStore_LightClientBootstrap_CanSaveRetrieve(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.AltairForkEpoch = 0
cfg.CapellaForkEpoch = 1
cfg.DenebForkEpoch = 2
cfg.ElectraForkEpoch = 3
cfg.BellatrixForkEpoch = 1
cfg.CapellaForkEpoch = 2
cfg.DenebForkEpoch = 3
cfg.ElectraForkEpoch = 4
cfg.EpochsPerSyncCommitteePeriod = 1
params.OverrideBeaconConfig(cfg)
versionToForkEpoch := map[int]primitives.Epoch{
version.Altair: params.BeaconConfig().AltairForkEpoch,
version.Bellatrix: params.BeaconConfig().BellatrixForkEpoch,
version.Capella: params.BeaconConfig().CapellaForkEpoch,
version.Deneb: params.BeaconConfig().DenebForkEpoch,
version.Electra: params.BeaconConfig().ElectraForkEpoch,
}
db := setupDB(t)
ctx := t.Context()
@@ -599,89 +581,38 @@ func TestStore_LightClientBootstrap_CanSaveRetrieve(t *testing.T) {
require.IsNil(t, retrievedBootstrap)
})
t.Run("Altair", func(t *testing.T) {
bootstrap, err := createDefaultLightClientBootstrap(primitives.Slot(uint64(params.BeaconConfig().AltairForkEpoch) * uint64(params.BeaconConfig().SlotsPerEpoch)))
require.NoError(t, err)
for testVersion := version.Altair; testVersion <= version.Electra; testVersion++ {
t.Run(version.String(testVersion), func(t *testing.T) {
bootstrap, err := createDefaultLightClientBootstrap(primitives.Slot(uint64(versionToForkEpoch[testVersion]) * uint64(params.BeaconConfig().SlotsPerEpoch)))
require.NoError(t, err)
err = bootstrap.SetCurrentSyncCommittee(createRandomSyncCommittee())
require.NoError(t, err)
err = bootstrap.SetCurrentSyncCommittee(createRandomSyncCommittee())
require.NoError(t, err)
err = db.SaveLightClientBootstrap(ctx, []byte("blockRootAltair"), bootstrap)
require.NoError(t, err)
blockRoot := []byte("blockRoot" + version.String(testVersion))
retrievedBootstrap, err := db.LightClientBootstrap(ctx, []byte("blockRootAltair"))
require.NoError(t, err)
require.DeepEqual(t, bootstrap.Header(), retrievedBootstrap.Header(), "retrieved bootstrap header does not match saved bootstrap header")
require.DeepEqual(t, bootstrap.CurrentSyncCommittee(), retrievedBootstrap.CurrentSyncCommittee(), "retrieved bootstrap sync committee does not match saved bootstrap sync committee")
savedBranch, err := bootstrap.CurrentSyncCommitteeBranch()
require.NoError(t, err)
retrievedBranch, err := retrievedBootstrap.CurrentSyncCommitteeBranch()
require.NoError(t, err)
require.DeepEqual(t, savedBranch, retrievedBranch, "retrieved bootstrap sync committee branch does not match saved bootstrap sync committee branch")
})
err = db.SaveLightClientBootstrap(ctx, blockRoot, bootstrap)
require.NoError(t, err)
t.Run("Capella", func(t *testing.T) {
bootstrap, err := createDefaultLightClientBootstrap(primitives.Slot(uint64(params.BeaconConfig().CapellaForkEpoch) * uint64(params.BeaconConfig().SlotsPerEpoch)))
require.NoError(t, err)
err = bootstrap.SetCurrentSyncCommittee(createRandomSyncCommittee())
require.NoError(t, err)
err = db.SaveLightClientBootstrap(ctx, []byte("blockRootCapella"), bootstrap)
require.NoError(t, err)
retrievedBootstrap, err := db.LightClientBootstrap(ctx, []byte("blockRootCapella"))
require.NoError(t, err)
require.DeepEqual(t, bootstrap.Header(), retrievedBootstrap.Header(), "retrieved bootstrap header does not match saved bootstrap header")
require.DeepEqual(t, bootstrap.CurrentSyncCommittee(), retrievedBootstrap.CurrentSyncCommittee(), "retrieved bootstrap sync committee does not match saved bootstrap sync committee")
savedBranch, err := bootstrap.CurrentSyncCommitteeBranch()
require.NoError(t, err)
retrievedBranch, err := retrievedBootstrap.CurrentSyncCommitteeBranch()
require.NoError(t, err)
require.DeepEqual(t, savedBranch, retrievedBranch, "retrieved bootstrap sync committee branch does not match saved bootstrap sync committee branch")
})
t.Run("Deneb", func(t *testing.T) {
bootstrap, err := createDefaultLightClientBootstrap(primitives.Slot(uint64(params.BeaconConfig().DenebForkEpoch) * uint64(params.BeaconConfig().SlotsPerEpoch)))
require.NoError(t, err)
err = bootstrap.SetCurrentSyncCommittee(createRandomSyncCommittee())
require.NoError(t, err)
err = db.SaveLightClientBootstrap(ctx, []byte("blockRootDeneb"), bootstrap)
require.NoError(t, err)
retrievedBootstrap, err := db.LightClientBootstrap(ctx, []byte("blockRootDeneb"))
require.NoError(t, err)
require.DeepEqual(t, bootstrap.Header(), retrievedBootstrap.Header(), "retrieved bootstrap header does not match saved bootstrap header")
require.DeepEqual(t, bootstrap.CurrentSyncCommittee(), retrievedBootstrap.CurrentSyncCommittee(), "retrieved bootstrap sync committee does not match saved bootstrap sync committee")
savedBranch, err := bootstrap.CurrentSyncCommitteeBranch()
require.NoError(t, err)
retrievedBranch, err := retrievedBootstrap.CurrentSyncCommitteeBranch()
require.NoError(t, err)
require.DeepEqual(t, savedBranch, retrievedBranch, "retrieved bootstrap sync committee branch does not match saved bootstrap sync committee branch")
})
t.Run("Electra", func(t *testing.T) {
bootstrap, err := createDefaultLightClientBootstrap(primitives.Slot(uint64(params.BeaconConfig().ElectraForkEpoch) * uint64(params.BeaconConfig().SlotsPerEpoch)))
require.NoError(t, err)
err = bootstrap.SetCurrentSyncCommittee(createRandomSyncCommittee())
require.NoError(t, err)
err = db.SaveLightClientBootstrap(ctx, []byte("blockRootElectra"), bootstrap)
require.NoError(t, err)
retrievedBootstrap, err := db.LightClientBootstrap(ctx, []byte("blockRootElectra"))
require.NoError(t, err)
require.DeepEqual(t, bootstrap.Header(), retrievedBootstrap.Header(), "retrieved bootstrap header does not match saved bootstrap header")
require.DeepEqual(t, bootstrap.CurrentSyncCommittee(), retrievedBootstrap.CurrentSyncCommittee(), "retrieved bootstrap sync committee does not match saved bootstrap sync committee")
savedBranch, err := bootstrap.CurrentSyncCommitteeBranchElectra()
require.NoError(t, err)
retrievedBranch, err := retrievedBootstrap.CurrentSyncCommitteeBranchElectra()
require.NoError(t, err)
require.DeepEqual(t, savedBranch, retrievedBranch, "retrieved bootstrap sync committee branch does not match saved bootstrap sync committee branch")
})
retrievedBootstrap, err := db.LightClientBootstrap(ctx, blockRoot)
require.NoError(t, err)
require.DeepEqual(t, bootstrap.Header(), retrievedBootstrap.Header(), "retrieved bootstrap header does not match saved bootstrap header")
require.DeepEqual(t, bootstrap.CurrentSyncCommittee(), retrievedBootstrap.CurrentSyncCommittee(), "retrieved bootstrap sync committee does not match saved bootstrap sync committee")
if testVersion >= version.Electra {
savedBranch, err := bootstrap.CurrentSyncCommitteeBranchElectra()
require.NoError(t, err)
retrievedBranch, err := retrievedBootstrap.CurrentSyncCommitteeBranchElectra()
require.NoError(t, err)
require.DeepEqual(t, savedBranch, retrievedBranch, "retrieved bootstrap sync committee branch does not match saved bootstrap sync committee branch")
} else {
savedBranch, err := bootstrap.CurrentSyncCommitteeBranch()
require.NoError(t, err)
retrievedBranch, err := retrievedBootstrap.CurrentSyncCommitteeBranch()
require.NoError(t, err)
require.DeepEqual(t, savedBranch, retrievedBranch, "retrieved bootstrap sync committee branch does not match saved bootstrap sync committee branch")
}
})
}
}
func TestStore_LightClientBootstrap_MultipleBootstrapsWithSameSyncCommittee(t *testing.T) {
@@ -839,6 +770,7 @@ func createDefaultLightClientBootstrap(currentSlot primitives.Slot) (interfaces.
m = &pb.LightClientBootstrapAltair{
Header: &pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{
Slot: currentSlot,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
@@ -851,6 +783,7 @@ func createDefaultLightClientBootstrap(currentSlot primitives.Slot) (interfaces.
m = &pb.LightClientBootstrapCapella{
Header: &pb.LightClientHeaderCapella{
Beacon: &pb.BeaconBlockHeader{
Slot: currentSlot,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
@@ -877,6 +810,7 @@ func createDefaultLightClientBootstrap(currentSlot primitives.Slot) (interfaces.
m = &pb.LightClientBootstrapDeneb{
Header: &pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{
Slot: currentSlot,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
@@ -905,6 +839,7 @@ func createDefaultLightClientBootstrap(currentSlot primitives.Slot) (interfaces.
m = &pb.LightClientBootstrapElectra{
Header: &pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{
Slot: currentSlot,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),

View File

@@ -6,12 +6,14 @@ import (
"fmt"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/genesis"
statenative "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
@@ -63,16 +65,21 @@ func (s *Store) StateOrError(ctx context.Context, blockRoot [32]byte) (state.Bea
return st, nil
}
func (s *Store) GenesisState(ctx context.Context) (state.BeaconState, error) {
return genesis.State()
}
// GenesisState returns the genesis state of the beacon chain.
func (s *Store) LegacyGenesisState(ctx context.Context) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.LegacyGenesisState")
func (s *Store) GenesisState(ctx context.Context) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.GenesisState")
defer span.End()
var err error
cached, err := genesis.State(params.BeaconConfig().ConfigName)
if err != nil {
tracing.AnnotateError(span, err)
return nil, err
}
span.SetAttributes(trace.BoolAttribute("cache_hit", cached != nil))
if cached != nil {
return cached, nil
}
var st state.BeaconState
err = s.db.View(func(tx *bolt.Tx) error {
// Retrieve genesis block's signing root from blocks bucket,

View File

@@ -3,9 +3,9 @@ package kv
import (
"testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/genesis"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/genesis/embedded"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
)
@@ -18,7 +18,7 @@ func TestSaveOrigin(t *testing.T) {
ctx := t.Context()
db := setupDB(t)
st, err := embedded.ByName(params.MainnetName)
st, err := genesis.State(params.MainnetName)
require.NoError(t, err)
sb, err := st.MarshalSSZ()

View File

@@ -47,10 +47,12 @@ var (
GetPayloadBodiesByRangeV1,
GetBlobsV1,
}
electraEngineEndpoints = []string{
NewPayloadMethodV4,
GetPayloadMethodV4,
}
fuluEngineEndpoints = []string{
GetPayloadMethodV5,
GetBlobsV2,
@@ -96,12 +98,7 @@ const (
// GetBlobsV2 request string for JSON-RPC.
GetBlobsV2 = "engine_getBlobsV2"
// Defines the seconds before timing out engine endpoints with non-block execution semantics.
// TODO: Remove this temporarily needed hack: geth takes as input blob txs with blob proofs and
// does the heavy lifting of building cell proofs, while normally this is done by the tx sender.
// This is a cool hack because it lets the CL act as if the tx sender had actually computed the cell proofs.
// The only downside is that `engine_getPayloadV<x>` takes a lot of time.
// defaultEngineTimeout = time.Second
defaultEngineTimeout = 2 * time.Second
defaultEngineTimeout = time.Second
)
var (
@@ -125,8 +122,8 @@ type Reconstructor interface {
ReconstructFullBellatrixBlockBatch(
ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock,
) ([]interfaces.SignedBeaconBlock, error)
ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, hi func(uint64) bool) ([]blocks.VerifiedROBlob, error)
ReconstructDataColumnSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) ([]blocks.VerifiedRODataColumn, error)
ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte, hi func(uint64) bool) ([]blocks.VerifiedROBlob, error)
ReconstructDataColumnSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error)
}
// EngineCaller defines a client that can interact with an Ethereum
@@ -275,17 +272,17 @@ func (s *Service) ForkchoiceUpdated(
}
func getPayloadMethodAndMessage(slot primitives.Slot) (string, proto.Message) {
pe := slots.ToEpoch(slot)
if pe >= params.BeaconConfig().FuluForkEpoch {
epoch := slots.ToEpoch(slot)
if epoch >= params.BeaconConfig().FuluForkEpoch {
return GetPayloadMethodV5, &pb.ExecutionBundleFulu{}
}
if pe >= params.BeaconConfig().ElectraForkEpoch {
if epoch >= params.BeaconConfig().ElectraForkEpoch {
return GetPayloadMethodV4, &pb.ExecutionBundleElectra{}
}
if pe >= params.BeaconConfig().DenebForkEpoch {
if epoch >= params.BeaconConfig().DenebForkEpoch {
return GetPayloadMethodV3, &pb.ExecutionPayloadDenebWithValueAndBlobsBundle{}
}
if pe >= params.BeaconConfig().CapellaForkEpoch {
if epoch >= params.BeaconConfig().CapellaForkEpoch {
return GetPayloadMethodV2, &pb.ExecutionPayloadCapellaWithValue{}
}
return GetPayloadMethod, &pb.ExecutionPayload{}
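For illustration, the epoch-to-method mapping above reduces to a highest-fork-first switch; a sketch with hypothetical fork epochs (real code reads them from params.BeaconConfig()):
// Sketch: hypothetical fork epochs 2..5 stand in for the configured values.
func payloadMethodForEpoch(epoch primitives.Epoch) string {
	switch {
	case epoch >= 5: // Fulu (hypothetical epoch)
		return GetPayloadMethodV5
	case epoch >= 4: // Electra
		return GetPayloadMethodV4
	case epoch >= 3: // Deneb
		return GetPayloadMethodV3
	case epoch >= 2: // Capella
		return GetPayloadMethodV2
	default:
		return GetPayloadMethod
	}
}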
@@ -338,7 +335,7 @@ func (s *Service) ExchangeCapabilities(ctx context.Context) ([]string, error) {
elSupportedEndpoints[method] = true
}
unsupported := make([]string, 0, len(supportedEngineEndpoints))
unsupported := make([]string, 0)
for _, method := range supportedEngineEndpoints {
if !elSupportedEndpoints[method] {
unsupported = append(unsupported, method)
@@ -670,14 +667,15 @@ func (s *Service) ReconstructDataColumnSidecars(ctx context.Context, signedROBlo
return nil, wrapWithBlockRoot(err, blockRoot, "blob KZG commitments")
}
// Collect KZG hashes for all blobs
var kzgHashes []common.Hash
// Collect KZG hashes for all blobs.
versionedHashes := make([]common.Hash, 0, len(kzgCommitments))
for _, commitment := range kzgCommitments {
kzgHashes = append(kzgHashes, primitives.ConvertKzgCommitmentToVersionedHash(commitment))
versionedHash := primitives.ConvertKzgCommitmentToVersionedHash(commitment)
versionedHashes = append(versionedHashes, versionedHash)
}
// Fetch all blobsAndCellsProofs from EL
blobAndProofV2s, err := s.GetBlobsV2(ctx, kzgHashes)
// Fetch all blobsAndCellsProofs from the execution client.
blobAndProofV2s, err := s.GetBlobsV2(ctx, versionedHashes)
if err != nil {
return nil, wrapWithBlockRoot(err, blockRoot, "get blobs V2")
}
@@ -689,21 +687,23 @@ func (s *Service) ReconstructDataColumnSidecars(ctx context.Context, signedROBlo
}
// Extract the blobs and proofs from the blobAndProofV2s.
blobs := make([][]byte, 0, len(blobAndProofV2s))
cellProofs := make([][]byte, 0, len(blobAndProofV2s))
blobs, cellProofs := make([][]byte, 0, len(blobAndProofV2s)), make([][]byte, 0, len(blobAndProofV2s))
for _, blobsAndProofs := range blobAndProofV2s {
if blobsAndProofs == nil {
return nil, wrapWithBlockRoot(errMissingBlobsAndProofsFromEL, blockRoot, "")
}
blobs = append(blobs, blobsAndProofs.Blob)
cellProofs = append(cellProofs, blobsAndProofs.KzgProofs...)
blobs, cellProofs = append(blobs, blobsAndProofs.Blob), append(cellProofs, blobsAndProofs.KzgProofs...)
}
// Construct the data column sidecars from the blobs and cell proofs provided by the execution client.
dataColumnSidecars, err := peerdas.ConstructDataColumnSidecars(signedROBlock, blobs, cellProofs)
if err != nil {
return nil, wrapWithBlockRoot(err, blockRoot, "construct data column sidecars")
}
// Finally, construct verified RO data column sidecars.
// We trust the execution layer we are connected to, so we can upgrade the read-only data column sidecar into a verified one.
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnSidecars))
for _, dataColumnSidecar := range dataColumnSidecars {
roDataColumn, err := blocks.NewRODataColumnWithRoot(dataColumnSidecar, blockRoot)
@@ -711,12 +711,11 @@ func (s *Service) ReconstructDataColumnSidecars(ctx context.Context, signedROBlo
return nil, wrapWithBlockRoot(err, blockRoot, "new read-only data column with root")
}
// We trust the execution layer we are connected to, so we can upgrade the read-only data column sidecar into a verified one.
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
}
log.Debug("Data columns successfully reconstructed from EL")
log.Debug("Data columns successfully reconstructed from the execution client.")
return verifiedRODataColumns, nil
}
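For context, each BlobAndProofV2 returned by engine_getBlobsV2 carries one blob plus that blob's per-cell KZG proofs, so the flattened cellProofs slice above grows by a full set of cell proofs per blob. A self-contained sketch of that flattening invariant; the 128 cells-per-extended-blob constant is an assumption taken from the PeerDAS spec, not from this diff:

package main

import "fmt"

// blobAndProofV2 models the engine_getBlobsV2 response item: one blob plus
// that blob's per-cell KZG proofs.
type blobAndProofV2 struct {
    Blob      []byte
    KzgProofs [][]byte
}

// Assumed from the PeerDAS spec, not from this diff.
const cellsPerExtBlob = 128

// flatten mirrors the loop above: every response item must be present, and
// the proof slice grows by one full set of cell proofs per blob.
func flatten(items []*blobAndProofV2) (blobs, cellProofs [][]byte, err error) {
    blobs = make([][]byte, 0, len(items))
    cellProofs = make([][]byte, 0, len(items)*cellsPerExtBlob)
    for _, it := range items {
        if it == nil {
            return nil, nil, fmt.Errorf("missing blobs and proofs from EL")
        }
        blobs = append(blobs, it.Blob)
        cellProofs = append(cellProofs, it.KzgProofs...)
    }
    return blobs, cellProofs, nil
}

func main() {
    items := []*blobAndProofV2{{Blob: []byte{1}, KzgProofs: make([][]byte, cellsPerExtBlob)}}
    blobs, proofs, _ := flatten(items)
    fmt.Println(len(blobs), len(proofs)) // 1 128
}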

View File

@@ -17,6 +17,7 @@ go_library(
"//beacon-chain/execution/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",

View File

@@ -4,6 +4,7 @@ import (
"context"
"math/big"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -111,11 +112,11 @@ func (e *EngineClient) ReconstructFullBellatrixBlockBatch(
}
// ReconstructBlobSidecars is a mock implementation of the ReconstructBlobSidecars method.
func (e *EngineClient) ReconstructBlobSidecars(context.Context, interfaces.ReadOnlySignedBeaconBlock, [32]byte, func(uint64) bool) ([]blocks.VerifiedROBlob, error) {
func (e *EngineClient) ReconstructBlobSidecars(context.Context, interfaces.ReadOnlySignedBeaconBlock, [fieldparams.RootLength]byte, func(uint64) bool) ([]blocks.VerifiedROBlob, error) {
return e.BlobSidecars, e.ErrorBlobSidecars
}
func (e *EngineClient) ReconstructDataColumnSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) ([]blocks.VerifiedRODataColumn, error) {
func (e *EngineClient) ReconstructDataColumnSidecars(context.Context, interfaces.ReadOnlySignedBeaconBlock, [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error) {
return e.DataColumnSidecars, e.ErrorDataColumnSidecars
}

View File

@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"clear_db.go",
"config.go",
"log.go",
"node.go",
@@ -51,6 +50,7 @@ go_library(
"//beacon-chain/sync/backfill:go_default_library",
"//beacon-chain/sync/backfill/coverage:go_default_library",
"//beacon-chain/sync/checkpoint:go_default_library",
"//beacon-chain/sync/genesis:go_default_library",
"//beacon-chain/sync/initial-sync:go_default_library",
"//beacon-chain/verification:go_default_library",
"//cmd:go_default_library",
@@ -60,7 +60,6 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//container/slice:go_default_library",
"//encoding/bytesutil:go_default_library",
"//genesis:go_default_library",
"//monitoring/prometheus:go_default_library",
"//monitoring/tracing:go_default_library",
"//runtime:go_default_library",

View File

@@ -1,101 +0,0 @@
package node
import (
"context"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/slasherkv"
"github.com/OffchainLabs/prysm/v6/cmd"
"github.com/pkg/errors"
"github.com/urfave/cli/v2"
)
type dbClearer struct {
shouldClear bool
force bool
confirmed bool
}
const (
clearConfirmation = "This will delete your beacon chain database stored in your data directory. " +
"Your database backups will not be removed - do you want to proceed? (Y/N)"
clearDeclined = "Database will not be deleted. No changes have been made."
)
func (c *dbClearer) clearKV(ctx context.Context, db *kv.Store) (*kv.Store, error) {
if !c.shouldProceed() {
return db, nil
}
log.Warning("Removing database")
if err := db.ClearDB(); err != nil {
return nil, errors.Wrap(err, "could not clear database")
}
return kv.NewKVStore(ctx, db.DatabasePath())
}
func (c *dbClearer) clearBlobs(bs *filesystem.BlobStorage) error {
if !c.shouldProceed() {
return nil
}
log.Warning("Removing blob storage")
if err := bs.Clear(); err != nil {
return errors.Wrap(err, "could not clear blob storage")
}
return nil
}
func (c *dbClearer) clearColumns(cs *filesystem.DataColumnStorage) error {
if !c.shouldProceed() {
return nil
}
log.Warning("Removing data columns storage")
if err := cs.Clear(); err != nil {
return errors.Wrap(err, "could not clear data columns storage")
}
return nil
}
func (c *dbClearer) clearSlasher(ctx context.Context, db *slasherkv.Store) (*slasherkv.Store, error) {
if !c.shouldProceed() {
return db, nil
}
log.Warning("Removing slasher database")
if err := db.ClearDB(); err != nil {
return nil, errors.Wrap(err, "could not clear slasher database")
}
return slasherkv.NewKVStore(ctx, db.DatabasePath())
}
func (c *dbClearer) shouldProceed() bool {
if !c.shouldClear {
return false
}
if c.force {
return true
}
if !c.confirmed {
confirmed, err := cmd.ConfirmAction(clearConfirmation, clearDeclined)
if err != nil {
log.WithError(err).Error("Not clearing db due to confirmation error")
return false
}
c.confirmed = confirmed
}
return c.confirmed
}
func newDbClearer(cliCtx *cli.Context) *dbClearer {
force := cliCtx.Bool(cmd.ForceClearDB.Name)
return &dbClearer{
shouldClear: cliCtx.Bool(cmd.ClearDB.Name) || force,
force: force,
}
}
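For context, the deleted dbClearer centralized a confirm-once gate shared by the KV store, blob storage, data column storage, and the slasher database. A minimal sketch of that pattern, with a generic ask callback standing in for prysm's cmd.ConfirmAction:

package main

import "fmt"

// confirmGate caches a single positive confirmation so several destructive
// steps (KV store, blobs, columns, slasher) share one prompt.
type confirmGate struct {
    enabled   bool
    force     bool
    confirmed bool
    ask       func() bool
}

func (g *confirmGate) proceed() bool {
    if !g.enabled {
        return false
    }
    if g.force {
        return true
    }
    if !g.confirmed {
        // A declined prompt is asked again on the next destructive step,
        // mirroring shouldProceed above.
        g.confirmed = g.ask()
    }
    return g.confirmed
}

func main() {
    g := &confirmGate{enabled: true, ask: func() bool {
        fmt.Println("prompted once")
        return true
    }}
    fmt.Println(g.proceed(), g.proceed()) // prompts once; prints true true
}

Caching only a positive answer mirrors shouldProceed: a declined prompt is asked again before the next destructive step.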

View File

@@ -52,6 +52,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/backfill"
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/backfill/coverage"
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/checkpoint"
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/genesis"
initialsync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync/initial-sync"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/cmd"
@@ -61,7 +62,6 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/container/slice"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
"github.com/OffchainLabs/prysm/v6/monitoring/prometheus"
"github.com/OffchainLabs/prysm/v6/runtime"
"github.com/OffchainLabs/prysm/v6/runtime/prereqs"
@@ -113,7 +113,7 @@ type BeaconNode struct {
slasherAttestationsFeed *event.Feed
finalizedStateAtStartUp state.BeaconState
serviceFlagOpts *serviceFlagOpts
GenesisProviders []genesis.Provider
GenesisInitializer genesis.Initializer
CheckpointInitializer checkpoint.Initializer
forkChoicer forkchoice.ForkChoicer
clockWaiter startup.ClockWaiter
@@ -128,7 +128,6 @@ type BeaconNode struct {
syncChecker *initialsync.SyncChecker
slasherEnabled bool
lcStore *lightclient.Store
ConfigOptions []params.Option
}
// New creates a new node instance, sets up configuration options, and registers
@@ -137,13 +136,18 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
if err := configureBeacon(cliCtx); err != nil {
return nil, errors.Wrap(err, "could not set beacon configuration options")
}
// Initializes any forks here.
params.BeaconConfig().InitializeForkSchedule()
registry := runtime.NewServiceRegistry()
ctx := cliCtx.Context
beacon := &BeaconNode{
cliCtx: cliCtx,
ctx: ctx,
cancel: cancel,
services: runtime.NewServiceRegistry(),
services: registry,
stop: make(chan struct{}),
stateFeed: new(event.Feed),
blockFeed: new(event.Feed),
@@ -163,7 +167,6 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
syncChecker: &initialsync.SyncChecker{},
custodyInfo: &peerdas.CustodyInfo{},
slasherEnabled: cliCtx.Bool(flags.SlasherFlag.Name),
lcStore: &lightclient.Store{},
}
for _, opt := range opts {
@@ -172,25 +175,6 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
}
}
dbClearer := newDbClearer(cliCtx)
dataDir := cliCtx.String(cmd.DataDirFlag.Name)
boltFname := filepath.Join(dataDir, kv.BeaconNodeDbDirName)
kvdb, err := openDB(ctx, boltFname, dbClearer)
if err != nil {
return nil, errors.Wrap(err, "could not open database")
}
beacon.db = kvdb
providers := append(beacon.GenesisProviders, kv.NewLegacyGenesisProvider(kvdb))
if err := genesis.Initialize(ctx, dataDir, providers...); err != nil {
return nil, errors.Wrap(err, "could not initialize genesis state")
}
beacon.ConfigOptions = append([]params.Option{params.WithGenesisValidatorsRoot(genesis.ValidatorsRoot())}, beacon.ConfigOptions...)
params.BeaconConfig().ApplyOptions(beacon.ConfigOptions...)
params.BeaconConfig().InitializeForkSchedule()
params.LogDigests(params.BeaconConfig())
synchronizer := startup.NewClockSynchronizer()
beacon.clockWaiter = synchronizer
beacon.forkChoicer = doublylinkedtree.New()
@@ -209,9 +193,6 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
}
beacon.BlobStorage = blobs
}
if err := dbClearer.clearBlobs(beacon.BlobStorage); err != nil {
return nil, errors.Wrap(err, "could not clear blob storage")
}
if beacon.DataColumnStorage == nil {
dataColumnStorage, err := filesystem.NewDataColumnStorage(cliCtx.Context, beacon.DataColumnStorageOptions...)
@@ -221,11 +202,8 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
beacon.DataColumnStorage = dataColumnStorage
}
if err := dbClearer.clearColumns(beacon.DataColumnStorage); err != nil {
return nil, errors.Wrap(err, "could not clear data column storage")
}
bfs, err := startBaseServices(cliCtx, beacon, depositAddress, dbClearer)
bfs, err := startBaseServices(cliCtx, beacon, depositAddress)
if err != nil {
return nil, errors.Wrap(err, "could not start modules")
}
@@ -256,6 +234,10 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
// their initialization.
beacon.finalizedStateAtStartUp = nil
if features.Get().EnableLightClient {
beacon.lcStore = lightclient.NewLightClientStore()
}
return beacon, nil
}
@@ -309,7 +291,7 @@ func configureBeacon(cliCtx *cli.Context) error {
return nil
}
func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress string, clearer *dbClearer) (*backfill.Store, error) {
func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress string) (*backfill.Store, error) {
ctx := cliCtx.Context
log.Debugln("Starting DB")
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
@@ -317,10 +299,9 @@ func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress s
}
beacon.BlobStorage.WarmCache()
beacon.DataColumnStorage.WarmCache()
log.Debugln("Starting Slashing DB")
if err := beacon.startSlasherDB(cliCtx, clearer); err != nil {
if err := beacon.startSlasherDB(cliCtx); err != nil {
return nil, errors.Wrap(err, "could not start slashing DB")
}
@@ -500,6 +481,43 @@ func (b *BeaconNode) Close() {
close(b.stop)
}
func (b *BeaconNode) clearDB(clearDB, forceClearDB bool, d *kv.Store, dbPath string) (*kv.Store, error) {
var err error
clearDBConfirmed := false
if clearDB && !forceClearDB {
const (
actionText = "This will delete your beacon chain database stored in your data directory. " +
"Your database backups will not be removed - do you want to proceed? (Y/N)"
deniedText = "Database will not be deleted. No changes have been made."
)
clearDBConfirmed, err = cmd.ConfirmAction(actionText, deniedText)
if err != nil {
return nil, errors.Wrapf(err, "could not confirm action")
}
}
if clearDBConfirmed || forceClearDB {
log.Warning("Removing database")
if err := d.ClearDB(); err != nil {
return nil, errors.Wrap(err, "could not clear database")
}
if err := b.BlobStorage.Clear(); err != nil {
return nil, errors.Wrap(err, "could not clear blob storage")
}
d, err = kv.NewKVStore(b.ctx, dbPath)
if err != nil {
return nil, errors.Wrap(err, "could not create new database")
}
}
return d, nil
}
func (b *BeaconNode) checkAndSaveDepositContract(depositAddress string) error {
knownContract, err := b.db.DepositContractAddress(b.ctx)
if err != nil {
@@ -523,36 +541,60 @@ func (b *BeaconNode) checkAndSaveDepositContract(depositAddress string) error {
return nil
}
func openDB(ctx context.Context, dbPath string, clearer *dbClearer) (*kv.Store, error) {
func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
var depositCache cache.DepositCache
baseDir := cliCtx.String(cmd.DataDirFlag.Name)
dbPath := filepath.Join(baseDir, kv.BeaconNodeDbDirName)
clearDBRequired := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDBRequired := cliCtx.Bool(cmd.ForceClearDB.Name)
log.WithField("databasePath", dbPath).Info("Checking DB")
d, err := kv.NewKVStore(ctx, dbPath)
d, err := kv.NewKVStore(b.ctx, dbPath)
if err != nil {
return nil, errors.Wrapf(err, "could not create database at %s", dbPath)
return errors.Wrapf(err, "could not create database at %s", dbPath)
}
d, err = clearer.clearKV(ctx, d)
if err != nil {
return nil, errors.Wrap(err, "could not clear database")
if clearDBRequired || forceClearDBRequired {
d, err = b.clearDB(clearDBRequired, forceClearDBRequired, d, dbPath)
if err != nil {
return errors.Wrap(err, "could not clear database")
}
}
return d, d.RunMigrations(ctx)
}
if err := d.RunMigrations(b.ctx); err != nil {
return err
}
func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
depositCache, err := depositsnapshot.New()
b.db = d
depositCache, err = depositsnapshot.New()
if err != nil {
return errors.Wrap(err, "could not create deposit cache")
}
b.depositCache = depositCache
if b.GenesisInitializer != nil {
if err := b.GenesisInitializer.Initialize(b.ctx, d); err != nil {
if errors.Is(err, db.ErrExistingGenesisState) {
return errors.Errorf("Genesis state flag specified but a genesis state "+
"exists already. Run again with --%s and/or ensure you are using the "+
"appropriate testnet flag to load the given genesis state.", cmd.ClearDB.Name)
}
return errors.Wrap(err, "could not load genesis from file")
}
}
if err := b.db.EnsureEmbeddedGenesis(b.ctx); err != nil {
return errors.Wrap(err, "could not ensure embedded genesis")
}
if b.CheckpointInitializer != nil {
log.Info("Checkpoint sync - Downloading origin state and block")
if err := b.CheckpointInitializer.Initialize(b.ctx, b.db); err != nil {
if err := b.CheckpointInitializer.Initialize(b.ctx, d); err != nil {
return err
}
}
@@ -564,25 +606,49 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
log.WithField("address", depositAddress).Info("Deposit contract")
return nil
}
func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context, clearer *dbClearer) error {
func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
if !b.slasherEnabled {
return nil
}
baseDir := cliCtx.String(cmd.DataDirFlag.Name)
if cliCtx.IsSet(flags.SlasherDirFlag.Name) {
baseDir = cliCtx.String(flags.SlasherDirFlag.Name)
}
dbPath := filepath.Join(baseDir, kv.BeaconNodeDbDirName)
clearDB := cliCtx.Bool(cmd.ClearDB.Name)
forceClearDB := cliCtx.Bool(cmd.ForceClearDB.Name)
log.WithField("databasePath", dbPath).Info("Checking DB")
d, err := slasherkv.NewKVStore(b.ctx, dbPath)
if err != nil {
return err
}
d, err = clearer.clearSlasher(b.ctx, d)
if err != nil {
return errors.Wrap(err, "could not clear slasher database")
clearDBConfirmed := false
if clearDB && !forceClearDB {
actionText := "This will delete your beacon chain database stored in your data directory. " +
"Your database backups will not be removed - do you want to proceed? (Y/N)"
deniedText := "Database will not be deleted. No changes have been made."
clearDBConfirmed, err = cmd.ConfirmAction(actionText, deniedText)
if err != nil {
return err
}
}
if clearDBConfirmed || forceClearDB {
log.Warning("Removing database")
if err := d.ClearDB(); err != nil {
return errors.Wrap(err, "could not clear database")
}
d, err = slasherkv.NewKVStore(b.ctx, dbPath)
if err != nil {
return errors.Wrap(err, "could not create new database")
}
}
b.slasherDB = d
return nil
}
@@ -820,10 +886,10 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
regularsync.WithDataColumnStorage(b.DataColumnStorage),
regularsync.WithVerifierWaiter(b.verifyInitWaiter),
regularsync.WithAvailableBlocker(bFillStore),
regularsync.WithTrackedValidatorsCache(b.trackedValidatorsCache),
regularsync.WithCustodyInfo(b.custodyInfo),
regularsync.WithSlasherEnabled(b.slasherEnabled),
regularsync.WithLightClientStore(b.lcStore),
regularsync.WithBatchVerifierLimit(b.cliCtx.Int(flags.BatchVerifierLimit.Name)),
)
return b.services.RegisterService(rs)
}
@@ -847,8 +913,6 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
ClockWaiter: b.clockWaiter,
InitialSyncComplete: complete,
BlobStorage: b.BlobStorage,
DataColumnStorage: b.DataColumnStorage,
CustodyInfo: b.custodyInfo,
}, opts...)
return b.services.RegisterService(is)
}
@@ -943,7 +1007,6 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
FinalizationFetcher: chainService,
BlockReceiver: chainService,
BlobReceiver: chainService,
DataColumnReceiver: chainService,
AttestationReceiver: chainService,
GenesisTimeFetcher: chainService,
GenesisFetcher: chainService,
@@ -971,7 +1034,6 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
Router: router,
ClockWaiter: b.clockWaiter,
BlobStorage: b.BlobStorage,
DataColumnStorage: b.DataColumnStorage,
TrackedValidatorsCache: b.trackedValidatorsCache,
PayloadIDCache: b.payloadIDCache,
LCStore: b.lcStore,
@@ -1113,7 +1175,6 @@ func (b *BeaconNode) registerPrunerService(cliCtx *cli.Context) error {
func (b *BeaconNode) RegisterBackfillService(cliCtx *cli.Context, bfs *backfill.Store) error {
pa := peers.NewAssigner(b.fetchP2P().Peers(), b.forkChoicer)
// TODO: Add backfill for data column storage
bf, err := backfill.NewService(cliCtx.Context, bfs, b.BlobStorage, b.clockWaiter, b.fetchP2P(), pa, b.BackfillOpts...)
if err != nil {
return errors.Wrap(err, "error initializing backfill service")

View File

@@ -5,7 +5,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/builder"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
"github.com/OffchainLabs/prysm/v6/config/params"
)
// Option for beacon node configuration.
@@ -52,13 +51,6 @@ func WithBlobStorageOptions(opt ...filesystem.BlobStorageOption) Option {
}
}
func WithConfigOptions(opt ...params.Option) Option {
return func(bn *BeaconNode) error {
bn.ConfigOptions = append(bn.ConfigOptions, opt...)
return nil
}
}
// WithDataColumnStorage sets the DataColumnStorage backend for the BeaconNode
func WithDataColumnStorage(bs *filesystem.DataColumnStorage) Option {
return func(bn *BeaconNode) error {
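Like the remaining helpers in this file, WithDataColumnStorage follows Go's functional-options pattern: each Option is a closure that mutates the node under construction and may reject invalid input. A generic sketch of the pattern (all names here are illustrative, not prysm's):

package main

import (
    "errors"
    "fmt"
)

type node struct {
    dataDir string
    verbose bool
}

// option is a closure that mutates a node under construction and may
// reject invalid input.
type option func(*node) error

func withDataDir(dir string) option {
    return func(n *node) error {
        if dir == "" {
            return errors.New("data dir must not be empty")
        }
        n.dataDir = dir
        return nil
    }
}

func withVerbose() option {
    return func(n *node) error {
        n.verbose = true
        return nil
    }
}

func newNode(opts ...option) (*node, error) {
    n := &node{}
    for _, opt := range opts {
        if err := opt(n); err != nil {
            return nil, err
        }
    }
    return n, nil
}

func main() {
    n, err := newNode(withDataDir("/tmp/beacon"), withVerbose())
    fmt.Println(n, err) // &{/tmp/beacon true} <nil>
}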

View File

@@ -71,6 +71,7 @@ go_library(
"//monitoring/tracing:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//network:go_default_library",
"//network/forks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/metadata:go_default_library",
"//runtime:go_default_library",
@@ -167,6 +168,7 @@ go_test(
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network:go_default_library",
"//network/forks:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/metadata:go_default_library",

View File

@@ -15,6 +15,7 @@ import (
"github.com/OffchainLabs/prysm/v6/crypto/hash"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/pkg/errors"
@@ -278,8 +279,14 @@ func (s *Service) BroadcastLightClientOptimisticUpdate(ctx context.Context, upda
return errors.New("attempted to broadcast nil light client optimistic update")
}
digest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
if err := s.broadcastObject(ctx, update, lcOptimisticToTopic(digest)); err != nil {
forkDigest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(update.AttestedHeader().Beacon().Slot), s.genesisValidatorsRoot)
if err != nil {
err := errors.Wrap(err, "could not retrieve fork digest")
tracing.AnnotateError(span, err)
return err
}
if err := s.broadcastObject(ctx, update, lcOptimisticToTopic(forkDigest)); err != nil {
log.WithError(err).Debug("Failed to broadcast light client optimistic update")
err := errors.Wrap(err, "could not publish message")
tracing.AnnotateError(span, err)
@@ -298,7 +305,13 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
return errors.New("attempted to broadcast nil light client finality update")
}
forkDigest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
forkDigest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(update.AttestedHeader().Beacon().Slot), s.genesisValidatorsRoot)
if err != nil {
err := errors.Wrap(err, "could not retrieve fork digest")
tracing.AnnotateError(span, err)
return err
}
if err := s.broadcastObject(ctx, update, lcFinalityToTopic(forkDigest)); err != nil {
log.WithError(err).Debug("Failed to broadcast light client finality update")
err := errors.Wrap(err, "could not publish message")

View File

@@ -16,13 +16,12 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
testpb "github.com/OffchainLabs/prysm/v6/proto/testing"
"github.com/OffchainLabs/prysm/v6/runtime/version"
@@ -62,7 +61,6 @@ func TestService_Broadcast(t *testing.T) {
topic := "/eth2/%x/testing"
// Set a test gossip mapping for testpb.TestSimpleMessage.
GossipTypeMapping[reflect.TypeOf(msg)] = topic
p.clock = startup.NewClock(p.genesisTime, bytesutil.ToBytes32(p.genesisValidatorsRoot))
digest, err := p.currentForkDigest()
require.NoError(t, err)
topic = fmt.Sprintf(topic, digest)
@@ -552,7 +550,9 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
require.NoError(t, err)
GossipTypeMapping[reflect.TypeOf(msg)] = LightClientOptimisticUpdateTopicFormat
topic := fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))
digest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot), p.genesisValidatorsRoot)
require.NoError(t, err)
topic := fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, digest)
// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()
@@ -617,7 +617,9 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
require.NoError(t, err)
GossipTypeMapping[reflect.TypeOf(msg)] = LightClientFinalityUpdateTopicFormat
topic := fmt.Sprintf(LightClientFinalityUpdateTopicFormat, params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))
digest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot), p.genesisValidatorsRoot)
require.NoError(t, err)
topic := fmt.Sprintf(LightClientFinalityUpdateTopicFormat, digest)
// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()

View File

@@ -9,7 +9,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
@@ -223,11 +222,29 @@ func (s *Service) RefreshPersistentSubnets() {
// Get the sync subnet bitfield in our metadata.
currentBitSInMetadata := s.Metadata().SyncnetsBitfield()
// Is our sync bitvector record up to date?
isBitSUpToDate := bytes.Equal(bitS, inRecordBitS) && bytes.Equal(bitS, currentBitSInMetadata)
// Compare current epoch with the Fulu fork epoch.
fuluForkEpoch := params.BeaconConfig().FuluForkEpoch
// We add `1` to the current epoch because we want to prepare one epoch before the Fulu fork.
if currentEpoch+1 < fuluForkEpoch {
// Altair behaviour.
if metadataVersion == version.Altair && isBitVUpToDate && isBitSUpToDate {
// Nothing to do, return early.
return
}
// Some data has changed; update our record and metadata.
s.updateSubnetRecordWithMetadataV2(bitV, bitS)
// Ping all peers to inform them of new metadata
s.pingPeersAndLogEnr()
return
}
// Get the current custody group count.
custodyGroupCount := s.cfg.CustodyInfo.ActualGroupCount()
@@ -238,26 +255,6 @@ func (s *Service) RefreshPersistentSubnets() {
return
}
// We add `1` to the current epoch because we want to prepare one epoch before the Fulu fork.
if currentEpoch+1 < fuluForkEpoch {
// Is our custody group count record up to date?
isCustodyGroupCountUpToDate := custodyGroupCount == inRecordCustodyGroupCount
// Altair behaviour.
if metadataVersion == version.Altair && isBitVUpToDate && isBitSUpToDate && (!params.FuluEnabled() || isCustodyGroupCountUpToDate) {
// Nothing to do, return early.
return
}
// Some data has changed; update our record and metadata.
s.updateSubnetRecordWithMetadataV2(bitV, bitS, custodyGroupCount)
// Ping all peers to inform them of new metadata
s.pingPeersAndLogEnr()
return
}
// Get the custody group count in our metadata.
inMetadataCustodyGroupCount := s.Metadata().CustodyGroupCount()
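The currentEpoch+1 < fuluForkEpoch branch above encodes a prepare-one-epoch-early rule: a node keeps the Altair-style metadata until one epoch before the Fulu fork, then switches to the Fulu metadata carrying the custody group count. A sketch of just that boundary check:

package main

import "fmt"

// shouldUseFuluMetadata mirrors the boundary check above: switch to the
// Fulu metadata (which carries the custody group count) starting one epoch
// before the fork so peers see the new record in time.
func shouldUseFuluMetadata(currentEpoch, fuluForkEpoch uint64) bool {
    return currentEpoch+1 >= fuluForkEpoch
}

func main() {
    const fulu = 100
    for _, e := range []uint64{98, 99, 100} {
        fmt.Println(e, shouldUseFuluMetadata(e, fulu)) // 98 false, 99 true, 100 true
    }
}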
@@ -390,7 +387,6 @@ func (s *Service) listenForNewNodes() {
s.Peers().RandomizeBackOff(peerInfo.ID)
wg.Add(1)
go func(info *peer.AddrInfo) {
log.WithField("enr", node.String()).WithField("peerInfo", info.String()).Debug("attempting to connect with peer")
if err := s.connectWithPeer(s.ctx, *info); err != nil {
log.WithError(err).Tracef("Could not connect with peer %s", info.String())
}
@@ -508,10 +504,8 @@ func (s *Service) createLocalNode(
localNode.SetFallbackIP(ipAddr)
localNode.SetFallbackUDP(udpPort)
clock := startup.NewClock(s.genesisTime, [32]byte(s.genesisValidatorsRoot))
currentEntry := params.GetNetworkScheduleEntry(clock.CurrentEpoch())
nextEntry := params.NextNetworkScheduleEntry(clock.CurrentEpoch())
if err := updateENR(localNode, currentEntry, nextEntry); err != nil {
localNode, err = addForkEntry(localNode, s.genesisTime, s.genesisValidatorsRoot)
if err != nil {
return nil, errors.Wrap(err, "could not add eth2 fork version entry to enr")
}

View File

@@ -250,7 +250,7 @@ func TestCreateLocalNode(t *testing.T) {
// Check cgc config.
custodyGroupCount := new(uint64)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().CustodyGroupCountKey, custodyGroupCount)))
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(peerdas.CustodyGroupCountEnrKey, custodyGroupCount)))
require.Equal(t, params.BeaconConfig().CustodyRequirement, *custodyGroupCount)
})
}
@@ -621,7 +621,7 @@ func checkPingCountCacheMetadataRecord(
if expected.custodyGroupCount != nil {
// Check custody subnet count in ENR.
var actualCustodyGroupCount uint64
err := service.dv5Listener.LocalNode().Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().CustodyGroupCountKey, &actualCustodyGroupCount))
err := service.dv5Listener.LocalNode().Node().Record().Load(enr.WithEntry(peerdas.CustodyGroupCountEnrKey, &actualCustodyGroupCount))
require.NoError(t, err)
require.Equal(t, *expected.custodyGroupCount, actualCustodyGroupCount)

View File

@@ -3,10 +3,13 @@ package p2p
import (
"bytes"
"fmt"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/network/forks"
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
prysmTime "github.com/OffchainLabs/prysm/v6/time"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/pkg/errors"
@@ -22,9 +25,7 @@ func (s *Service) currentForkDigest() ([4]byte, error) {
if !s.isInitialized() {
return [4]byte{}, errors.New("state is not initialized")
}
clock := startup.NewClock(s.genesisTime, [32]byte(s.genesisValidatorsRoot))
return params.ForkDigest(clock.CurrentEpoch()), nil
return forks.CreateForkDigest(s.genesisTime, s.genesisValidatorsRoot)
}
// Compares fork ENRs between an incoming peer's record and our node's
@@ -73,36 +74,41 @@ func (s *Service) compareForkENR(record *enr.Record) error {
return nil
}
func updateENR(node *enode.LocalNode, entry, next params.NetworkScheduleEntry) error {
// Adds a fork entry as an ENR record under the Ethereum consensus EnrKey for
// the local node. The fork entry is an ssz-encoded enrForkID value that
// combines the fork digest derived from the current epoch's fork version,
// the next fork version, and the next fork epoch.
func addForkEntry(
node *enode.LocalNode,
genesisTime time.Time,
genesisValidatorsRoot []byte,
) (*enode.LocalNode, error) {
digest, err := forks.CreateForkDigest(genesisTime, genesisValidatorsRoot)
if err != nil {
return nil, err
}
currentSlot := slots.Since(genesisTime)
currentEpoch := slots.ToEpoch(currentSlot)
if prysmTime.Now().Before(genesisTime) {
currentEpoch = 0
}
nextForkVersion, nextForkEpoch, err := forks.NextForkData(currentEpoch)
if err != nil {
return nil, err
}
enrForkID := &pb.ENRForkID{
CurrentForkDigest: entry.ForkDigest[:],
NextForkVersion: next.ForkVersion[:],
NextForkEpoch: next.Epoch,
CurrentForkDigest: digest[:],
NextForkVersion: nextForkVersion[:],
NextForkEpoch: nextForkEpoch,
}
if entry.Epoch == next.Epoch {
enrForkID.NextForkEpoch = params.BeaconConfig().FarFutureEpoch
}
logFields := logrus.Fields{
"CurrentForkDigest": fmt.Sprintf("%#x", enrForkID.CurrentForkDigest),
"NextForkVersion": fmt.Sprintf("%#x", enrForkID.NextForkVersion),
"NextForkEpoch": fmt.Sprintf("%d", enrForkID.NextForkEpoch),
}
if params.BeaconConfig().FuluForkEpoch != params.BeaconConfig().FarFutureEpoch {
if entry.ForkDigest == next.ForkDigest {
node.Set(enr.WithEntry(nfdEnrKey, make([]byte, len(next.ForkDigest))))
} else {
node.Set(enr.WithEntry(nfdEnrKey, next.ForkDigest[:]))
}
logFields[nfdEnrKey] = fmt.Sprintf("%#x", next.ForkDigest)
}
log.WithFields(logFields).Info("updating ENR Fork ID")
enc, err := enrForkID.MarshalSSZ()
if err != nil {
return err
return nil, err
}
forkEntry := enr.WithEntry(eth2ENRKey, enc)
node.Set(forkEntry)
return nil
return node, nil
}
// Retrieves an enrForkID from an ENR record by key lookup
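For reference, the fork digest packed into the ENRForkID is, per the consensus spec, the first four bytes of hash_tree_root over (current_fork_version, genesis_validators_root); for this two-field container the SSZ hash tree root reduces to a single SHA-256 over the version padded to 32 bytes concatenated with the root. A sketch under that assumption (prysm's signing.ComputeForkDigest is the real implementation):

package main

import (
    "crypto/sha256"
    "fmt"
)

// forkDigest returns the first 4 bytes of hash_tree_root(ForkData), where
// ForkData is (current_version: 4 bytes, genesis_validators_root: 32 bytes).
// For this two-leaf container the SSZ hash tree root reduces to
// sha256(version padded to 32 bytes || root).
func forkDigest(version [4]byte, genesisValidatorsRoot [32]byte) [4]byte {
    var leaf [32]byte
    copy(leaf[:], version[:])
    h := sha256.Sum256(append(leaf[:], genesisValidatorsRoot[:]...))
    var digest [4]byte
    copy(digest[:], h[:4])
    return digest
}

func main() {
    d := forkDigest([4]byte{0, 0, 0, 0}, [32]byte{})
    fmt.Printf("%#x\n", d)
}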

View File

@@ -10,11 +10,11 @@ import (
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -230,8 +230,10 @@ func TestDiscv5_AddRetrieveForkEntryENR(t *testing.T) {
nextForkVersion := []byte{0, 0, 0, 1}
params.OverrideBeaconConfig(c)
clock := startup.NewClock(time.Now(), [32]byte{})
digest := params.ForkDigest(clock.CurrentEpoch())
genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, 32)
digest, err := forks.CreateForkDigest(genesisTime, make([]byte, 32))
require.NoError(t, err)
enrForkID := &pb.ENRForkID{
CurrentForkDigest: digest[:],
NextForkVersion: nextForkVersion,
@@ -253,7 +255,7 @@ func TestDiscv5_AddRetrieveForkEntryENR(t *testing.T) {
localNode := enode.NewLocalNode(db, pkey)
localNode.Set(entry)
want, err := signing.ComputeForkDigest([]byte{0, 0, 0, 0}, clock.GenesisValidatorsRootSlice())
want, err := signing.ComputeForkDigest([]byte{0, 0, 0, 0}, genesisValidatorsRoot)
require.NoError(t, err)
resp, err := forkEntry(localNode.Node().Record())
@@ -280,11 +282,7 @@ func TestAddForkEntry_Genesis(t *testing.T) {
params.OverrideBeaconConfig(bCfg)
localNode := enode.NewLocalNode(db, pkey)
clock := startup.NewClock(time.Now(), bCfg.GenesisValidatorsRoot)
current := clock.CurrentEpoch()
entry := params.GetNetworkScheduleEntry(current)
next := params.GetNetworkScheduleEntry(entry.Epoch)
require.NoError(t, updateENR(localNode, entry, next))
localNode, err = addForkEntry(localNode, time.Now().Add(10*time.Second), bytesutil.PadTo([]byte{'A', 'B', 'C', 'D'}, 32))
require.NoError(t, err)
forkEntry, err := forkEntry(localNode.Node().Record())
require.NoError(t, err)

View File

@@ -10,19 +10,26 @@ import (
// changes.
func (s *Service) forkWatcher() {
slotTicker := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
var scheduleEntry params.NetworkScheduleEntry
for {
select {
case currSlot := <-slotTicker.C():
currEpoch := slots.ToEpoch(currSlot)
newEntry := params.GetNetworkScheduleEntry(currEpoch)
if newEntry.ForkDigest != scheduleEntry.ForkDigest {
nextEntry := params.NextNetworkScheduleEntry(currEpoch)
if err := updateENR(s.dv5Listener.LocalNode(), newEntry, nextEntry); err != nil {
log.WithFields(newEntry.LogFields()).WithError(err).Error("Could not add fork entry")
continue // don't replace scheduleEntry until this succeeds
if currEpoch == params.BeaconConfig().AltairForkEpoch ||
currEpoch == params.BeaconConfig().BellatrixForkEpoch ||
currEpoch == params.BeaconConfig().CapellaForkEpoch ||
currEpoch == params.BeaconConfig().DenebForkEpoch ||
currEpoch == params.BeaconConfig().ElectraForkEpoch ||
currEpoch == params.BeaconConfig().FuluForkEpoch {
// If we are in a fork epoch, we update our ENR with
// the updated fork digest. This runs repeatedly
// throughout the epoch, which is slightly wasteful
// but fine nonetheless.
if s.dv5Listener != nil { // make sure it's not a local network
_, err := addForkEntry(s.dv5Listener.LocalNode(), s.genesisTime, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not add fork entry")
}
}
scheduleEntry = newEntry
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
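Both the old and the new forkWatcher run off a per-slot ticker and only touch the ENR around fork-epoch boundaries. A stripped-down sketch of that loop, with a plain channel standing in for the slot ticker and a callback standing in for the ENR update:

package main

import "fmt"

// forkWatcher drains a slot ticker and fires the ENR update whenever the
// slot's epoch is a fork epoch; it exits when the channel is closed.
func forkWatcher(slotC <-chan uint64, forkEpochs map[uint64]bool, update func(epoch uint64) error) {
    const slotsPerEpoch = 32
    for slot := range slotC {
        epoch := slot / slotsPerEpoch
        if forkEpochs[epoch] {
            if err := update(epoch); err != nil {
                fmt.Println("could not add fork entry:", err)
            }
        }
    }
}

func main() {
    slotC := make(chan uint64, 3)
    for _, s := range []uint64{63, 64, 65} {
        slotC <- s
    }
    close(slotC)
    forkWatcher(slotC, map[uint64]bool{2: true}, func(e uint64) error {
        fmt.Println("updating ENR at epoch", e)
        return nil
    })
}

Slots 64 and 65 both map to epoch 2, so the update fires twice, which is exactly the repeated, slightly wasteful behavior the comment above concedes.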

View File

@@ -10,7 +10,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/peerdata"
prysmTime "github.com/OffchainLabs/prysm/v6/time"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
@@ -28,22 +27,6 @@ func peerMultiaddrString(conn network.Conn) string {
return fmt.Sprintf("%s/p2p/%s", remoteMultiaddr, remotePeerID)
}
func peerAgentString(pid peer.ID, host host.Host) string {
const unknownAgent = "unknown"
rawAgent, err := host.Peerstore().Get(pid, "AgentVersion")
if err != nil {
return unknownAgent
}
agent, ok := rawAgent.(string)
if !ok {
return unknownAgent
}
return agent
}
func (s *Service) connectToPeer(conn network.Conn) {
s.peers.SetConnectionState(conn.RemotePeer(), peers.Connected)
// Go through the handshake process.
@@ -76,7 +59,6 @@ func (s *Service) disconnectFromPeerOnError(
WithError(badPeerErr).
WithFields(logrus.Fields{
"multiaddr": peerMultiaddrString(conn),
"agent": peerAgentString(remotePeerID, s.host),
"direction": conn.Stat().Direction.String(),
"remainingActivePeers": len(s.peers.Active()),
}).

View File

@@ -7,6 +7,7 @@ import (
"github.com/OffchainLabs/prysm/v6/crypto/hash"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/math"
"github.com/OffchainLabs/prysm/v6/network/forks"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
)
@@ -38,7 +39,7 @@ func MsgID(genesisValidatorsRoot []byte, pmsg *pubsubpb.Message) string {
copy(msg, "invalid")
return bytesutil.UnsafeCastToString(msg)
}
_, fEpoch, err := params.ForkDataFromDigest(digest)
_, fEpoch, err := forks.RetrieveForkDataFromDigest(digest, genesisValidatorsRoot)
if err != nil {
// Impossible condition that should
// never be hit.
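For orientation, the message ID this function computes follows, in the pre-Altair rule exercised by TestMsgID_HashesCorrectly further below, a simple recipe: decode snappy if possible, prefix a 4-byte message domain (a distinct one for invalid snappy data), hash, and keep the first 20 bytes. A self-contained sketch assuming the spec's domain constants:

package main

import (
    "crypto/sha256"
    "fmt"

    "github.com/golang/snappy"
)

// Spec message domains (assumed here): MESSAGE_DOMAIN_INVALID_SNAPPY and
// MESSAGE_DOMAIN_VALID_SNAPPY.
var (
    domainInvalidSnappy = [4]byte{0x00, 0x00, 0x00, 0x00}
    domainValidSnappy   = [4]byte{0x01, 0x00, 0x00, 0x00}
)

// msgID sketches the pre-Altair gossip message-id rule: decode snappy if
// possible, prefix the matching domain, hash, and keep the first 20 bytes.
func msgID(data []byte) string {
    decoded, err := snappy.Decode(nil, data)
    var h [32]byte
    if err != nil {
        h = sha256.Sum256(append(domainInvalidSnappy[:], data...))
    } else {
        h = sha256.Sum256(append(domainValidSnappy[:], decoded...))
    }
    return string(h[:20])
}

func main() {
    fmt.Printf("%x\n", msgID(snappy.Encode(nil, []byte("valid"))))
    fmt.Printf("%x\n", msgID([]byte("JUNK")))
}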

View File

@@ -7,10 +7,10 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/crypto/hash"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/golang/snappy"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
@@ -18,22 +18,22 @@ import (
func TestMsgID_HashesCorrectly(t *testing.T) {
params.SetupTestConfigCleanup(t)
clock := startup.NewClock(time.Now(), bytesutil.ToBytes32([]byte{'A'}))
valRoot := clock.GenesisValidatorsRoot()
d := params.ForkDigest(clock.CurrentEpoch())
genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
d, err := forks.CreateForkDigest(time.Now(), genesisValidatorsRoot)
assert.NoError(t, err)
tpc := fmt.Sprintf(p2p.BlockSubnetTopicFormat, d)
invalidSnappy := [32]byte{'J', 'U', 'N', 'K'}
pMsg := &pubsubpb.Message{Data: invalidSnappy[:], Topic: &tpc}
hashedData := hash.Hash(append(params.BeaconConfig().MessageDomainInvalidSnappy[:], pMsg.Data...))
msgID := string(hashedData[:20])
assert.Equal(t, msgID, p2p.MsgID(valRoot[:], pMsg), "Got incorrect msg id")
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, pMsg), "Got incorrect msg id")
validObj := [32]byte{'v', 'a', 'l', 'i', 'd'}
enc := snappy.Encode(nil, validObj[:])
nMsg := &pubsubpb.Message{Data: enc, Topic: &tpc}
hashedData = hash.Hash(append(params.BeaconConfig().MessageDomainValidSnappy[:], validObj[:]...))
msgID = string(hashedData[:20])
assert.Equal(t, msgID, p2p.MsgID(valRoot[:], nMsg), "Got incorrect msg id")
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, nMsg), "Got incorrect msg id")
}
func TestMessageIDFunction_HashesCorrectlyAltair(t *testing.T) {

View File

@@ -54,7 +54,7 @@ type PeerData struct {
NextValidTime time.Time
// Chain related data.
MetaData metadata.Metadata
ChainState *ethpb.StatusV2
ChainState *ethpb.Status
ChainStateLastUpdated time.Time
ChainStateValidationError error
// Scorers internal data.

View File

@@ -5,6 +5,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/peerdata"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
)
var _ Scorer = (*BadResponsesScorer)(nil)
@@ -128,14 +129,13 @@ func (s *BadResponsesScorer) IsBadPeer(pid peer.ID) error {
// isBadPeerNoLock is the lock-free version of IsBadPeer.
func (s *BadResponsesScorer) isBadPeerNoLock(pid peer.ID) error {
// if peerData, ok := s.store.PeerData(pid); ok {
// TODO: Remove this once out of devnet
// if peerData.BadResponses >= s.config.Threshold {
// return errors.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", peerData.BadResponses, s.config.Threshold)
// }
if peerData, ok := s.store.PeerData(pid); ok {
if peerData.BadResponses >= s.config.Threshold {
return errors.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", peerData.BadResponses, s.config.Threshold)
}
// return nil
// }
return nil
}
return nil
}

View File

@@ -1,6 +1,7 @@
package scorers_test
import (
"sort"
"testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
@@ -12,41 +13,39 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
)
// TODO: Uncomment when out of devnet
// func TestScorers_BadResponses_Score(t *testing.T) {
// const pid = "peer1"
func TestScorers_BadResponses_Score(t *testing.T) {
const pid = "peer1"
// ctx, cancel := context.WithCancel(context.Background())
// defer cancel()
ctx := t.Context()
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
// Threshold: 4,
// },
// },
// })
// scorer := peerStatuses.Scorers().BadResponsesScorer()
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 4,
},
},
})
scorer := peerStatuses.Scorers().BadResponsesScorer()
// assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")
assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")
// scorer.Increment(pid)
// assert.NoError(t, scorer.IsBadPeer(pid))
// assert.Equal(t, -2.5, scorer.Score(pid))
scorer.Increment(pid)
assert.NoError(t, scorer.IsBadPeer(pid))
assert.Equal(t, -2.5, scorer.Score(pid))
// scorer.Increment(pid)
// assert.NoError(t, scorer.IsBadPeer(pid))
// assert.Equal(t, float64(-5), scorer.Score(pid))
scorer.Increment(pid)
assert.NoError(t, scorer.IsBadPeer(pid))
assert.Equal(t, float64(-5), scorer.Score(pid))
// scorer.Increment(pid)
// assert.NoError(t, scorer.IsBadPeer(pid))
// assert.Equal(t, float64(-7.5), scorer.Score(pid))
scorer.Increment(pid)
assert.NoError(t, scorer.IsBadPeer(pid))
assert.Equal(t, float64(-7.5), scorer.Score(pid))
// scorer.Increment(pid)
// assert.NotNil(t, scorer.IsBadPeer(pid))
// assert.Equal(t, -100.0, scorer.Score(pid))
// }
scorer.Increment(pid)
assert.NotNil(t, scorer.IsBadPeer(pid))
assert.Equal(t, -100.0, scorer.Score(pid))
}
func TestScorers_BadResponses_ParamsThreshold(t *testing.T) {
ctx := t.Context()
@@ -138,60 +137,56 @@ func TestScorers_BadResponses_Decay(t *testing.T) {
assert.Equal(t, 1, badResponses, "unexpected bad responses for pid3")
}
// TODO: Uncomment when out of devnet
// func TestScorers_BadResponses_IsBadPeer(t *testing.T) {
// ctx, cancel := context.WithCancel(context.Background())
// defer cancel()
func TestScorers_BadResponses_IsBadPeer(t *testing.T) {
ctx := t.Context()
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{},
// })
// scorer := peerStatuses.Scorers().BadResponsesScorer()
// pid := peer.ID("peer1")
// assert.NoError(t, scorer.IsBadPeer(pid))
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{},
})
scorer := peerStatuses.Scorers().BadResponsesScorer()
pid := peer.ID("peer1")
assert.NoError(t, scorer.IsBadPeer(pid))
// peerStatuses.Add(nil, pid, nil, network.DirUnknown)
// assert.NoError(t, scorer.IsBadPeer(pid))
peerStatuses.Add(nil, pid, nil, network.DirUnknown)
assert.NoError(t, scorer.IsBadPeer(pid))
// for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
// scorer.Increment(pid)
// if i == scorers.DefaultBadResponsesThreshold-1 {
// assert.NotNil(t, scorer.IsBadPeer(pid), "Unexpected peer status")
// } else {
// assert.NoError(t, scorer.IsBadPeer(pid), "Unexpected peer status")
// }
// }
// }
for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
scorer.Increment(pid)
if i == scorers.DefaultBadResponsesThreshold-1 {
assert.NotNil(t, scorer.IsBadPeer(pid), "Unexpected peer status")
} else {
assert.NoError(t, scorer.IsBadPeer(pid), "Unexpected peer status")
}
}
}
// TODO: Uncomment when out of devnet
// func TestScorers_BadResponses_BadPeers(t *testing.T) {
// ctx, cancel := context.WithCancel(context.Background())
// defer cancel()
func TestScorers_BadResponses_BadPeers(t *testing.T) {
ctx := t.Context()
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{},
// })
// scorer := peerStatuses.Scorers().BadResponsesScorer()
// pids := []peer.ID{peer.ID("peer1"), peer.ID("peer2"), peer.ID("peer3"), peer.ID("peer4"), peer.ID("peer5")}
// for i := 0; i < len(pids); i++ {
// peerStatuses.Add(nil, pids[i], nil, network.DirUnknown)
// }
// for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
// scorer.Increment(pids[1])
// scorer.Increment(pids[2])
// scorer.Increment(pids[4])
// }
// assert.NoError(t, scorer.IsBadPeer(pids[0]), "Invalid peer status")
// assert.NotNil(t, scorer.IsBadPeer(pids[1]), "Invalid peer status")
// assert.NotNil(t, scorer.IsBadPeer(pids[2]), "Invalid peer status")
// assert.NoError(t, scorer.IsBadPeer(pids[3]), "Invalid peer status")
// assert.NotNil(t, scorer.IsBadPeer(pids[4]), "Invalid peer status")
// want := []peer.ID{pids[1], pids[2], pids[4]}
// badPeers := scorer.BadPeers()
// sort.Slice(badPeers, func(i, j int) bool {
// return badPeers[i] < badPeers[j]
// })
// assert.DeepEqual(t, want, badPeers, "Unexpected list of bad peers")
// }
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{},
})
scorer := peerStatuses.Scorers().BadResponsesScorer()
pids := []peer.ID{peer.ID("peer1"), peer.ID("peer2"), peer.ID("peer3"), peer.ID("peer4"), peer.ID("peer5")}
for i := 0; i < len(pids); i++ {
peerStatuses.Add(nil, pids[i], nil, network.DirUnknown)
}
for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
scorer.Increment(pids[1])
scorer.Increment(pids[2])
scorer.Increment(pids[4])
}
assert.NoError(t, scorer.IsBadPeer(pids[0]), "Invalid peer status")
assert.NotNil(t, scorer.IsBadPeer(pids[1]), "Invalid peer status")
assert.NotNil(t, scorer.IsBadPeer(pids[2]), "Invalid peer status")
assert.NoError(t, scorer.IsBadPeer(pids[3]), "Invalid peer status")
assert.NotNil(t, scorer.IsBadPeer(pids[4]), "Invalid peer status")
want := []peer.ID{pids[1], pids[2], pids[4]}
badPeers := scorer.BadPeers()
sort.Slice(badPeers, func(i, j int) bool {
return badPeers[i] < badPeers[j]
})
assert.DeepEqual(t, want, badPeers, "Unexpected list of bad peers")
}
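The scores these tests assert trace a simple curve: each bad response adds a fixed penalty until the threshold is reached, after which the peer pins to the bad-peer floor. A sketch with constants inferred from the asserted values (-2.5 steps at threshold 4, floor -100); treat the exact weights as inference, not a quote from the scorer:

package main

import "fmt"

const (
    badPeerScore  = -100.0 // floor once the threshold is reached
    penaltyWeight = -10.0  // yields the asserted -2.5 steps at threshold 4
)

// badResponsesScore reproduces the curve the tests assert: a linear
// penalty up to the threshold, then the bad-peer floor.
func badResponsesScore(badResponses, threshold int) float64 {
    if badResponses >= threshold {
        return badPeerScore
    }
    return float64(badResponses) / float64(threshold) * penaltyWeight
}

func main() {
    for n := 0; n <= 4; n++ {
        fmt.Printf("bad=%d score=%.1f\n", n, badResponsesScore(n, 4))
    }
    // bad=0 score=0.0, bad=1 score=-2.5, ..., bad=4 score=-100.0
}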

View File

@@ -42,7 +42,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
},
check: func(scorer *scorers.GossipScorer) {
assert.Equal(t, 10.0, scorer.Score("peer1"), "Unexpected score")
assert.NoError(t, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
assert.Equal(t, nil, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
_, _, topicMap, err := scorer.GossipData("peer1")
assert.NoError(t, err)
assert.Equal(t, uint64(100), topicMap["a"].TimeInMesh, "incorrect time in mesh")

View File

@@ -112,7 +112,7 @@ func (s *PeerStatusScorer) BadPeers() []peer.ID {
}
// SetPeerStatus sets chain state data for a given peer.
func (s *PeerStatusScorer) SetPeerStatus(pid peer.ID, chainState *pb.StatusV2, validationError error) {
func (s *PeerStatusScorer) SetPeerStatus(pid peer.ID, chainState *pb.Status, validationError error) {
s.store.Lock()
defer s.store.Unlock()
@@ -130,14 +130,14 @@ func (s *PeerStatusScorer) SetPeerStatus(pid peer.ID, chainState *pb.StatusV2, v
// PeerStatus gets the chain state of the given remote peer.
// This can return nil if there is no known chain state for the peer.
// This will error if the peer does not exist.
func (s *PeerStatusScorer) PeerStatus(pid peer.ID) (*pb.StatusV2, error) {
func (s *PeerStatusScorer) PeerStatus(pid peer.ID) (*pb.Status, error) {
s.store.RLock()
defer s.store.RUnlock()
return s.peerStatusNoLock(pid)
}
// peerStatusNoLock is the lock-free version of PeerStatus.
func (s *PeerStatusScorer) peerStatusNoLock(pid peer.ID) (*pb.StatusV2, error) {
func (s *PeerStatusScorer) peerStatusNoLock(pid peer.ID) (*pb.Status, error) {
if peerData, ok := s.store.PeerData(pid); ok {
if peerData.ChainState == nil {
return nil, peerdata.ErrNoPeerStatus

View File

@@ -35,7 +35,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
name: "existent bad peer",
update: func(scorer *scorers.PeerStatusScorer) {
scorer.SetHeadSlot(0)
scorer.SetPeerStatus("peer1", &pb.StatusV2{
scorer.SetPeerStatus("peer1", &pb.Status{
HeadRoot: make([]byte, 32),
HeadSlot: 64,
}, p2ptypes.ErrWrongForkDigestVersion)
@@ -48,7 +48,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
name: "existent peer no head slot for the host node is known",
update: func(scorer *scorers.PeerStatusScorer) {
scorer.SetHeadSlot(0)
scorer.SetPeerStatus("peer1", &pb.StatusV2{
scorer.SetPeerStatus("peer1", &pb.Status{
HeadRoot: make([]byte, 32),
HeadSlot: 64,
}, nil)
@@ -61,7 +61,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
name: "existent peer head is before ours",
update: func(scorer *scorers.PeerStatusScorer) {
scorer.SetHeadSlot(128)
scorer.SetPeerStatus("peer1", &pb.StatusV2{
scorer.SetPeerStatus("peer1", &pb.Status{
HeadRoot: make([]byte, 32),
HeadSlot: 64,
}, nil)
@@ -75,12 +75,12 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
update: func(scorer *scorers.PeerStatusScorer) {
headSlot := primitives.Slot(128)
scorer.SetHeadSlot(headSlot)
scorer.SetPeerStatus("peer1", &pb.StatusV2{
scorer.SetPeerStatus("peer1", &pb.Status{
HeadRoot: make([]byte, 32),
HeadSlot: headSlot + 64,
}, nil)
// Set another peer to a higher score.
scorer.SetPeerStatus("peer2", &pb.StatusV2{
scorer.SetPeerStatus("peer2", &pb.Status{
HeadRoot: make([]byte, 32),
HeadSlot: headSlot + 128,
}, nil)
@@ -95,7 +95,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
update: func(scorer *scorers.PeerStatusScorer) {
headSlot := primitives.Slot(128)
scorer.SetHeadSlot(headSlot)
scorer.SetPeerStatus("peer1", &pb.StatusV2{
scorer.SetPeerStatus("peer1", &pb.Status{
HeadRoot: make([]byte, 32),
HeadSlot: headSlot + 64,
}, nil)
@@ -108,7 +108,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
name: "existent peer no max known slot",
update: func(scorer *scorers.PeerStatusScorer) {
scorer.SetHeadSlot(0)
scorer.SetPeerStatus("peer1", &pb.StatusV2{
scorer.SetPeerStatus("peer1", &pb.Status{
HeadRoot: make([]byte, 32),
HeadSlot: 0,
}, nil)
@@ -141,7 +141,7 @@ func TestScorers_PeerStatus_IsBadPeer(t *testing.T) {
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid))
assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid, &pb.StatusV2{}, p2ptypes.ErrWrongForkDigestVersion)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer(pid))
assert.NotNil(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
}
@@ -160,9 +160,9 @@ func TestScorers_PeerStatus_BadPeers(t *testing.T) {
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid3))
assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid1, &pb.StatusV2{}, p2ptypes.ErrWrongForkDigestVersion)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid2, &pb.StatusV2{}, nil)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid3, &pb.StatusV2{}, p2ptypes.ErrWrongForkDigestVersion)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid1, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid2, &pb.Status{}, nil)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid3, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer(pid1))
assert.NotNil(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid2))
@@ -179,12 +179,12 @@ func TestScorers_PeerStatus_PeerStatus(t *testing.T) {
})
status, err := peerStatuses.Scorers().PeerStatusScorer().PeerStatus("peer1")
require.ErrorContains(t, peerdata.ErrPeerUnknown.Error(), err)
assert.Equal(t, (*pb.StatusV2)(nil), status)
assert.Equal(t, (*pb.Status)(nil), status)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus("peer1", &pb.StatusV2{
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus("peer1", &pb.Status{
HeadSlot: 128,
}, nil)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus("peer2", &pb.StatusV2{
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus("peer2", &pb.Status{
HeadSlot: 128,
}, p2ptypes.ErrInvalidEpoch)
status, err = peerStatuses.Scorers().PeerStatusScorer().PeerStatus("peer1")

View File

@@ -211,102 +211,99 @@ func TestScorers_Service_Score(t *testing.T) {
})
}
// TODO: Uncomment when out of devnet
// func TestScorers_Service_loop(t *testing.T) {
// ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
// defer cancel()
func TestScorers_Service_loop(t *testing.T) {
ctx, cancel := context.WithTimeout(t.Context(), 3*time.Second)
defer cancel()
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
// Threshold: 5,
// DecayInterval: 50 * time.Millisecond,
// },
// BlockProviderScorerConfig: &scorers.BlockProviderScorerConfig{
// DecayInterval: 25 * time.Millisecond,
// Decay: 64,
// },
// },
// })
// s1 := peerStatuses.Scorers().BadResponsesScorer()
// s2 := peerStatuses.Scorers().BlockProviderScorer()
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 5,
DecayInterval: 50 * time.Millisecond,
},
BlockProviderScorerConfig: &scorers.BlockProviderScorerConfig{
DecayInterval: 25 * time.Millisecond,
Decay: 64,
},
},
})
s1 := peerStatuses.Scorers().BadResponsesScorer()
s2 := peerStatuses.Scorers().BlockProviderScorer()
// pid1 := peer.ID("peer1")
// peerStatuses.Add(nil, pid1, nil, network.DirUnknown)
// for i := 0; i < s1.Params().Threshold+5; i++ {
// s1.Increment(pid1)
// }
// assert.NotNil(t, s1.IsBadPeer(pid1), "Peer should be marked as bad")
pid1 := peer.ID("peer1")
peerStatuses.Add(nil, pid1, nil, network.DirUnknown)
for i := 0; i < s1.Params().Threshold+5; i++ {
s1.Increment(pid1)
}
assert.NotNil(t, s1.IsBadPeer(pid1), "Peer should be marked as bad")
// s2.IncrementProcessedBlocks("peer1", 221)
// assert.Equal(t, uint64(221), s2.ProcessedBlocks("peer1"))
s2.IncrementProcessedBlocks("peer1", 221)
assert.Equal(t, uint64(221), s2.ProcessedBlocks("peer1"))
// done := make(chan struct{}, 1)
// go func() {
// defer func() {
// done <- struct{}{}
// }()
// ticker := time.NewTicker(50 * time.Millisecond)
// defer ticker.Stop()
// for {
// select {
// case <-ticker.C:
// if s1.IsBadPeer(pid1) == nil && s2.ProcessedBlocks("peer1") == 0 {
// return
// }
// case <-ctx.Done():
// t.Error("Timed out")
// return
// }
// }
// }()
done := make(chan struct{}, 1)
go func() {
defer func() {
done <- struct{}{}
}()
ticker := time.NewTicker(50 * time.Millisecond)
defer ticker.Stop()
for {
select {
case <-ticker.C:
if s1.IsBadPeer(pid1) == nil && s2.ProcessedBlocks("peer1") == 0 {
return
}
case <-ctx.Done():
t.Error("Timed out")
return
}
}
}()
// <-done
// assert.NoError(t, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
// assert.Equal(t, uint64(0), s2.ProcessedBlocks("peer1"), "No blocks are expected")
// }
<-done
assert.NoError(t, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
assert.Equal(t, uint64(0), s2.ProcessedBlocks("peer1"), "No blocks are expected")
}
// TODO: Uncomment when out of devnet
// func TestScorers_Service_IsBadPeer(t *testing.T) {
// peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
// Threshold: 2,
// DecayInterval: 50 * time.Second,
// },
// },
// })
func TestScorers_Service_IsBadPeer(t *testing.T) {
peerStatuses := peers.NewStatus(t.Context(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 2,
DecayInterval: 50 * time.Second,
},
},
})
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
// peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
// peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
// assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
// }
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
}
// TODO: Uncomment when out of devnet
// func TestScorers_Service_BadPeers(t *testing.T) {
// peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
// Threshold: 2,
// DecayInterval: 50 * time.Second,
// },
// },
// })
func TestScorers_Service_BadPeers(t *testing.T) {
peerStatuses := peers.NewStatus(t.Context(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 2,
DecayInterval: 50 * time.Second,
},
},
})
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer3"))
// assert.Equal(t, 0, len(peerStatuses.Scorers().BadPeers()))
// for _, pid := range []peer.ID{"peer1", "peer3"} {
// peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
// peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
// }
// assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
// assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer3"))
// assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
// }
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer3"))
assert.Equal(t, 0, len(peerStatuses.Scorers().BadPeers()))
for _, pid := range []peer.ID{"peer1", "peer3"} {
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
}
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer3"))
assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
}
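
The mechanical change repeated across these tests is the move from context.Background() to t.Context(). A minimal sketch of the new pattern, assuming Go 1.24+ where testing.T gained Context():

```go
package scorers_test

import (
	"context"
	"testing"
	"time"
)

func TestContextMigration(t *testing.T) {
	// Before: ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	// After: t.Context() is canceled automatically when the test finishes,
	// so only the timeout still needs explicit cleanup.
	ctx, cancel := context.WithTimeout(t.Context(), 3*time.Second)
	defer cancel()

	select {
	case <-ctx.Done():
		t.Fatal("context should still be live inside the test body")
	default: // expected: neither the timeout nor the test teardown has fired
	}
}
```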

View File

@@ -205,14 +205,14 @@ func (p *Status) ENR(pid peer.ID) (*enr.Record, error) {
}
// SetChainState sets the chain state of the given remote peer.
func (p *Status) SetChainState(pid peer.ID, chainState *pb.StatusV2) {
func (p *Status) SetChainState(pid peer.ID, chainState *pb.Status) {
p.scorers.PeerStatusScorer().SetPeerStatus(pid, chainState, nil)
}
// ChainState gets the chain state of the given remote peer.
// This will error if the peer does not exist.
// This will error if there is no known chain state for the peer.
func (p *Status) ChainState(pid peer.ID) (*pb.StatusV2, error) {
func (p *Status) ChainState(pid peer.ID) (*pb.Status, error) {
return p.scorers.PeerStatusScorer().PeerStatus(pid)
}
@@ -710,8 +710,7 @@ func (p *Status) deprecatedPrune() {
// This method may not return the absolute highest finalized epoch, but the finalized epoch in which
// most peers can serve blocks (plurality voting). Ideally, all peers would be reporting the same
// finalized epoch but some may be behind due to their own latency, or because of their finalized
// epoch at the time we queried them. Returns epoch number and list of peers that are at or beyond
// that epoch.
// epoch at the time we queried them.
func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch primitives.Epoch) (primitives.Epoch, []peer.ID) {
// Retrieve all connected peers.
connected := p.Connected()
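
The trimmed comment above still describes a plurality vote over the finalized epochs peers report. A self-contained sketch of that vote with simplified stand-in types (not Prysm's actual implementation):

```go
package main

import "fmt"

type Epoch uint64

// bestFinalized picks the finalized epoch reported by the most peers,
// preferring the higher epoch on ties (tie-breaking is an assumption of
// this sketch), and returns the peers at or beyond that epoch.
func bestFinalized(reported map[string]Epoch) (Epoch, []string) {
	votes := make(map[Epoch]int)
	for _, e := range reported {
		votes[e]++
	}
	var best Epoch
	bestCount := 0
	for e, c := range votes {
		if c > bestCount || (c == bestCount && e > best) {
			best, bestCount = e, c
		}
	}
	var supporters []string
	for pid, e := range reported {
		if e >= best { // peers at or beyond the winning epoch can serve blocks
			supporters = append(supporters, pid)
		}
	}
	return best, supporters
}

func main() {
	epoch, voters := bestFinalized(map[string]Epoch{"a": 10, "b": 10, "c": 9})
	fmt.Println(epoch, voters) // e.g. 10 [a b] (slice order follows map iteration)
}
```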
@@ -781,10 +780,7 @@ func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch primitives.Epoch)
// BestNonFinalized returns the highest known epoch, higher than ours,
// and is shared by at least minPeers.
func (p *Status) BestNonFinalized(minPeers int, ourHeadEpoch primitives.Epoch) (primitives.Epoch, []peer.ID) {
// Retrieve all connected peers.
connected := p.Connected()
// Calculate our head slot.
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
ourHeadSlot := slotsPerEpoch.Mul(uint64(ourHeadEpoch))
@@ -798,7 +794,6 @@ func (p *Status) BestNonFinalized(minPeers int, ourHeadEpoch primitives.Epoch) (
pidHead := make(map[peer.ID]primitives.Slot, len(connected))
potentialPIDs := make([]peer.ID, 0, len(connected))
for _, pid := range connected {
peerChainState, err := p.ChainState(pid)
// Skip if the peer's head epoch is not defined, or if the peer's head slot is
@@ -808,7 +803,6 @@ func (p *Status) BestNonFinalized(minPeers int, ourHeadEpoch primitives.Epoch) (
}
epoch := slots.ToEpoch(peerChainState.HeadSlot)
epochVotes[epoch]++
pidEpoch[pid] = epoch
pidHead[pid] = peerChainState.HeadSlot
@@ -1045,23 +1039,19 @@ func (p *Status) isfromBadIP(pid peer.ID) error {
return nil
}
// ip, err := manet.ToIP(peerData.Address)
// if err != nil {
// return errors.Wrap(err, "to ip")
// }
ip, err := manet.ToIP(peerData.Address)
if err != nil {
return errors.Wrap(err, "to ip")
}
// if val, ok := p.ipTracker[ip.String()]; ok {
// if val > CollocationLimit {
// TODO: Remove this once out of devnet.
// return errors.Errorf("colocation limit exceeded: got %d - limit %d", val, CollocationLimit)
// log.WithFields(logrus.Fields{
// "pid": pid,
// "ip": ip.String(),
// "colocationCount": val,
// "colocationLimit": CollocationLimit,
// }).Debug("Colocation limit exceeded. Peer should be banned.")
// }
// }
if val, ok := p.ipTracker[ip.String()]; ok {
if val > CollocationLimit {
return errors.Errorf(
"colocation limit exceeded: got %d - limit %d for peer %v with IP %v",
val, CollocationLimit, pid, ip.String(),
)
}
}
return nil
}
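
The re-enabled block above enforces the colocation limit again: a peer is rejected once its IP already hosts more than CollocationLimit peers. A simplified, self-contained sketch of that bookkeeping (the constant and the >= comparison are assumptions of the sketch; the real tracker lives on Status and uses a strict >):

```go
package main

import "fmt"

// collocationLimit is an assumption for the sketch; Prysm defines its own value.
const collocationLimit = 5

type ipTracker map[string]int

// add rejects a peer once its IP already hosts collocationLimit peers.
func (t ipTracker) add(ip string) error {
	if t[ip] >= collocationLimit {
		return fmt.Errorf("colocation limit exceeded: got %d - limit %d", t[ip], collocationLimit)
	}
	t[ip]++
	return nil
}

func main() {
	tr := ipTracker{}
	for i := 0; i < collocationLimit+1; i++ {
		if err := tr.add("211.227.218.116"); err != nil {
			fmt.Println(err) // fires on the peer that would exceed the limit
		}
	}
}
```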

View File

@@ -2,6 +2,7 @@ package peers_test
import (
"crypto/rand"
"strconv"
"testing"
"time"
@@ -288,7 +289,7 @@ func TestPeerChainState(t *testing.T) {
require.NoError(t, err)
finalizedEpoch := primitives.Epoch(123)
p.SetChainState(id, &pb.StatusV2{FinalizedEpoch: finalizedEpoch})
p.SetChainState(id, &pb.Status{FinalizedEpoch: finalizedEpoch})
resChainState, err := p.ChainState(id)
require.NoError(t, err)
@@ -323,60 +324,59 @@ func TestPeerWithNilChainState(t *testing.T) {
resChainState, err := p.ChainState(id)
require.Equal(t, peerdata.ErrNoPeerStatus, err)
var nothing *pb.StatusV2
var nothing *pb.Status
require.Equal(t, resChainState, nothing)
}
// TODO: Uncomment when out of devnet
// func TestPeerBadResponses(t *testing.T) {
// maxBadResponses := 2
// p := peers.NewStatus(context.Background(), &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
// Threshold: maxBadResponses,
// },
// },
// })
func TestPeerBadResponses(t *testing.T) {
maxBadResponses := 2
p := peers.NewStatus(t.Context(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: maxBadResponses,
},
},
})
// id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
// require.NoError(t, err)
// {
// _, err := id.MarshalBinary()
// require.NoError(t, err)
// }
id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
require.NoError(t, err)
{
_, err := id.MarshalBinary()
require.NoError(t, err)
}
// assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
// address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
// require.NoError(t, err, "Failed to create address")
// direction := network.DirInbound
// p.Add(new(enr.Record), id, address, direction)
address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
require.NoError(t, err, "Failed to create address")
direction := network.DirInbound
p.Add(new(enr.Record), id, address, direction)
// scorer := p.Scorers().BadResponsesScorer()
// resBadResponses, err := scorer.Count(id)
// require.NoError(t, err)
// assert.Equal(t, 0, resBadResponses, "Unexpected bad responses")
// assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
scorer := p.Scorers().BadResponsesScorer()
resBadResponses, err := scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 0, resBadResponses, "Unexpected bad responses")
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
// scorer.Increment(id)
// resBadResponses, err = scorer.Count(id)
// require.NoError(t, err)
// assert.Equal(t, 1, resBadResponses, "Unexpected bad responses")
// assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
scorer.Increment(id)
resBadResponses, err = scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 1, resBadResponses, "Unexpected bad responses")
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
// scorer.Increment(id)
// resBadResponses, err = scorer.Count(id)
// require.NoError(t, err)
// assert.Equal(t, 2, resBadResponses, "Unexpected bad responses")
// assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
scorer.Increment(id)
resBadResponses, err = scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 2, resBadResponses, "Unexpected bad responses")
assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
// scorer.Increment(id)
// resBadResponses, err = scorer.Count(id)
// require.NoError(t, err)
// assert.Equal(t, 3, resBadResponses, "Unexpected bad responses")
// assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
// }
scorer.Increment(id)
resBadResponses, err = scorer.Count(id)
require.NoError(t, err)
assert.Equal(t, 3, resBadResponses, "Unexpected bad responses")
assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
}
func TestAddMetaData(t *testing.T) {
maxBadResponses := 2
@@ -495,102 +495,100 @@ func TestPeerValidTime(t *testing.T) {
assert.Equal(t, numPeersConnected, len(p.Connected()), "Unexpected number of connected peers")
}
// TODO: Uncomment when out of devnet
// func TestPrune(t *testing.T) {
// maxBadResponses := 2
// p := peers.NewStatus(context.Background(), &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
// Threshold: maxBadResponses,
// },
// },
// })
func TestPrune(t *testing.T) {
maxBadResponses := 2
p := peers.NewStatus(t.Context(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: maxBadResponses,
},
},
})
// for i := 0; i < p.MaxPeerLimit()+100; i++ {
// if i%7 == 0 {
// // Peer added as disconnected.
// _ = addPeer(t, p, peers.PeerDisconnected)
// }
// // Peer added to peer handler.
// _ = addPeer(t, p, peers.PeerConnected)
// }
for i := 0; i < p.MaxPeerLimit()+100; i++ {
if i%7 == 0 {
// Peer added as disconnected.
_ = addPeer(t, p, peers.Disconnected)
}
// Peer added to peer handler.
_ = addPeer(t, p, peers.Connected)
}
// disPeers := p.Disconnected()
// firstPID := disPeers[0]
// secondPID := disPeers[1]
// thirdPID := disPeers[2]
disPeers := p.Disconnected()
firstPID := disPeers[0]
secondPID := disPeers[1]
thirdPID := disPeers[2]
// scorer := p.Scorers().BadResponsesScorer()
scorer := p.Scorers().BadResponsesScorer()
// // Make first peer a bad peer
// scorer.Increment(firstPID)
// scorer.Increment(firstPID)
// Make first peer a bad peer
scorer.Increment(firstPID)
scorer.Increment(firstPID)
// // Add bad response for p2.
// scorer.Increment(secondPID)
// Add bad response for p2.
scorer.Increment(secondPID)
// // Prune peers
// p.Prune()
// Prune peers
p.Prune()
// // Bad peer is expected to still be kept in handler.
// badRes, err := scorer.Count(firstPID)
// assert.NoError(t, err, "error is supposed to be nil")
// assert.Equal(t, 2, badRes, "Did not get expected amount")
// Bad peer is expected to still be kept in handler.
badRes, err := scorer.Count(firstPID)
assert.NoError(t, err, "error is supposed to be nil")
assert.Equal(t, 2, badRes, "Did not get expected amount")
// // Not so good peer is pruned away so that we can reduce the
// // total size of the handler.
// _, err = scorer.Count(secondPID)
// assert.ErrorContains(t, "peer unknown", err)
// Not so good peer is pruned away so that we can reduce the
// total size of the handler.
_, err = scorer.Count(secondPID)
assert.ErrorContains(t, "peer unknown", err)
// // Last peer has been removed.
// _, err = scorer.Count(thirdPID)
// assert.ErrorContains(t, "peer unknown", err)
// }
// Last peer has been removed.
_, err = scorer.Count(thirdPID)
assert.ErrorContains(t, "peer unknown", err)
}
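
TestPrune pins down the pruning policy: peers that crossed the bad-responses threshold stay in the store (so they remain known-bad), while merely mediocre disconnected peers are dropped to bound its size. A compact sketch of that policy:

```go
package main

import "fmt"

const threshold = 2

// prune keeps peers at or past the bad-responses threshold and forgets the rest.
func prune(badResponses map[string]int) {
	for pid, n := range badResponses {
		if n < threshold {
			delete(badResponses, pid) // forget merely mediocre peers
		}
	}
}

func main() {
	store := map[string]int{"first": 2, "second": 1, "third": 0}
	prune(store)
	fmt.Println(store) // map[first:2]
}
```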
// TODO: Uncomment when out of devnet
// func TestPeerIPTracker(t *testing.T) {
// resetCfg := features.InitWithReset(&features.Flags{
// EnablePeerScorer: false,
// })
// defer resetCfg()
// maxBadResponses := 2
// p := peers.NewStatus(context.Background(), &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
// Threshold: maxBadResponses,
// },
// },
// })
func TestPeerIPTracker(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnablePeerScorer: false,
})
defer resetCfg()
maxBadResponses := 2
p := peers.NewStatus(t.Context(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: maxBadResponses,
},
},
})
// badIP := "211.227.218.116"
// var badPeers []peer.ID
// for i := 0; i < peers.CollocationLimit+10; i++ {
// port := strconv.Itoa(3000 + i)
// addr, err := ma.NewMultiaddr("/ip4/" + badIP + "/tcp/" + port)
// if err != nil {
// t.Fatal(err)
// }
// badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.PeerConnectionState(ethpb.ConnectionState_DISCONNECTED)))
// }
// for _, pr := range badPeers {
// assert.NotNil(t, p.IsBad(pr), "peer with bad ip is not bad")
// }
badIP := "211.227.218.116"
var badPeers []peer.ID
for i := 0; i < peers.CollocationLimit+10; i++ {
port := strconv.Itoa(3000 + i)
addr, err := ma.NewMultiaddr("/ip4/" + badIP + "/tcp/" + port)
if err != nil {
t.Fatal(err)
}
badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.ConnectionState(ethpb.ConnectionState_DISCONNECTED)))
}
for _, pr := range badPeers {
assert.NotNil(t, p.IsBad(pr), "peer with bad ip is not bad")
}
// // Add in bad peers, so that our records are trimmed out
// // from the peer store.
// for i := 0; i < p.MaxPeerLimit()+100; i++ {
// // Peer added to peer handler.
// pid := addPeer(t, p, peers.PeerDisconnected)
// p.Scorers().BadResponsesScorer().Increment(pid)
// }
// p.Prune()
// Add in bad peers, so that our records are trimmed out
// from the peer store.
for i := 0; i < p.MaxPeerLimit()+100; i++ {
// Peer added to peer handler.
pid := addPeer(t, p, peers.Disconnected)
p.Scorers().BadResponsesScorer().Increment(pid)
}
p.Prune()
// for _, pr := range badPeers {
// assert.NoError(t, p.IsBad(pr), "peer with good ip is regarded as bad")
// }
// }
for _, pr := range badPeers {
assert.NoError(t, p.IsBad(pr), "peer with good ip is regarded as bad")
}
}
func TestTrimmedOrderedPeers(t *testing.T) {
p := peers.NewStatus(t.Context(), &peers.StatusConfig{
@@ -618,7 +616,7 @@ func TestTrimmedOrderedPeers(t *testing.T) {
// Peer 1
pid1 := addPeer(t, p, peers.Connected)
p.SetChainState(pid1, &pb.StatusV2{
p.SetChainState(pid1, &pb.Status{
HeadSlot: 3 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 3,
FinalizedRoot: mockroot3[:],
@@ -626,7 +624,7 @@ func TestTrimmedOrderedPeers(t *testing.T) {
// Peer 2
pid2 := addPeer(t, p, peers.Connected)
p.SetChainState(pid2, &pb.StatusV2{
p.SetChainState(pid2, &pb.Status{
HeadSlot: 4 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 4,
FinalizedRoot: mockroot4[:],
@@ -634,7 +632,7 @@ func TestTrimmedOrderedPeers(t *testing.T) {
// Peer 3
pid3 := addPeer(t, p, peers.Connected)
p.SetChainState(pid3, &pb.StatusV2{
p.SetChainState(pid3, &pb.Status{
HeadSlot: 5 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 5,
FinalizedRoot: mockroot5[:],
@@ -642,7 +640,7 @@ func TestTrimmedOrderedPeers(t *testing.T) {
// Peer 4
pid4 := addPeer(t, p, peers.Connected)
p.SetChainState(pid4, &pb.StatusV2{
p.SetChainState(pid4, &pb.Status{
HeadSlot: 2 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 2,
FinalizedRoot: mockroot2[:],
@@ -650,7 +648,7 @@ func TestTrimmedOrderedPeers(t *testing.T) {
// Peer 5
pid5 := addPeer(t, p, peers.Connected)
p.SetChainState(pid5, &pb.StatusV2{
p.SetChainState(pid5, &pb.Status{
HeadSlot: 2 * params.BeaconConfig().SlotsPerEpoch,
FinalizedEpoch: 2,
FinalizedRoot: mockroot2[:],
@@ -1014,7 +1012,7 @@ func TestStatus_BestPeer(t *testing.T) {
},
})
for _, peerConfig := range tt.peers {
p.SetChainState(addPeer(t, p, peers.Connected), &pb.StatusV2{
p.SetChainState(addPeer(t, p, peers.Connected), &pb.Status{
FinalizedEpoch: peerConfig.finalizedEpoch,
HeadSlot: peerConfig.headSlot,
})
@@ -1041,7 +1039,7 @@ func TestBestFinalized_returnsMaxValue(t *testing.T) {
for i := 0; i <= maxPeers+100; i++ {
p.Add(new(enr.Record), peer.ID(rune(i)), nil, network.DirOutbound)
p.SetConnectionState(peer.ID(rune(i)), peers.Connected)
p.SetChainState(peer.ID(rune(i)), &pb.StatusV2{
p.SetChainState(peer.ID(rune(i)), &pb.Status{
FinalizedEpoch: 10,
})
}
@@ -1064,7 +1062,7 @@ func TestStatus_BestNonFinalized(t *testing.T) {
for i, headSlot := range peerSlots {
p.Add(new(enr.Record), peer.ID(rune(i)), nil, network.DirOutbound)
p.SetConnectionState(peer.ID(rune(i)), peers.Connected)
p.SetChainState(peer.ID(rune(i)), &pb.StatusV2{
p.SetChainState(peer.ID(rune(i)), &pb.Status{
HeadSlot: headSlot,
})
}
@@ -1087,17 +1085,17 @@ func TestStatus_CurrentEpoch(t *testing.T) {
})
// Peer 1
pid1 := addPeer(t, p, peers.Connected)
p.SetChainState(pid1, &pb.StatusV2{
p.SetChainState(pid1, &pb.Status{
HeadSlot: params.BeaconConfig().SlotsPerEpoch * 4,
})
// Peer 2
pid2 := addPeer(t, p, peers.Connected)
p.SetChainState(pid2, &pb.StatusV2{
p.SetChainState(pid2, &pb.Status{
HeadSlot: params.BeaconConfig().SlotsPerEpoch * 5,
})
// Peer 3
pid3 := addPeer(t, p, peers.Connected)
p.SetChainState(pid3, &pb.StatusV2{
p.SetChainState(pid3, &pb.Status{
HeadSlot: params.BeaconConfig().SlotsPerEpoch * 4,
})

View File

@@ -1,12 +1,12 @@
package p2p
import (
"encoding/hex"
"fmt"
"strings"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/network/forks"
pubsub "github.com/libp2p/go-libp2p-pubsub"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/libp2p/go-libp2p/core/peer"
@@ -32,14 +32,6 @@ var _ pubsub.SubscriptionFilter = (*Service)(nil)
// (Note: BlobSidecar is not included in this list since it is superseded by DataColumnSidecar)
const pubsubSubscriptionRequestLimit = 500
func (s *Service) setAllForkDigests() {
entries := params.SortedNetworkScheduleEntries()
s.allForkDigests = make(map[[4]byte]struct{}, len(entries))
for _, entry := range entries {
s.allForkDigests[entry.ForkDigest] = struct{}{}
}
}
// CanSubscribe returns true if the topic is of interest and we could subscribe to it.
func (s *Service) CanSubscribe(topic string) bool {
if !s.isInitialized() {
@@ -56,18 +48,50 @@ func (s *Service) CanSubscribe(topic string) bool {
if parts[1] != "eth2" {
return false
}
var digest [4]byte
dl, err := hex.Decode(digest[:], []byte(parts[2]))
if err == nil && dl != 4 {
err = fmt.Errorf("expected 4 bytes, got %d", dl)
}
phase0ForkDigest, err := s.currentForkDigest()
if err != nil {
log.WithError(err).WithField("topic", topic).WithField("digest", parts[2]).Error("CanSubscribe failed to parse message")
log.WithError(err).Error("Could not determine fork digest")
return false
}
if _, ok := s.allForkDigests[digest]; !ok {
log.WithField("topic", topic).WithField("digest", fmt.Sprintf("%#x", digest)).Error("CanSubscribe failed to find digest in allForkDigests")
altairForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().AltairForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine altair fork digest")
return false
}
bellatrixForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().BellatrixForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Bellatrix fork digest")
return false
}
capellaForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().CapellaForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Capella fork digest")
return false
}
denebForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Deneb fork digest")
return false
}
electraForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().ElectraForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Electra fork digest")
return false
}
fuluForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().FuluForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Fulu fork digest")
return false
}
switch parts[2] {
case fmt.Sprintf("%x", phase0ForkDigest):
case fmt.Sprintf("%x", altairForkDigest):
case fmt.Sprintf("%x", bellatrixForkDigest):
case fmt.Sprintf("%x", capellaForkDigest):
case fmt.Sprintf("%x", denebForkDigest):
case fmt.Sprintf("%x", electraForkDigest):
case fmt.Sprintf("%x", fuluForkDigest):
default:
return false
}
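
The rewritten CanSubscribe enumerates each fork's digest and accepts a topic only when its digest segment matches one of them. The matching step in isolation (placeholder digests, simplified parsing):

```go
package main

import (
	"fmt"
	"strings"
)

// canSubscribe accepts a gossip topic "/eth2/<fork digest hex>/<name>/<encoding>"
// only when its digest segment equals one of the known fork digests.
func canSubscribe(topic string, known [][4]byte) bool {
	parts := strings.Split(topic, "/")
	if len(parts) < 3 || parts[1] != "eth2" {
		return false
	}
	for _, d := range known {
		if parts[2] == fmt.Sprintf("%x", d) {
			return true
		}
	}
	return false
}

func main() {
	known := [][4]byte{{0x01, 0x02, 0x03, 0x04}} // placeholder, not a real network digest
	fmt.Println(canSubscribe("/eth2/01020304/beacon_block/ssz_snappy", known)) // true
	fmt.Println(canSubscribe("/eth2/deadbeef/beacon_block/ssz_snappy", known)) // false
}
```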

View File

@@ -11,6 +11,8 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
prysmTime "github.com/OffchainLabs/prysm/v6/time"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
@@ -21,8 +23,10 @@ func TestService_CanSubscribe(t *testing.T) {
params.SetupTestConfigCleanup(t)
currentFork := [4]byte{0x01, 0x02, 0x03, 0x04}
validProtocolSuffix := "/" + encoder.ProtocolSuffixSSZSnappy
clock := startup.NewClock(time.Now(), [32]byte{})
digest := params.ForkDigest(clock.CurrentEpoch())
genesisTime := time.Now()
var valRoot [32]byte
digest, err := forks.CreateForkDigest(genesisTime, valRoot[:])
assert.NoError(t, err)
type test struct {
name string
topic string
@@ -104,12 +108,11 @@ func TestService_CanSubscribe(t *testing.T) {
}
tests = append(tests, tt)
}
valRoot := clock.GenesisValidatorsRoot()
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &Service{
genesisValidatorsRoot: valRoot[:],
genesisTime: clock.GenesisTime(),
genesisTime: genesisTime,
}
if got := s.CanSubscribe(tt.topic); got != tt.want {
t.Errorf("CanSubscribe(%s) = %v, want %v", tt.topic, got, tt.want)
@@ -217,8 +220,10 @@ func TestGossipTopicMapping_scanfcheck_GossipTopicFormattingSanityCheck(t *testi
func TestService_FilterIncomingSubscriptions(t *testing.T) {
params.SetupTestConfigCleanup(t)
validProtocolSuffix := "/" + encoder.ProtocolSuffixSSZSnappy
clock := startup.NewClock(time.Now(), [32]byte{})
digest := params.ForkDigest(clock.CurrentEpoch())
genesisTime := time.Now()
var valRoot [32]byte
digest, err := forks.CreateForkDigest(genesisTime, valRoot[:])
assert.NoError(t, err)
type args struct {
id peer.ID
subs []*pubsubpb.RPC_SubOpts
@@ -315,12 +320,11 @@ func TestService_FilterIncomingSubscriptions(t *testing.T) {
},
},
}
valRoot := clock.GenesisValidatorsRoot()
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &Service{
genesisValidatorsRoot: valRoot[:],
genesisTime: clock.GenesisTime(),
genesisTime: genesisTime,
}
got, err := s.FilterIncomingSubscriptions(tt.args.id, tt.args.subs)
if (err != nil) != tt.wantErr {

View File

@@ -108,8 +108,6 @@ const (
RPCDataColumnSidecarsByRangeTopicV1 = protocolPrefix + DataColumnSidecarsByRangeName + SchemaVersionV1
// V2 RPC Topics
// RPCStatusTopicV2 defines the v2 topic for the status rpc method.
RPCStatusTopicV2 = protocolPrefix + StatusMessageName + SchemaVersionV2
// RPCBlocksByRangeTopicV2 defines the v2 topic for the blocks by range rpc method.
RPCBlocksByRangeTopicV2 = protocolPrefix + BeaconBlocksByRangeMessageName + SchemaVersionV2
// RPCBlocksByRootTopicV2 defines the v2 topic for the blocks by root rpc method.
@@ -132,7 +130,6 @@ var (
RPCTopicMappings = map[string]interface{}{
// RPC Status Message
RPCStatusTopicV1: new(pb.Status),
RPCStatusTopicV2: new(pb.StatusV2),
// RPC Goodbye Message
RPCGoodByeTopicV1: new(primitives.SSZUint64),
@@ -177,8 +174,7 @@ var (
protocolPrefix: true,
}
// Maps all the protocol message names for the different rpc
// topics.
// Maps all the protocol message names for the different rpc topics.
messageMapping = map[string]bool{
StatusMessageName: true,
GoodbyeMessageName: true,
@@ -205,7 +201,6 @@ var (
// Maps all the RPC messages which are to be updated in fulu.
fuluMapping = map[string]string{
StatusMessageName: SchemaVersionV2,
MetadataMessageName: SchemaVersionV3,
}
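
With the status entry dropped, fuluMapping now only routes metadata requests to schema v3 at the Fulu fork. For orientation, a versioned RPC topic is just the concatenation of the pieces above; the constant values in this sketch are assumed from the topic strings visible in the tests, not copied from the source:

```go
package main

import "fmt"

// Assumed values, inferred from "/eth2/beacon_chain/req/beacon_blocks_by_range/2"
// in the tests below; the real constants live in the p2p package.
const (
	protocolPrefix      = "/eth2/beacon_chain/req"
	MetadataMessageName = "/metadata"
	SchemaVersionV3     = "/3"
)

func main() {
	topic := protocolPrefix + MetadataMessageName + SchemaVersionV3
	fmt.Println(topic) // /eth2/beacon_chain/req/metadata/3
}
```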

View File

@@ -141,11 +141,6 @@ func TestTopicFromMessage_CorrectType(t *testing.T) {
require.NoError(t, err)
require.Equal(t, "/eth2/beacon_chain/req/beacon_blocks_by_range/2", topic)
// Modified in fulu fork.
topic, err = TopicFromMessage(StatusMessageName, fuluForkEpoch)
require.NoError(t, err)
require.Equal(t, "/eth2/beacon_chain/req/status/2", topic)
// Modified both in altair and fulu fork.
topic, err = TopicFromMessage(MetadataMessageName, fuluForkEpoch)
require.NoError(t, err)

View File

@@ -14,7 +14,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
@@ -83,8 +82,6 @@ type Service struct {
genesisTime time.Time
genesisValidatorsRoot []byte
activeValidatorCount uint64
clock *startup.Clock
allForkDigests map[[4]byte]struct{}
}
// NewService initializes a new p2p service compatible with shared.Service interface. No
@@ -434,11 +431,10 @@ func (s *Service) awaitStateInitialized() {
if err != nil {
log.WithError(err).Fatal("failed to receive initial genesis data")
}
s.setAllForkDigests()
s.genesisTime = clock.GenesisTime()
gvr := clock.GenesisValidatorsRoot()
s.genesisValidatorsRoot = gvr[:]
_, err = s.currentForkDigest()
_, err = s.currentForkDigest() // initialize fork digest cache
if err != nil {
log.WithError(err).Error("Could not initialize fork digest")
}

View File

@@ -10,9 +10,12 @@ import (
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
prysmTime "github.com/OffchainLabs/prysm/v6/time"
@@ -345,57 +348,58 @@ func TestService_JoinLeaveTopic(t *testing.T) {
func initializeStateWithForkDigest(_ context.Context, t *testing.T, gs startup.ClockSetter) [4]byte {
gt := prysmTime.Now()
gvr := bytesutil.ToBytes32(bytesutil.PadTo([]byte("genesis validators root"), 32))
clock := startup.NewClock(gt, gvr)
require.NoError(t, gs.SetClock(clock))
require.NoError(t, gs.SetClock(startup.NewClock(gt, gvr)))
fd, err := forks.CreateForkDigest(gt, gvr[:])
require.NoError(t, err)
time.Sleep(50 * time.Millisecond) // wait for pubsub filter to initialize.
return params.ForkDigest(clock.CurrentEpoch())
return fd
}
// TODO: Uncomment when out of devnet
// func TestService_connectWithPeer(t *testing.T) {
// params.SetupTestConfigCleanup(t)
// tests := []struct {
// name string
// peers *peers.Status
// info peer.AddrInfo
// wantErr string
// }{
// {
// name: "bad peer",
// peers: func() *peers.Status {
// ps := peers.NewStatus(context.Background(), &peers.StatusConfig{
// ScorerParams: &scorers.Config{},
// })
// for i := 0; i < 10; i++ {
// ps.Scorers().BadResponsesScorer().Increment("bad")
// }
// return ps
// }(),
// info: peer.AddrInfo{ID: "bad"},
// wantErr: "refused to connect to bad peer",
// },
// }
// for _, tt := range tests {
// t.Run(tt.name, func(t *testing.T) {
// h, _, _ := createHost(t, 34567)
// defer func() {
// if err := h.Close(); err != nil {
// t.Fatal(err)
// }
// }()
// ctx := context.Background()
// s := &Service{
// host: h,
// peers: tt.peers,
// }
// err := s.connectWithPeer(ctx, tt.info)
// if len(tt.wantErr) > 0 {
// require.ErrorContains(t, tt.wantErr, err)
// } else {
// require.NoError(t, err)
// }
// })
// }
// }
func TestService_connectWithPeer(t *testing.T) {
params.SetupTestConfigCleanup(t)
tests := []struct {
name string
peers *peers.Status
info peer.AddrInfo
wantErr string
}{
{
name: "bad peer",
peers: func() *peers.Status {
ps := peers.NewStatus(t.Context(), &peers.StatusConfig{
ScorerParams: &scorers.Config{},
})
for i := 0; i < 10; i++ {
ps.Scorers().BadResponsesScorer().Increment("bad")
}
return ps
}(),
info: peer.AddrInfo{ID: "bad"},
wantErr: "refused to connect to bad peer",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h, _, _ := createHost(t, 34567)
defer func() {
if err := h.Close(); err != nil {
t.Fatal(err)
}
}()
ctx := t.Context()
s := &Service{
host: h,
peers: tt.peers,
}
err := s.connectWithPeer(ctx, tt.info)
if len(tt.wantErr) > 0 {
require.ErrorContains(t, tt.wantErr, err)
} else {
require.NoError(t, err)
}
})
}
}
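
initializeStateWithForkDigest now computes the digest with forks.CreateForkDigest from genesis time and the genesis validators root instead of going through the clock. The call in isolation (signature assumed from the call site above):

```go
package main

import (
	"fmt"
	"time"

	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	"github.com/OffchainLabs/prysm/v6/network/forks"
)

func main() {
	gt := time.Now()
	gvr := bytesutil.ToBytes32(bytesutil.PadTo([]byte("genesis validators root"), 32))
	// CreateForkDigest derives the current [4]byte fork digest from genesis data.
	fd, err := forks.CreateForkDigest(gt, gvr[:])
	if err != nil {
		panic(err)
	}
	fmt.Printf("%#x\n", fd)
}
```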

View File

@@ -23,6 +23,7 @@ import (
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/sirupsen/logrus"
)
var (
@@ -34,8 +35,6 @@ var (
custodyGroupCountEnrKey = params.BeaconNetworkConfig().CustodyGroupCountKey
)
const nfdEnrKey = "nfd" // The ENR record key for "nfd" (Next Fork Digest).
// The value used with the subnet, in order
// to create an appropriate key to retrieve
// the relevant lock. This is used to differentiate
@@ -75,8 +74,8 @@ func (s *Service) nodeFilter(topic string, index uint64) (func(node *enode.Node)
// searchForPeers performs a network search for peers subscribed to a particular subnet.
// It exits as soon as one of these conditions is met:
// - It looped during `batchPeriod` duration, or
// - It found `peersToFindCount` peers corresponding to the `filter` criteria, or
// - It looped through `batchSize` nodes.
// - It found `peersToFindCount` peers corresponding to the `filter` criteria.
// - Iterator is exhausted.
func searchForPeers(
iterator enode.Iterator,
@@ -148,6 +147,8 @@ func (s *Service) FindPeersWithSubnet(
index uint64,
threshold int,
) (bool, error) {
const minLogInterval = 1 * time.Minute
ctx, span := trace.StartSpan(ctx, "p2p.FindPeersWithSubnet")
defer span.End()
@@ -167,29 +168,41 @@ func (s *Service) FindPeersWithSubnet(
return false, errors.Wrap(err, "node filter")
}
peersSummary := func(topic string, threshold int) int {
peersSummary := func(topic string, threshold int) (int, int) {
// Retrieve how many peers we have for this topic.
peerCountForTopic := len(s.pubsub.ListPeers(topic))
// Compute how many peers we are missing to reach the threshold.
missingPeerCountForTopic := max(0, threshold-peerCountForTopic)
return missingPeerCountForTopic
return peerCountForTopic, missingPeerCountForTopic
}
// Compute how many peers we are missing to reach the threshold.
missingPeerCountForTopic := peersSummary(topic, threshold)
peerCountForTopic, missingPeerCountForTopic := peersSummary(topic, threshold)
// Exit early if we have enough peers.
if missingPeerCountForTopic == 0 {
return true, nil
}
log := log.WithFields(logrus.Fields{
"topic": topic,
"targetPeerCount": threshold,
})
log.WithField("currentPeerCount", peerCountForTopic).Debug("Searching for new peers for a subnet - start")
lastLogTime := time.Now()
wg := new(sync.WaitGroup)
for {
// If the context is done, we can exit the loop. This is the unhappy path.
if ctx.Err() != nil {
return false, nil
if err := ctx.Err(); err != nil {
return false, errors.Errorf(
"unable to find requisite number of peers for topic %s - only %d out of %d peers available after searching",
topic, peerCountForTopic, threshold,
)
}
// Search for new peers in the network.
@@ -212,14 +225,20 @@ func (s *Service) FindPeersWithSubnet(
wg.Wait()
}
missingPeerCountForTopic := peersSummary(topic, threshold)
peerCountForTopic, missingPeerCountForTopic := peersSummary(topic, threshold)
// If we have enough peers, we can exit the loop. This is the happy path.
if missingPeerCountForTopic == 0 {
break
}
if time.Since(lastLogTime) > minLogInterval {
lastLogTime = time.Now()
log.WithField("currentPeerCount", peerCountForTopic).Debug("Searching for new peers for a subnet - continue")
}
}
log.WithField("currentPeerCount", threshold).Debug("Searching for new peers for a subnet - success")
return true, nil
}
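
Alongside the more informative error, the search loop now logs progress at most once per minLogInterval. The throttling pattern on its own, with shortened intervals so the sketch runs quickly:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const minLogInterval = 200 * time.Millisecond // 1 minute in the real code
	lastLogTime := time.Now()
	deadline := time.Now().Add(time.Second)

	for time.Now().Before(deadline) { // stands in for "until enough peers found"
		if time.Since(lastLogTime) > minLogInterval {
			lastLogTime = time.Now()
			fmt.Println("Searching for new peers for a subnet - continue")
		}
		time.Sleep(10 * time.Millisecond) // stands in for a discovery batch
	}
	fmt.Println("Searching for new peers for a subnet - success")
}
```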
@@ -317,23 +336,11 @@ func (s *Service) updateSubnetRecordWithMetadata(bitV bitfield.Bitvector64) {
// with a new value for a bitfield of subnets tracked. It also records
// the sync committee subnet in the enr. It also updates the node's
// metadata by increasing the sequence number and the subnets tracked by the node.
func (s *Service) updateSubnetRecordWithMetadataV2(
bitVAtt bitfield.Bitvector64,
bitVSync bitfield.Bitvector4,
custodyGroupCount uint64,
) {
func (s *Service) updateSubnetRecordWithMetadataV2(bitVAtt bitfield.Bitvector64, bitVSync bitfield.Bitvector4) {
entry := enr.WithEntry(attSubnetEnrKey, &bitVAtt)
subEntry := enr.WithEntry(syncCommsSubnetEnrKey, &bitVSync)
localNode := s.dv5Listener.LocalNode()
localNode.Set(entry)
localNode.Set(subEntry)
if params.FuluEnabled() {
custodyGroupCountEntry := enr.WithEntry(custodyGroupCountEnrKey, custodyGroupCount)
localNode.Set(custodyGroupCountEntry)
}
s.dv5Listener.LocalNode().Set(entry)
s.dv5Listener.LocalNode().Set(subEntry)
s.metaData = wrapper.WrappedMetadataV1(&pb.MetaDataV1{
SeqNumber: s.metaData.SequenceNumber() + 1,
Attnets: bitVAtt,
@@ -360,8 +367,10 @@ func (s *Service) updateSubnetRecordWithMetadataV3(
localNode.Set(syncSubnetsEntry)
localNode.Set(custodyGroupCountEntry)
newSeqNumber := s.metaData.SequenceNumber() + 1
s.metaData = wrapper.WrappedMetadataV2(&pb.MetaDataV2{
SeqNumber: s.metaData.SequenceNumber() + 1,
SeqNumber: newSeqNumber,
Attnets: bitVAtt,
Syncnets: bitVSync,
CustodyGroupCount: custodyGroupCount,

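Both metadata update paths follow the same recipe: mirror the new bitfields into the local ENR, then republish metadata with the sequence number bumped by one so peers notice the change. A simplified sketch with stand-in types (not Prysm's wrappers):

```go
package main

import "fmt"

type metadata struct {
	SeqNumber uint64
	Attnets   []byte
}

type node struct {
	enr  map[string][]byte
	meta metadata
}

// updateSubnetRecord mirrors the bitfield into the ENR and bumps the
// metadata sequence number, as updateSubnetRecordWithMetadataV2/V3 do above.
func (n *node) updateSubnetRecord(attnets []byte) {
	n.enr["attnets"] = attnets
	n.meta = metadata{
		SeqNumber: n.meta.SeqNumber + 1, // bump so peers notice the change
		Attnets:   attnets,
	}
}

func main() {
	n := &node{enr: map[string][]byte{}}
	n.updateSubnetRecord([]byte{0x01})
	fmt.Println(n.meta.SeqNumber) // 1
}
```
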
View File

@@ -65,7 +65,7 @@ func (m *MockPeersProvider) Peers() *peers.Status {
}
m.peers.Add(createENR(), id0, ma0, network.DirInbound)
m.peers.SetConnectionState(id0, peers.Connected)
m.peers.SetChainState(id0, &pb.StatusV2{FinalizedEpoch: 10})
m.peers.SetChainState(id0, &pb.Status{FinalizedEpoch: 10})
id1, err := peer.Decode(MockRawPeerId1)
if err != nil {
log.WithError(err).Debug("Cannot decode")
@@ -76,7 +76,7 @@ func (m *MockPeersProvider) Peers() *peers.Status {
}
m.peers.Add(createENR(), id1, ma1, network.DirOutbound)
m.peers.SetConnectionState(id1, peers.Connected)
m.peers.SetChainState(id1, &pb.StatusV2{FinalizedEpoch: 11})
m.peers.SetChainState(id1, &pb.Status{FinalizedEpoch: 11})
}
return m.peers
}

View File

@@ -206,14 +206,14 @@ func (s BlobSidecarsByRootReq) Swap(i, j int) {
}
// Len is the number of elements in the collection.
func (s *BlobSidecarsByRootReq) Len() int {
return len(*s)
func (s BlobSidecarsByRootReq) Len() int {
return len(s)
}
// ====================================
// DataColumnsByRootIdentifiers section
// ====================================
var _ ssz.Marshaler = (*DataColumnsByRootIdentifiers)(nil)
var _ ssz.Marshaler = DataColumnsByRootIdentifiers{}
var _ ssz.Unmarshaler = (*DataColumnsByRootIdentifiers)(nil)
// DataColumnsByRootIdentifiers is used to specify a list of data column targets (root+index) in a DataColumnSidecarsByRoot RPC request.
@@ -275,33 +275,33 @@ func (d *DataColumnsByRootIdentifiers) UnmarshalSSZ(buf []byte) error {
return nil
}
func (d *DataColumnsByRootIdentifiers) MarshalSSZ() ([]byte, error) {
func (d DataColumnsByRootIdentifiers) MarshalSSZ() ([]byte, error) {
var err error
count := len(*d)
count := len(d)
maxSize := params.BeaconConfig().MaxRequestBlocksDeneb
if uint64(count) > maxSize {
return nil, errors.Errorf("data column identifiers list exceeds max size: %d > %d", count, maxSize)
}
if len(*d) == 0 {
if len(d) == 0 {
return []byte{}, nil
}
sizes := make([]uint32, count)
valTotal := uint32(0)
for i, elem := range *d {
for i, elem := range d {
if elem == nil {
return nil, errors.New("nil item in DataColumnsByRootIdentifiers list")
}
sizes[i] = uint32(elem.SizeSSZ())
valTotal += sizes[i]
}
offSize := uint32(4 * len(*d))
offSize := uint32(4 * len(d))
out := make([]byte, offSize, offSize+valTotal)
for i := range sizes {
binary.LittleEndian.PutUint32(out[i*4:i*4+4], offSize)
offSize += sizes[i]
}
for _, elem := range *d {
for _, elem := range d {
out, err = elem.MarshalSSZTo(out)
if err != nil {
return nil, err
@@ -312,7 +312,7 @@ func (d *DataColumnsByRootIdentifiers) MarshalSSZ() ([]byte, error) {
}
// MarshalSSZTo implements ssz.Marshaler. It appends the serialized DataColumnSidecarsByRootReq value to the provided byte slice.
func (d *DataColumnsByRootIdentifiers) MarshalSSZTo(dst []byte) ([]byte, error) {
func (d DataColumnsByRootIdentifiers) MarshalSSZTo(dst []byte) ([]byte, error) {
obj, err := d.MarshalSSZ()
if err != nil {
return nil, err
@@ -321,11 +321,11 @@ func (d *DataColumnsByRootIdentifiers) MarshalSSZTo(dst []byte) ([]byte, error)
}
// SizeSSZ implements ssz.Marshaler. It returns the size of the serialized representation.
func (d *DataColumnsByRootIdentifiers) SizeSSZ() int {
func (d DataColumnsByRootIdentifiers) SizeSSZ() int {
size := 0
for i := 0; i < len(*d); i++ {
for i := 0; i < len(d); i++ {
size += 4
size += (*d)[i].SizeSSZ()
size += (d)[i].SizeSSZ()
}
return size
}
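
Switching the read-only methods of DataColumnsByRootIdentifiers from pointer to value receivers matters for a slice-backed type: no indirection is needed to read it, and the value type itself then satisfies ssz.Marshaler (note the updated var _ ssz.Marshaler = DataColumnsByRootIdentifiers{} assertion above). The idea in miniature:

```go
package main

import "fmt"

type sizer interface{ SizeSSZ() int }

type ids [][4]byte

// Value receiver: reading a slice needs no pointer indirection.
func (d ids) SizeSSZ() int {
	size := 0
	for i := 0; i < len(d); i++ {
		size += 4 // per-element offset
		size += len(d[i])
	}
	return size
}

var _ sizer = ids{} // the value type satisfies the interface directly

func main() {
	fmt.Println(ids{{1, 2, 3, 4}}.SizeSSZ()) // 8
}
```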

View File

@@ -130,6 +130,7 @@ func (s *Service) rewardsEndpoints(blocker lookup.Blocker, stater lookup.Stater,
name: namespace + ".BlockRewards",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.BlockRewards,
methods: []string{http.MethodGet},
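
Every hunk in this file makes the same one-line change: adding middleware.AcceptEncodingHeaderHandler() to the endpoint's middleware chain. The real handler lives in Prysm's middleware package; the standalone sketch below only illustrates a plausible shape for such a middleware and is not the actual implementation:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

type Middleware func(http.Handler) http.Handler

// AcceptEncodingHeaderHandler is a hypothetical stand-in: it vets the
// request's Accept-Encoding header before handing off to the endpoint.
func AcceptEncodingHeaderHandler() Middleware {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			enc := r.Header.Get("Accept-Encoding")
			// Assumed policy for the sketch: allow empty, gzip, or identity.
			if enc != "" && !strings.Contains(enc, "gzip") && !strings.Contains(enc, "identity") {
				http.Error(w, "unsupported Accept-Encoding", http.StatusNotAcceptable)
				return
			}
			next.ServeHTTP(w, r)
		})
	}
}

func main() {
	h := AcceptEncodingHeaderHandler()(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprintln(w, "ok")
	}))
	_ = h // wire into a mux/router as usual
}
```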
@@ -140,6 +141,7 @@ func (s *Service) rewardsEndpoints(blocker lookup.Blocker, stater lookup.Stater,
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.AttestationRewards,
methods: []string{http.MethodPost},
@@ -150,6 +152,7 @@ func (s *Service) rewardsEndpoints(blocker lookup.Blocker, stater lookup.Stater,
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.SyncCommitteeRewards,
methods: []string{http.MethodPost},
@@ -172,6 +175,7 @@ func (s *Service) builderEndpoints(stater lookup.Stater) []endpoint {
name: namespace + ".ExpectedWithdrawals",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.ExpectedWithdrawals,
methods: []string{http.MethodGet},
@@ -194,6 +198,7 @@ func (s *Service) blobEndpoints(blocker lookup.Blocker) []endpoint {
name: namespace + ".Blobs",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.Blobs,
methods: []string{http.MethodGet},
@@ -237,6 +242,7 @@ func (s *Service) validatorEndpoints(
name: namespace + ".GetAggregateAttestation",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetAggregateAttestation,
methods: []string{http.MethodGet},
@@ -246,6 +252,7 @@ func (s *Service) validatorEndpoints(
name: namespace + ".GetAggregateAttestationV2",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetAggregateAttestationV2,
methods: []string{http.MethodGet},
@@ -256,6 +263,7 @@ func (s *Service) validatorEndpoints(
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.SubmitContributionAndProofs,
methods: []string{http.MethodPost},
@@ -267,6 +275,7 @@ func (s *Service) validatorEndpoints(
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.SubmitAggregateAndProofs,
methods: []string{http.MethodPost},
@@ -277,6 +286,7 @@ func (s *Service) validatorEndpoints(
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.SubmitAggregateAndProofsV2,
methods: []string{http.MethodPost},
@@ -286,6 +296,7 @@ func (s *Service) validatorEndpoints(
name: namespace + ".ProduceSyncCommitteeContribution",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.ProduceSyncCommitteeContribution,
methods: []string{http.MethodGet},
@@ -296,6 +307,7 @@ func (s *Service) validatorEndpoints(
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.SubmitSyncCommitteeSubscription,
methods: []string{http.MethodPost},
@@ -306,6 +318,7 @@ func (s *Service) validatorEndpoints(
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.SubmitBeaconCommitteeSubscription,
methods: []string{http.MethodPost},
@@ -315,6 +328,7 @@ func (s *Service) validatorEndpoints(
name: namespace + ".GetAttestationData",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetAttestationData,
methods: []string{http.MethodGet},
@@ -325,6 +339,7 @@ func (s *Service) validatorEndpoints(
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.RegisterValidator,
methods: []string{http.MethodPost},
@@ -335,6 +350,7 @@ func (s *Service) validatorEndpoints(
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetAttesterDuties,
methods: []string{http.MethodPost},
@@ -344,6 +360,7 @@ func (s *Service) validatorEndpoints(
name: namespace + ".GetProposerDuties",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetProposerDuties,
methods: []string{http.MethodGet},
@@ -354,6 +371,7 @@ func (s *Service) validatorEndpoints(
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetSyncCommitteeDuties,
methods: []string{http.MethodPost},
@@ -364,6 +382,7 @@ func (s *Service) validatorEndpoints(
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.PrepareBeaconProposer,
methods: []string{http.MethodPost},
@@ -374,6 +393,7 @@ func (s *Service) validatorEndpoints(
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetLiveness,
methods: []string{http.MethodPost},
@@ -383,6 +403,7 @@ func (s *Service) validatorEndpoints(
name: namespace + ".ProduceBlockV3",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.ProduceBlockV3,
methods: []string{http.MethodGet},
@@ -429,6 +450,7 @@ func (s *Service) nodeEndpoints() []endpoint {
name: namespace + ".GetSyncStatus",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetSyncStatus,
methods: []string{http.MethodGet},
@@ -438,6 +460,7 @@ func (s *Service) nodeEndpoints() []endpoint {
name: namespace + ".GetIdentity",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetIdentity,
methods: []string{http.MethodGet},
@@ -447,6 +470,7 @@ func (s *Service) nodeEndpoints() []endpoint {
name: namespace + ".GetPeer",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetPeer,
methods: []string{http.MethodGet},
@@ -456,6 +480,7 @@ func (s *Service) nodeEndpoints() []endpoint {
name: namespace + ".GetPeers",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetPeers,
methods: []string{http.MethodGet},
@@ -465,6 +490,7 @@ func (s *Service) nodeEndpoints() []endpoint {
name: namespace + ".GetPeerCount",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetPeerCount,
methods: []string{http.MethodGet},
@@ -474,6 +500,7 @@ func (s *Service) nodeEndpoints() []endpoint {
name: namespace + ".GetVersion",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetVersion,
methods: []string{http.MethodGet},
@@ -483,6 +510,7 @@ func (s *Service) nodeEndpoints() []endpoint {
name: namespace + ".GetHealth",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetHealth,
methods: []string{http.MethodGet},
@@ -533,6 +561,7 @@ func (s *Service) beaconEndpoints(
name: namespace + ".GetCommittees",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetCommittees,
methods: []string{http.MethodGet},
@@ -542,6 +571,7 @@ func (s *Service) beaconEndpoints(
name: namespace + ".GetStateFork",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetStateFork,
methods: []string{http.MethodGet},
@@ -551,6 +581,7 @@ func (s *Service) beaconEndpoints(
name: namespace + ".GetStateRoot",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetStateRoot,
methods: []string{http.MethodGet},
@@ -560,6 +591,7 @@ func (s *Service) beaconEndpoints(
name: namespace + ".GetSyncCommittees",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetSyncCommittees,
methods: []string{http.MethodGet},
@@ -569,6 +601,7 @@ func (s *Service) beaconEndpoints(
name: namespace + ".GetRandao",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetRandao,
methods: []string{http.MethodGet},
@@ -580,6 +613,7 @@ func (s *Service) beaconEndpoints(
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.PublishBlock,
methods: []string{http.MethodPost},
@@ -591,6 +625,7 @@ func (s *Service) beaconEndpoints(
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.PublishBlindedBlock,
methods: []string{http.MethodPost},
@@ -601,6 +636,7 @@ func (s *Service) beaconEndpoints(
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.PublishBlockV2,
methods: []string{http.MethodPost},
@@ -611,6 +647,7 @@ func (s *Service) beaconEndpoints(
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.PublishBlindedBlockV2,
methods: []string{http.MethodPost},
@@ -620,6 +657,7 @@ func (s *Service) beaconEndpoints(
name: namespace + ".GetBlockV2",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetBlockV2,
methods: []string{http.MethodGet},
@@ -630,6 +668,7 @@ func (s *Service) beaconEndpoints(
name: namespace + ".GetBlockAttestations",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetBlockAttestations,
methods: []string{http.MethodGet},
@@ -639,6 +678,7 @@ func (s *Service) beaconEndpoints(
name: namespace + ".GetBlockAttestationsV2",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetBlockAttestationsV2,
methods: []string{http.MethodGet},
@@ -648,6 +688,7 @@ func (s *Service) beaconEndpoints(
name: namespace + ".GetBlindedBlock",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetBlindedBlock,
methods: []string{http.MethodGet},
@@ -657,6 +698,7 @@ func (s *Service) beaconEndpoints(
name: namespace + ".GetBlockRoot",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetBlockRoot,
methods: []string{http.MethodGet},
@@ -667,6 +709,7 @@ func (s *Service) beaconEndpoints(
name: namespace + ".ListAttestations",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.ListAttestations,
methods: []string{http.MethodGet},
@@ -676,6 +719,7 @@ func (s *Service) beaconEndpoints(
name: namespace + ".ListAttestationsV2",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.ListAttestationsV2,
methods: []string{http.MethodGet},
@@ -686,6 +730,7 @@ func (s *Service) beaconEndpoints(
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.SubmitAttestations,
methods: []string{http.MethodPost},
@@ -696,6 +741,7 @@ func (s *Service) beaconEndpoints(
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.SubmitAttestationsV2,
methods: []string{http.MethodPost},
@@ -705,6 +751,7 @@ func (s *Service) beaconEndpoints(
name: namespace + ".ListVoluntaryExits",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.ListVoluntaryExits,
methods: []string{http.MethodGet},
@@ -715,6 +762,7 @@ func (s *Service) beaconEndpoints(
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.SubmitVoluntaryExit,
methods: []string{http.MethodPost},
@@ -725,6 +773,7 @@ func (s *Service) beaconEndpoints(
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.SubmitSyncCommitteeSignatures,
methods: []string{http.MethodPost},
@@ -734,6 +783,7 @@ func (s *Service) beaconEndpoints(
name: namespace + ".ListBLSToExecutionChanges",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.ListBLSToExecutionChanges,
methods: []string{http.MethodGet},
@@ -744,6 +794,7 @@ func (s *Service) beaconEndpoints(
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.SubmitBLSToExecutionChanges,
methods: []string{http.MethodPost},
@@ -754,6 +805,7 @@ func (s *Service) beaconEndpoints(
name: namespace + ".GetAttesterSlashings",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetAttesterSlashings,
methods: []string{http.MethodGet},
@@ -763,6 +815,7 @@ func (s *Service) beaconEndpoints(
name: namespace + ".GetAttesterSlashingsV2",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetAttesterSlashingsV2,
methods: []string{http.MethodGet},
@@ -773,6 +826,7 @@ func (s *Service) beaconEndpoints(
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.SubmitAttesterSlashings,
methods: []string{http.MethodPost},
@@ -783,6 +837,7 @@ func (s *Service) beaconEndpoints(
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.SubmitAttesterSlashingsV2,
methods: []string{http.MethodPost},
@@ -792,6 +847,7 @@ func (s *Service) beaconEndpoints(
name: namespace + ".GetProposerSlashings",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetProposerSlashings,
methods: []string{http.MethodGet},
@@ -802,6 +858,7 @@ func (s *Service) beaconEndpoints(
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.SubmitProposerSlashing,
methods: []string{http.MethodPost},
@@ -811,6 +868,7 @@ func (s *Service) beaconEndpoints(
name: namespace + ".GetBlockHeaders",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetBlockHeaders,
methods: []string{http.MethodGet},
@@ -820,6 +878,7 @@ func (s *Service) beaconEndpoints(
name: namespace + ".GetBlockHeader",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetBlockHeader,
methods: []string{http.MethodGet},
@@ -829,6 +888,7 @@ func (s *Service) beaconEndpoints(
name: namespace + ".GetGenesis",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetGenesis,
methods: []string{http.MethodGet},
@@ -838,6 +898,7 @@ func (s *Service) beaconEndpoints(
name: namespace + ".GetFinalityCheckpoints",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetFinalityCheckpoints,
methods: []string{http.MethodGet},
@@ -848,6 +909,7 @@ func (s *Service) beaconEndpoints(
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetValidators,
methods: []string{http.MethodGet, http.MethodPost},
@@ -857,6 +919,7 @@ func (s *Service) beaconEndpoints(
name: namespace + ".GetValidator",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetValidator,
methods: []string{http.MethodGet},
@@ -867,6 +930,7 @@ func (s *Service) beaconEndpoints(
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetValidatorBalances,
methods: []string{http.MethodGet, http.MethodPost},
@@ -887,6 +951,7 @@ func (s *Service) beaconEndpoints(
name: namespace + ".GetDepositSnapshot",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetDepositSnapshot,
methods: []string{http.MethodGet},
@@ -896,6 +961,7 @@ func (s *Service) beaconEndpoints(
name: namespace + ".GetPendingDeposits",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetPendingDeposits,
methods: []string{http.MethodGet},
@@ -914,6 +980,7 @@ func (s *Service) beaconEndpoints(
name: namespace + ".GetPendingPartialWithdrawals",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetPendingPartialWithdrawals,
methods: []string{http.MethodGet},
@@ -929,6 +996,7 @@ func (*Service) configEndpoints() []endpoint {
name: namespace + ".GetDepositContract",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: config.GetDepositContract,
methods: []string{http.MethodGet},
@@ -938,6 +1006,7 @@ func (*Service) configEndpoints() []endpoint {
name: namespace + ".GetForkSchedule",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: config.GetForkSchedule,
methods: []string{http.MethodGet},
@@ -947,6 +1016,7 @@ func (*Service) configEndpoints() []endpoint {
name: namespace + ".GetSpec",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: config.GetSpec,
methods: []string{http.MethodGet},
@@ -971,6 +1041,7 @@ func (s *Service) lightClientEndpoints(blocker lookup.Blocker, stater lookup.Sta
name: namespace + ".GetLightClientBootstrap",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetLightClientBootstrap,
methods: []string{http.MethodGet},
@@ -980,6 +1051,7 @@ func (s *Service) lightClientEndpoints(blocker lookup.Blocker, stater lookup.Sta
name: namespace + ".GetLightClientUpdatesByRange",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetLightClientUpdatesByRange,
methods: []string{http.MethodGet},
@@ -989,6 +1061,7 @@ func (s *Service) lightClientEndpoints(blocker lookup.Blocker, stater lookup.Sta
name: namespace + ".GetLightClientFinalityUpdate",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetLightClientFinalityUpdate,
methods: []string{http.MethodGet},
@@ -998,6 +1071,7 @@ func (s *Service) lightClientEndpoints(blocker lookup.Blocker, stater lookup.Sta
name: namespace + ".GetLightClientOptimisticUpdate",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetLightClientOptimisticUpdate,
methods: []string{http.MethodGet},
@@ -1024,6 +1098,7 @@ func (s *Service) debugEndpoints(stater lookup.Stater) []endpoint {
name: namespace + ".GetBeaconStateV2",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetBeaconStateV2,
methods: []string{http.MethodGet},
@@ -1033,6 +1108,7 @@ func (s *Service) debugEndpoints(stater lookup.Stater) []endpoint {
name: namespace + ".GetForkChoiceHeadsV2",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetForkChoiceHeadsV2,
methods: []string{http.MethodGet},
@@ -1042,6 +1118,7 @@ func (s *Service) debugEndpoints(stater lookup.Stater) []endpoint {
name: namespace + ".GetForkChoice",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetForkChoice,
methods: []string{http.MethodGet},
@@ -1066,6 +1143,7 @@ func (s *Service) eventsEndpoints() []endpoint {
name: namespace + ".StreamEvents",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.EventStreamMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.StreamEvents,
methods: []string{http.MethodGet},
@@ -1101,6 +1179,7 @@ func (s *Service) prysmBeaconEndpoints(
name: namespace + ".GetWeakSubjectivity",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetWeakSubjectivity,
methods: []string{http.MethodGet},
@@ -1110,6 +1189,7 @@ func (s *Service) prysmBeaconEndpoints(
name: namespace + ".GetValidatorCount",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetValidatorCount,
methods: []string{http.MethodGet},
@@ -1119,6 +1199,7 @@ func (s *Service) prysmBeaconEndpoints(
name: namespace + ".GetValidatorCount",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetValidatorCount,
methods: []string{http.MethodGet},
@@ -1129,6 +1210,7 @@ func (s *Service) prysmBeaconEndpoints(
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetIndividualVotes,
methods: []string{http.MethodPost},
@@ -1138,6 +1220,7 @@ func (s *Service) prysmBeaconEndpoints(
name: namespace + ".GetChainHead",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetChainHead,
methods: []string{http.MethodGet},
@@ -1148,6 +1231,7 @@ func (s *Service) prysmBeaconEndpoints(
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.PublishBlobs,
methods: []string{http.MethodPost},
@@ -1175,6 +1259,7 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
name: namespace + ".ListTrustedPeer",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.ListTrustedPeer,
methods: []string{http.MethodGet},
@@ -1184,6 +1269,7 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
name: namespace + ".ListTrustedPeer",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.ListTrustedPeer,
methods: []string{http.MethodGet},
@@ -1194,6 +1280,7 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.AddTrustedPeer,
methods: []string{http.MethodPost},
@@ -1204,6 +1291,7 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.AddTrustedPeer,
methods: []string{http.MethodPost},
@@ -1213,6 +1301,7 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
name: namespace + ".RemoveTrustedPeer",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.RemoveTrustedPeer,
methods: []string{http.MethodDelete},
@@ -1222,6 +1311,7 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
name: namespace + ".RemoveTrustedPeer",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.RemoveTrustedPeer,
methods: []string{http.MethodDelete},
@@ -1244,6 +1334,7 @@ func (s *Service) prysmValidatorEndpoints(stater lookup.Stater, coreService *cor
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetPerformance,
methods: []string{http.MethodPost},
@@ -1254,6 +1345,7 @@ func (s *Service) prysmValidatorEndpoints(stater lookup.Stater, coreService *cor
middleware: []middleware.Middleware{
middleware.ContentTypeHandler([]string{api.JsonMediaType}),
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetPerformance,
methods: []string{http.MethodPost},
@@ -1263,6 +1355,7 @@ func (s *Service) prysmValidatorEndpoints(stater lookup.Stater, coreService *cor
name: namespace + ".GetParticipation",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetParticipation,
methods: []string{http.MethodGet},
@@ -1272,6 +1365,7 @@ func (s *Service) prysmValidatorEndpoints(stater lookup.Stater, coreService *cor
name: namespace + ".GetActiveSetChanges",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
middleware.AcceptEncodingHeaderHandler(),
},
handler: server.GetActiveSetChanges,
methods: []string{http.MethodGet},
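Note: every endpoint registration above gains the same added line, middleware.AcceptEncodingHeaderHandler(). As a rough illustration only (this is not Prysm's actual implementation, and the supported-encodings list is an assumption), a handler of this shape would validate the request's Accept-Encoding header in the same wrapper style as the AcceptHeaderHandler and ContentTypeHandler it sits next to:

```go
package middleware

import (
	"net/http"
	"strings"
)

// acceptEncodingHandler is a hedged sketch, NOT the real
// AcceptEncodingHeaderHandler: it lets requests without encoding
// restrictions through, accepts any request naming a supported
// encoding, and rejects the rest with 406 Not Acceptable.
func acceptEncodingHandler(supported ...string) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			accept := r.Header.Get("Accept-Encoding")
			if accept == "" || strings.Contains(accept, "*") {
				next.ServeHTTP(w, r) // client imposes no restriction
				return
			}
			for _, enc := range supported {
				if strings.Contains(accept, enc) {
					next.ServeHTTP(w, r)
					return
				}
			}
			http.Error(w, "unsupported Accept-Encoding", http.StatusNotAcceptable)
		})
	}
}
```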

View File

@@ -9,6 +9,7 @@ go_library(
"//api/server/structs:go_default_library",
"//config/params:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//network/forks:go_default_library",
"//network/httputil:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
],
@@ -21,6 +22,9 @@ go_test(
deps = [
"//api/server/structs:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network/forks:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",

View File

@@ -10,6 +10,7 @@ import (
"github.com/OffchainLabs/prysm/v6/api/server/structs"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/network/httputil"
"github.com/ethereum/go-ethereum/common/hexutil"
)
@@ -32,25 +33,34 @@ func GetForkSchedule(w http.ResponseWriter, r *http.Request) {
_, span := trace.StartSpan(r.Context(), "config.GetForkSchedule")
defer span.End()
schedule := params.SortedForkSchedule()
data := make([]*structs.Fork, 0, len(schedule))
schedule := params.BeaconConfig().ForkVersionSchedule
if len(schedule) == 0 {
httputil.WriteJson(w, &structs.GetForkScheduleResponse{
Data: data,
Data: make([]*structs.Fork, 0),
})
return
}
previous := schedule[0]
for _, entry := range schedule {
data = append(data, &structs.Fork{
PreviousVersion: hexutil.Encode(previous.ForkVersion[:]),
CurrentVersion: hexutil.Encode(entry.ForkVersion[:]),
Epoch: fmt.Sprintf("%d", entry.Epoch),
})
previous = entry
versions := forks.SortedForkVersions(schedule)
chainForks := make([]*structs.Fork, len(schedule))
var previous, current []byte
for i, v := range versions {
if i == 0 {
previous = params.BeaconConfig().GenesisForkVersion
} else {
previous = current
}
copyV := v
current = copyV[:]
chainForks[i] = &structs.Fork{
PreviousVersion: hexutil.Encode(previous),
CurrentVersion: hexutil.Encode(current),
Epoch: fmt.Sprintf("%d", schedule[v]),
}
}
httputil.WriteJson(w, &structs.GetForkScheduleResponse{
Data: data,
Data: chainForks,
})
}
@@ -70,8 +80,8 @@ func GetSpec(w http.ResponseWriter, r *http.Request) {
httputil.WriteJson(w, &structs.GetSpecResponse{Data: data})
}
func prepareConfigSpec() (map[string]interface{}, error) {
data := make(map[string]interface{})
func prepareConfigSpec() (map[string]string, error) {
data := make(map[string]string)
config := *params.BeaconConfig()
t := reflect.TypeOf(config)
v := reflect.ValueOf(config)
@@ -92,16 +102,7 @@ func prepareConfigSpec() (map[string]interface{}, error) {
case reflect.Uint64:
data[tagValue] = strconv.FormatUint(vField.Uint(), 10)
case reflect.Slice:
// Handle byte slices with hexutil.Encode
if vField.Type().Elem().Kind() == reflect.Uint8 {
data[tagValue] = hexutil.Encode(vField.Bytes())
} else if vField.Type().Elem().Kind() == reflect.Struct {
// Handle struct slices - convert numeric fields to strings for consistent JSON output
data[tagValue] = convertStructSliceForJSON(vField)
} else {
// Handle other slice types - return as interface{} for JSON serialization
data[tagValue] = vField.Interface()
}
data[tagValue] = hexutil.Encode(vField.Bytes())
case reflect.Array:
data[tagValue] = hexutil.Encode(reflect.ValueOf(&config).Elem().Field(i).Slice(0, vField.Len()).Bytes())
case reflect.String:
@@ -115,45 +116,3 @@ func prepareConfigSpec() (map[string]interface{}, error) {
return data, nil
}
// convertStructSliceForJSON converts struct slices to ensure numeric fields are strings
func convertStructSliceForJSON(sliceValue reflect.Value) []map[string]interface{} {
length := sliceValue.Len()
result := make([]map[string]interface{}, length)
for i := 0; i < length; i++ {
elem := sliceValue.Index(i)
elemType := elem.Type()
elemMap := make(map[string]interface{})
for j := 0; j < elem.NumField(); j++ {
field := elem.Field(j)
fieldType := elemType.Field(j)
// Skip unexported fields
if !field.CanInterface() {
continue
}
// Get JSON tag name (fallback to field name if no JSON tag)
jsonTag := fieldType.Tag.Get("json")
if jsonTag == "" || jsonTag == "-" {
jsonTag = fieldType.Name
}
// Convert numeric types to strings for consistent JSON output
switch field.Kind() {
case reflect.Uint64, reflect.Uint, reflect.Uint32, reflect.Uint16, reflect.Uint8:
elemMap[jsonTag] = strconv.FormatUint(field.Uint(), 10)
case reflect.Int64, reflect.Int, reflect.Int32, reflect.Int16, reflect.Int8:
elemMap[jsonTag] = strconv.FormatInt(field.Int(), 10)
default:
elemMap[jsonTag] = field.Interface()
}
}
result[i] = elemMap
}
return result
}
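Note on the GetForkSchedule rewrite above: the handler now iterates the sorted fork versions and chains each entry's previous_version to the prior entry's current_version, seeding the first entry with the genesis fork version. A minimal standalone sketch of that pairing (version bytes and epochs are made up for illustration):

```go
package main

import "fmt"

func main() {
	genesis := [4]byte{0x00, 0x00, 0x00, 0x00}
	versions := [][4]byte{{0x01, 0, 0, 0}, {0x02, 0, 0, 0}, {0x03, 0, 0, 0}}
	epochs := []uint64{100, 200, 300}

	previous := genesis
	for i, current := range versions {
		// First fork pairs with genesis; every later fork pairs
		// with its predecessor, exactly as in the loop above.
		fmt.Printf("previous=%x current=%x epoch=%d\n", previous, current, epochs[i])
		previous = current
	}
}
```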

View File

@@ -12,6 +12,8 @@ import (
"github.com/OffchainLabs/prysm/v6/api/server/structs"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/ethereum/go-ethereum/common"
@@ -198,7 +200,7 @@ func TestGetSpec(t *testing.T) {
data, ok := resp.Data.(map[string]interface{})
require.Equal(t, true, ok)
assert.Equal(t, 176, len(data))
assert.Equal(t, 175, len(data))
for k, v := range data {
t.Run(k, func(t *testing.T) {
switch k {
@@ -575,11 +577,6 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "102", v)
case "BLOB_SIDECAR_SUBNET_COUNT_ELECTRA":
assert.Equal(t, "103", v)
case "BLOB_SCHEDULE":
// BLOB_SCHEDULE should be an empty slice when no schedule is defined
blobSchedule, ok := v.([]interface{})
assert.Equal(t, true, ok)
assert.Equal(t, 0, len(blobSchedule))
default:
t.Errorf("Incorrect key: %s", k)
}
@@ -589,34 +586,43 @@ func TestGetSpec(t *testing.T) {
func TestForkSchedule_Ok(t *testing.T) {
t.Run("ok", func(t *testing.T) {
genesisForkVersion := []byte("Genesis")
firstForkVersion, firstForkEpoch := []byte("Firs"), primitives.Epoch(100)
secondForkVersion, secondForkEpoch := []byte("Seco"), primitives.Epoch(200)
thirdForkVersion, thirdForkEpoch := []byte("Thir"), primitives.Epoch(300)
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig().Copy()
config.InitializeForkSchedule()
config.GenesisForkVersion = genesisForkVersion
// Create fork schedule adding keys in non-sorted order.
schedule := make(map[[4]byte]primitives.Epoch, 3)
schedule[bytesutil.ToBytes4(secondForkVersion)] = secondForkEpoch
schedule[bytesutil.ToBytes4(firstForkVersion)] = firstForkEpoch
schedule[bytesutil.ToBytes4(thirdForkVersion)] = thirdForkEpoch
config.ForkVersionSchedule = schedule
params.OverrideBeaconConfig(config)
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/config/fork_schedule", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
genesisStr, firstStr, secondStr := hexutil.Encode(config.GenesisForkVersion), hexutil.Encode(config.AltairForkVersion), hexutil.Encode(config.BellatrixForkVersion)
GetForkSchedule(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetForkScheduleResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
schedule := params.SortedForkSchedule()
require.Equal(t, len(schedule), len(resp.Data))
require.Equal(t, 3, len(resp.Data))
fork := resp.Data[0]
assert.Equal(t, genesisStr, fork.PreviousVersion)
assert.Equal(t, genesisStr, fork.CurrentVersion)
assert.Equal(t, fmt.Sprintf("%d", config.GenesisEpoch), fork.Epoch)
assert.DeepEqual(t, hexutil.Encode(genesisForkVersion), fork.PreviousVersion)
assert.DeepEqual(t, hexutil.Encode(firstForkVersion), fork.CurrentVersion)
assert.Equal(t, fmt.Sprintf("%d", firstForkEpoch), fork.Epoch)
fork = resp.Data[1]
assert.Equal(t, genesisStr, fork.PreviousVersion)
assert.Equal(t, firstStr, fork.CurrentVersion)
assert.Equal(t, fmt.Sprintf("%d", config.AltairForkEpoch), fork.Epoch)
assert.DeepEqual(t, hexutil.Encode(firstForkVersion), fork.PreviousVersion)
assert.DeepEqual(t, hexutil.Encode(secondForkVersion), fork.CurrentVersion)
assert.Equal(t, fmt.Sprintf("%d", secondForkEpoch), fork.Epoch)
fork = resp.Data[2]
assert.Equal(t, firstStr, fork.PreviousVersion)
assert.Equal(t, secondStr, fork.CurrentVersion)
assert.Equal(t, fmt.Sprintf("%d", config.BellatrixForkEpoch), fork.Epoch)
assert.DeepEqual(t, hexutil.Encode(secondForkVersion), fork.PreviousVersion)
assert.DeepEqual(t, hexutil.Encode(thirdForkVersion), fork.CurrentVersion)
assert.Equal(t, fmt.Sprintf("%d", thirdForkEpoch), fork.Epoch)
})
t.Run("correct number of forks", func(t *testing.T) {
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/config/fork_schedule", nil)
@@ -627,64 +633,7 @@ func TestForkSchedule_Ok(t *testing.T) {
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetForkScheduleResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
os := params.SortedForkSchedule()
assert.Equal(t, len(os), len(resp.Data))
os := forks.NewOrderedSchedule(params.BeaconConfig())
assert.Equal(t, os.Len(), len(resp.Data))
})
}
func TestGetSpec_BlobSchedule(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig().Copy()
// Set up a blob schedule with test data
config.BlobSchedule = []params.BlobScheduleEntry{
{
Epoch: primitives.Epoch(100),
MaxBlobsPerBlock: 6,
},
{
Epoch: primitives.Epoch(200),
MaxBlobsPerBlock: 9,
},
}
params.OverrideBeaconConfig(config)
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/config/spec", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
GetSpec(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := structs.GetSpecResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
data, ok := resp.Data.(map[string]interface{})
require.Equal(t, true, ok)
// Verify BLOB_SCHEDULE is present and properly formatted
blobScheduleValue, exists := data["BLOB_SCHEDULE"]
require.Equal(t, true, exists)
// Verify it's a slice of maps (actual JSON object, not string)
// The JSON unmarshaling converts it to []interface{} with map[string]interface{} entries
blobScheduleSlice, ok := blobScheduleValue.([]interface{})
require.Equal(t, true, ok)
// Convert to generic interface for easier testing
var blobSchedule []map[string]interface{}
for _, entry := range blobScheduleSlice {
entryMap, ok := entry.(map[string]interface{})
require.Equal(t, true, ok)
blobSchedule = append(blobSchedule, entryMap)
}
// Verify the blob schedule content
require.Equal(t, 2, len(blobSchedule))
// Check first entry - values should be strings for consistent API output
assert.Equal(t, "100", blobSchedule[0]["EPOCH"])
assert.Equal(t, "6", blobSchedule[0]["MAX_BLOBS_PER_BLOCK"])
// Check second entry - values should be strings for consistent API output
assert.Equal(t, "200", blobSchedule[1]["EPOCH"])
assert.Equal(t, "9", blobSchedule[1]["MAX_BLOBS_PER_BLOCK"])
}

View File

@@ -20,6 +20,7 @@ go_library(
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//network/forks:go_default_library",
"//network/httputil:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
@@ -41,9 +42,9 @@ go_test(
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/light-client:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/light-client:go_default_library",
"//consensus-types/primitives:go_default_library",

View File

@@ -11,6 +11,7 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v6/network/forks"
"github.com/OffchainLabs/prysm/v6/network/httputil"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -114,7 +115,7 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
updateSlot := update.AttestedHeader().Beacon().Slot
updateEpoch := slots.ToEpoch(updateSlot)
updateFork, err := params.Fork(updateEpoch)
updateFork, err := forks.Fork(updateEpoch)
if err != nil {
httputil.HandleError(w, "Could not get fork Version: "+err.Error(), http.StatusInternalServerError)
return

File diff suppressed because it is too large

View File

@@ -40,6 +40,7 @@ go_test(
deps = [
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/rpc/core:go_default_library",
@@ -48,18 +49,15 @@ go_test(
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/state/stategen/mock:go_default_library",
"//beacon-chain/verification:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library",
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -133,137 +133,6 @@ func (p *BeaconDbBlocker) Block(ctx context.Context, id []byte) (interfaces.Read
return blk, nil
}
// blobsFromStoredBlobs retrieves blobs corresponding to `indices` and `root` from the store.
// This function expects blobs to be stored directly (aka. no data columns).
func (p *BeaconDbBlocker) blobsFromStoredBlobs(indices []int, root []byte, commitments [][]byte) ([]*blocks.VerifiedROBlob, *core.RpcError) {
sum := p.BlobStorage.Summary(bytesutil.ToBytes32(root))
if len(indices) == 0 {
for index := range commitments {
if sum.HasIndex(uint64(index)) {
indices = append(indices, index)
}
}
} else {
for _, index := range indices {
if uint64(index) >= sum.MaxBlobsForEpoch() {
return nil, &core.RpcError{
Err: fmt.Errorf("requested index %d is bigger than the maximum possible blob count %d", index, sum.MaxBlobsForEpoch()),
Reason: core.BadRequest,
}
}
if !sum.HasIndex(uint64(index)) {
return nil, &core.RpcError{
Err: fmt.Errorf("requested index %d not found", index),
Reason: core.NotFound,
}
}
}
}
blobs := make([]*blocks.VerifiedROBlob, 0, len(indices))
for _, index := range indices {
vblob, err := p.BlobStorage.Get(bytesutil.ToBytes32(root), uint64(index))
if err != nil {
return nil, &core.RpcError{
Err: fmt.Errorf("could not retrieve blob for block root %#x at index %d", root, index),
Reason: core.Internal,
}
}
blobs = append(blobs, &vblob)
}
return blobs, nil
}
// blobsFromStoredDataColumns retrieves data columns from the store, reconstruct the whole matrix if needed, convert the matrix to blobs,
// and then returns blobs corresponding to `indices` and `root` from the store,
// This function expects data columns to be stored (aka. no blobs).
// If not enough data columns are available to extract blobs from them (either directly or after reconstruction), an error is returned.
func (p *BeaconDbBlocker) blobsFromStoredDataColumns(block blocks.ROBlock, indices []int, rootBytes []byte) ([]*blocks.VerifiedROBlob, *core.RpcError) {
root := bytesutil.ToBytes32(rootBytes)
// Use all indices if none are provided.
if len(indices) == 0 {
commitments, err := block.Block().Body().BlobKzgCommitments()
if err != nil {
return nil, &core.RpcError{
Err: errors.Wrap(err, "could not retrieve blob commitments"),
Reason: core.Internal,
}
}
for index := range commitments {
indices = append(indices, index)
}
}
// Count how many columns we have in the store.
summary := p.DataColumnStorage.Summary(root)
stored := summary.Stored()
count := uint64(len(stored))
if count < peerdas.MinimumColumnsCountToReconstruct() {
// There is no way to reconstruct the data columns.
return nil, &core.RpcError{
Err: errors.Errorf("the node does not custody enough data columns to reconstruct blobs. Please start the beacon node with the `--%s` flag to ensure this call to success, or retry later if it is already the case.", flags.SubscribeAllDataSubnets.Name),
Reason: core.NotFound,
}
}
// Retrieve from the database needed data columns.
verifiedRoDataColumnSidecars, err := p.neededDataColumnSidecars(root, stored)
if err != nil {
return nil, &core.RpcError{
Err: errors.Wrap(err, "needed data column sidecars"),
Reason: core.Internal,
}
}
verifiedRoBlobSidecars, err := peerdas.ReconstructBlobs(block, verifiedRoDataColumnSidecars, indices)
if err != nil {
return nil, &core.RpcError{
Err: errors.Wrap(err, "blobs from data columns"),
Reason: core.Internal,
}
}
return verifiedRoBlobSidecars, nil
}
func (p *BeaconDbBlocker) neededDataColumnSidecars(root [fieldparams.RootLength]byte, stored map[uint64]bool) ([]blocks.VerifiedRODataColumn, error) {
// Check if we have all the non-extended data columns.
cellsPerBlob := fieldparams.CellsPerBlob
blobIndices := make([]uint64, 0, cellsPerBlob)
hasAllBlobColumns := true
for i := range uint64(cellsPerBlob) {
if !stored[i] {
hasAllBlobColumns = false
break
}
blobIndices = append(blobIndices, i)
}
if hasAllBlobColumns {
// Retrieve only the non-extended data columns.
verifiedRoSidecars, err := p.DataColumnStorage.Get(root, blobIndices)
if err != nil {
return nil, errors.Wrap(err, "data columns storage get")
}
return verifiedRoSidecars, nil
}
// Retrieve all the data columns.
verifiedRoSidecars, err := p.DataColumnStorage.Get(root, nil)
if err != nil {
return nil, errors.Wrap(err, "data columns storage get")
}
return verifiedRoSidecars, nil
}
// Blobs returns the blobs for a given block id identifier and blob indices. The identifier can be one of:
// - "head" (canonical head in node's view)
// - "genesis"
@@ -368,18 +237,13 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []int) (
return nil, &core.RpcError{Err: errors.Wrapf(err, "failed to retrieve kzg commitments from block %#x", rootSlice), Reason: core.Internal}
}
// if there are no commitments return 200 w/ empty list
// If there are no commitments return 200 w/ empty list
if len(commitments) == 0 {
return make([]*blocks.VerifiedROBlob, 0), nil
}
// Get the slot of the block.
blockSlot := roBlock.Slot()
// Get the first peerDAS epoch.
// Compute the first Fulu slot.
fuluForkEpoch := params.BeaconConfig().FuluForkEpoch
// Compute the first peerDAS slot.
fuluForkSlot := primitives.Slot(math.MaxUint64)
if fuluForkEpoch != primitives.Epoch(math.MaxUint64) {
fuluForkSlot, err = slots.EpochStart(fuluForkEpoch)
@@ -388,14 +252,154 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []int) (
}
}
if blockSlot >= fuluForkSlot {
if roBlock.Slot() >= fuluForkSlot {
roBlock, err := blocks.NewROBlockWithRoot(roSignedBlock, root)
if err != nil {
return nil, &core.RpcError{Err: errors.Wrapf(err, "failed to create roBlock with root %#x", root), Reason: core.Internal}
}
return p.blobsFromStoredDataColumns(roBlock, indices, rootSlice)
return p.blobsFromStoredDataColumns(roBlock, indices)
}
return p.blobsFromStoredBlobs(indices, rootSlice, commitments)
return p.blobsFromStoredBlobs(commitments, root, indices)
}
// blobsFromStoredBlobs retrieves blob sidecars corresponding to `indices` and `root` from the store.
// This function expects blob sidecars to be stored (i.e. no data column sidecars).
func (p *BeaconDbBlocker) blobsFromStoredBlobs(commitments [][]byte, root [fieldparams.RootLength]byte, indices []int) ([]*blocks.VerifiedROBlob, *core.RpcError) {
summary := p.BlobStorage.Summary(root)
maxBlobCount := summary.MaxBlobsForEpoch()
for _, index := range indices {
if uint64(index) >= maxBlobCount {
return nil, &core.RpcError{
Err: fmt.Errorf("requested index %d is bigger than the maximum possible blob count %d", index, maxBlobCount),
Reason: core.BadRequest,
}
}
if !summary.HasIndex(uint64(index)) {
return nil, &core.RpcError{
Err: fmt.Errorf("requested index %d not found", index),
Reason: core.NotFound,
}
}
}
// If no indices are provided, use all indices that are available in the summary.
if len(indices) == 0 {
for index := range commitments {
if summary.HasIndex(uint64(index)) {
indices = append(indices, index)
}
}
}
// Retrieve blob sidecars from the store.
blobs := make([]*blocks.VerifiedROBlob, 0, len(indices))
for _, index := range indices {
blobSidecar, err := p.BlobStorage.Get(root, uint64(index))
if err != nil {
return nil, &core.RpcError{
Err: fmt.Errorf("could not retrieve blob for block root %#x at index %d", root, index),
Reason: core.Internal,
}
}
blobs = append(blobs, &blobSidecar)
}
return blobs, nil
}
// blobsFromStoredDataColumns retrieves data column sidecars from the store,
// reconstructs the whole matrix if needed, converts the matrix to blobs,
// and then returns converted blobs corresponding to `indices` and `root`.
// This function expects data column sidecars to be stored (i.e. no blob sidecars).
// If not enough data column sidecars are available to recover blobs from them
// (either directly or after reconstruction), an error is returned.
func (p *BeaconDbBlocker) blobsFromStoredDataColumns(block blocks.ROBlock, indices []int) ([]*blocks.VerifiedROBlob, *core.RpcError) {
root := block.Root()
// Use all indices if none are provided.
if len(indices) == 0 {
commitments, err := block.Block().Body().BlobKzgCommitments()
if err != nil {
return nil, &core.RpcError{
Err: errors.Wrap(err, "could not retrieve blob commitments"),
Reason: core.Internal,
}
}
for index := range commitments {
indices = append(indices, index)
}
}
// Count how many columns we have in the store.
summary := p.DataColumnStorage.Summary(root)
stored := summary.Stored()
count := uint64(len(stored))
if count < peerdas.MinimumColumnsCountToReconstruct() {
// There is no way to reconstruct the data columns.
return nil, &core.RpcError{
Err: errors.Errorf("the node does not custody enough data columns to reconstruct blobs - please start the beacon node with the `--%s` flag to ensure this call to succeed, or retry later if it is already the case", flags.SubscribeAllDataSubnets.Name),
Reason: core.NotFound,
}
}
// Retrieve from the database needed data columns.
verifiedRoDataColumnSidecars, err := p.neededDataColumnSidecars(root, stored)
if err != nil {
return nil, &core.RpcError{
Err: errors.Wrap(err, "needed data column sidecars"),
Reason: core.Internal,
}
}
// Reconstruct blob sidecars from data column sidecars.
verifiedRoBlobSidecars, err := peerdas.ReconstructBlobs(block, verifiedRoDataColumnSidecars, indices)
if err != nil {
return nil, &core.RpcError{
Err: errors.Wrap(err, "blobs from data columns"),
Reason: core.Internal,
}
}
return verifiedRoBlobSidecars, nil
}
// neededDataColumnSidecars retrieves all data column sidecars corresponding to (non-extended) blobs if available,
// otherwise retrieves all data column sidecars from the store.
func (p *BeaconDbBlocker) neededDataColumnSidecars(root [fieldparams.RootLength]byte, stored map[uint64]bool) ([]blocks.VerifiedRODataColumn, error) {
// Check if we have all the non-extended data columns.
cellsPerBlob := fieldparams.CellsPerBlob
blobIndices := make([]uint64, 0, cellsPerBlob)
hasAllBlobColumns := true
for i := range uint64(cellsPerBlob) {
if !stored[i] {
hasAllBlobColumns = false
break
}
blobIndices = append(blobIndices, i)
}
if hasAllBlobColumns {
// Retrieve only the non-extended data columns.
verifiedRoSidecars, err := p.DataColumnStorage.Get(root, blobIndices)
if err != nil {
return nil, errors.Wrap(err, "data columns storage get")
}
return verifiedRoSidecars, nil
}
// Retrieve all the data columns.
verifiedRoSidecars, err := p.DataColumnStorage.Get(root, nil)
if err != nil {
return nil, errors.Wrap(err, "data columns storage get")
}
return verifiedRoSidecars, nil
}
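Note on the retrieval strategy above: assuming mainnet PeerDAS parameters (128 data columns produced by a 2x Reed-Solomon extension, so any half of the columns suffices to reconstruct the matrix, and columns 0..CellsPerBlob-1 carry the blob cells directly), the decision flow can be paraphrased as the sketch below; it is an illustration, not the production code:

```go
package lookup

import "errors"

// columnsToFetch paraphrases blobsFromStoredDataColumns and
// neededDataColumnSidecars above: fail when fewer columns are stored
// than the reconstruction threshold, fetch only the non-extended
// columns when all of them are present, and otherwise ask for every
// stored column so the full matrix can be reconstructed.
func columnsToFetch(stored map[uint64]bool, cellsPerBlob, minToReconstruct uint64) (indices []uint64, fetchAll bool, err error) {
	if uint64(len(stored)) < minToReconstruct {
		return nil, false, errors.New("not enough data columns stored to reconstruct blobs")
	}
	for i := uint64(0); i < cellsPerBlob; i++ {
		if !stored[i] {
			return nil, true, nil // a non-extended column is missing: reconstruct from everything
		}
		indices = append(indices, i)
	}
	return indices, false, nil // all non-extended columns present: no reconstruction needed
}
```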

View File

@@ -1,9 +1,6 @@
package lookup
import (
"bytes"
"crypto/sha256"
"encoding/binary"
"fmt"
"math"
"net/http"
@@ -13,23 +10,21 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
mockChain "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/core"
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/testutil"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
GoKZG "github.com/crate-crypto/go-kzg-4844"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/sirupsen/logrus"
)
func TestGetBlock(t *testing.T) {
@@ -165,285 +160,336 @@ func TestGetBlock(t *testing.T) {
}
}
func deterministicRandomness(seed int64) [32]byte {
// Converts an int64 to a byte slice
buf := new(bytes.Buffer)
err := binary.Write(buf, binary.BigEndian, seed)
if err != nil {
logrus.WithError(err).Error("Failed to write int64 to bytes buffer")
return [32]byte{}
}
bytes := buf.Bytes()
return sha256.Sum256(bytes)
}
// Returns a serialized random field element in big-endian
func getRandFieldElement(seed int64) [32]byte {
bytes := deterministicRandomness(seed)
var r fr.Element
r.SetBytes(bytes[:])
return GoKZG.SerializeScalar(r)
}
// Returns a random blob using the passed seed as entropy
func getRandBlob(seed int64) kzg.Blob {
var blob kzg.Blob
for i := 0; i < len(blob); i += 32 {
fieldElementBytes := getRandFieldElement(seed + int64(i))
copy(blob[i:i+32], fieldElementBytes[:])
}
return blob
}
func generateCommitmentAndProof(blob *kzg.Blob) (*kzg.Commitment, *kzg.Proof, error) {
commitment, err := kzg.BlobToKZGCommitment(blob)
if err != nil {
return nil, nil, err
}
proof, err := kzg.ComputeBlobKZGProof(blob, commitment)
if err != nil {
return nil, nil, err
}
return &commitment, &proof, err
}
func generateRandomBlocSignedBeaconBlockkAndVerifiedRoBlobs(t *testing.T, blobCount int) (interfaces.SignedBeaconBlock, []*blocks.VerifiedROBlob) {
// Create a protobuf signed beacon block.
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
// Generate random blobs and their corresponding commitments and proofs.
blobs := make([]kzg.Blob, 0, blobCount)
blobKzgCommitments := make([]*kzg.Commitment, 0, blobCount)
blobKzgProofs := make([]*kzg.Proof, 0, blobCount)
for blobIndex := range blobCount {
// Create a random blob.
blob := getRandBlob(int64(blobIndex))
blobs = append(blobs, blob)
// Generate a blobKZGCommitment for the blob.
blobKZGCommitment, proof, err := generateCommitmentAndProof(&blob)
require.NoError(t, err)
blobKzgCommitments = append(blobKzgCommitments, blobKZGCommitment)
blobKzgProofs = append(blobKzgProofs, proof)
}
// Set the commitments into the block.
blobZkgCommitmentsBytes := make([][]byte, 0, blobCount)
for _, blobKZGCommitment := range blobKzgCommitments {
blobZkgCommitmentsBytes = append(blobZkgCommitmentsBytes, blobKZGCommitment[:])
}
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = blobZkgCommitmentsBytes
// Generate verified RO blobs.
verifiedROBlobs := make([]*blocks.VerifiedROBlob, 0, blobCount)
// Create a signed beacon block from the protobuf.
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
require.NoError(t, err)
commitmentInclusionProof, err := blocks.MerkleProofKZGCommitments(signedBeaconBlock.Block().Body())
require.NoError(t, err)
for blobIndex := range blobCount {
blob := blobs[blobIndex]
blobKZGCommitment := blobKzgCommitments[blobIndex]
blobKzgProof := blobKzgProofs[blobIndex]
// Get the signed beacon block header.
signedBeaconBlockHeader, err := signedBeaconBlock.Header()
require.NoError(t, err)
blobSidecar := &ethpb.BlobSidecar{
Index: uint64(blobIndex),
Blob: blob[:],
KzgCommitment: blobKZGCommitment[:],
KzgProof: blobKzgProof[:],
SignedBlockHeader: signedBeaconBlockHeader,
CommitmentInclusionProof: commitmentInclusionProof,
}
roBlob, err := blocks.NewROBlob(blobSidecar)
require.NoError(t, err)
verifiedROBlob := blocks.NewVerifiedROBlob(roBlob)
verifiedROBlobs = append(verifiedROBlobs, &verifiedROBlob)
}
return signedBeaconBlock, verifiedROBlobs
}
func TestGetBlob(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.DenebForkEpoch = 1
params.OverrideBeaconConfig(cfg)
const (
slot = 123
blobCount = 4
denebForkEpoch = 1
fuluForkEpoch = 2
)
setupDeneb := func(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.DenebForkEpoch = denebForkEpoch
params.OverrideBeaconConfig(cfg)
}
setupFulu := func(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.DenebForkEpoch = denebForkEpoch
cfg.FuluForkEpoch = fuluForkEpoch
params.OverrideBeaconConfig(cfg)
}
ctx := t.Context()
db := testDB.SetupDB(t)
denebBlock, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 123, 4)
require.NoError(t, db.SaveBlock(t.Context(), denebBlock))
_, bs := filesystem.NewEphemeralBlobStorageAndFs(t)
testSidecars := verification.FakeVerifySliceForTest(t, blobs)
for i := range testSidecars {
require.NoError(t, bs.Save(testSidecars[i]))
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)
// Create and save Deneb block and blob sidecars.
_, blobStorage := filesystem.NewEphemeralBlobStorageAndFs(t)
denebBlock, storedBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [fieldparams.RootLength]byte{}, slot, blobCount)
denebBlockRoot := denebBlock.Root()
verifiedStoredSidecars := verification.FakeVerifySliceForTest(t, storedBlobSidecars)
for i := range verifiedStoredSidecars {
err := blobStorage.Save(verifiedStoredSidecars[i])
require.NoError(t, err)
}
blockRoot := blobs[0].BlockRoot()
err = db.SaveBlock(t.Context(), denebBlock)
require.NoError(t, err)
// Create an Electra block and blob sidecars (an Electra block is structurally a Fulu block),
// convert the blob sidecars to data column sidecars, and save the block.
fuluForkSlot := fuluForkEpoch * params.BeaconConfig().SlotsPerEpoch
fuluBlock, fuluBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, fuluForkSlot, blobCount)
fuluBlockRoot := fuluBlock.Root()
cellsAndProofsList := make([]kzg.CellsAndProofs, 0, len(fuluBlobSidecars))
for _, blob := range fuluBlobSidecars {
var kzgBlob kzg.Blob
copy(kzgBlob[:], blob.Blob)
cellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
require.NoError(t, err)
cellsAndProofsList = append(cellsAndProofsList, cellsAndProofs)
}
dataColumnSidecarPb, err := peerdas.DataColumnSidecars(fuluBlock, cellsAndProofsList)
require.NoError(t, err)
verifiedRoDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnSidecarPb))
for _, sidecarPb := range dataColumnSidecarPb {
roDataColumn, err := blocks.NewRODataColumnWithRoot(sidecarPb, fuluBlockRoot)
require.NoError(t, err)
verifiedRoDataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
verifiedRoDataColumnSidecars = append(verifiedRoDataColumnSidecars, verifiedRoDataColumn)
}
err = db.SaveBlock(t.Context(), fuluBlock)
require.NoError(t, err)
t.Run("genesis", func(t *testing.T) {
setupDeneb(t)
blocker := &BeaconDbBlocker{}
_, rpcErr := blocker.Blobs(ctx, "genesis", nil)
assert.Equal(t, http.StatusBadRequest, core.ErrorReasonToHTTP(rpcErr.Reason))
assert.StringContains(t, "blobs are not supported for Phase 0 fork", rpcErr.Err.Error())
require.Equal(t, http.StatusBadRequest, core.ErrorReasonToHTTP(rpcErr.Reason))
require.StringContains(t, "blobs are not supported for Phase 0 fork", rpcErr.Err.Error())
})
t.Run("head", func(t *testing.T) {
setupDeneb(t)
blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{Root: blockRoot[:]},
ChainInfoFetcher: &mockChain.ChainService{Root: denebBlockRoot[:]},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
BlobStorage: bs,
BlobStorage: blobStorage,
}
retrievedVerifiedSidecars, rpcErr := blocker.Blobs(ctx, "head", nil)
require.IsNil(t, rpcErr)
require.Equal(t, blobCount, len(retrievedVerifiedSidecars))
for i := range blobCount {
expected := verifiedStoredSidecars[i]
actual := retrievedVerifiedSidecars[i].BlobSidecar
require.NotNil(t, actual)
require.Equal(t, expected.Index, actual.Index)
require.DeepEqual(t, expected.Blob, actual.Blob)
require.DeepEqual(t, expected.KzgCommitment, actual.KzgCommitment)
require.DeepEqual(t, expected.KzgProof, actual.KzgProof)
}
verifiedBlobs, rpcErr := blocker.Blobs(ctx, "head", nil)
assert.Equal(t, rpcErr == nil, true)
require.Equal(t, 4, len(verifiedBlobs))
sidecar := verifiedBlobs[0].BlobSidecar
require.NotNil(t, sidecar)
assert.Equal(t, uint64(0), sidecar.Index)
assert.DeepEqual(t, blobs[0].Blob, sidecar.Blob)
assert.DeepEqual(t, blobs[0].KzgCommitment, sidecar.KzgCommitment)
assert.DeepEqual(t, blobs[0].KzgProof, sidecar.KzgProof)
sidecar = verifiedBlobs[1].BlobSidecar
require.NotNil(t, sidecar)
assert.Equal(t, uint64(1), sidecar.Index)
assert.DeepEqual(t, blobs[1].Blob, sidecar.Blob)
assert.DeepEqual(t, blobs[1].KzgCommitment, sidecar.KzgCommitment)
assert.DeepEqual(t, blobs[1].KzgProof, sidecar.KzgProof)
sidecar = verifiedBlobs[2].BlobSidecar
require.NotNil(t, sidecar)
assert.Equal(t, uint64(2), sidecar.Index)
assert.DeepEqual(t, blobs[2].Blob, sidecar.Blob)
assert.DeepEqual(t, blobs[2].KzgCommitment, sidecar.KzgCommitment)
assert.DeepEqual(t, blobs[2].KzgProof, sidecar.KzgProof)
sidecar = verifiedBlobs[3].BlobSidecar
require.NotNil(t, sidecar)
assert.Equal(t, uint64(3), sidecar.Index)
assert.DeepEqual(t, blobs[3].Blob, sidecar.Blob)
assert.DeepEqual(t, blobs[3].KzgCommitment, sidecar.KzgCommitment)
assert.DeepEqual(t, blobs[3].KzgProof, sidecar.KzgProof)
})
t.Run("finalized", func(t *testing.T) {
setupDeneb(t)
blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
BlobStorage: bs,
BlobStorage: blobStorage,
}
verifiedBlobs, rpcErr := blocker.Blobs(ctx, "finalized", nil)
assert.Equal(t, rpcErr == nil, true)
require.Equal(t, 4, len(verifiedBlobs))
verifiedSidecars, rpcErr := blocker.Blobs(ctx, "finalized", nil)
require.IsNil(t, rpcErr)
require.Equal(t, blobCount, len(verifiedSidecars))
})
t.Run("justified", func(t *testing.T) {
setupDeneb(t)
blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{CurrentJustifiedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
ChainInfoFetcher: &mockChain.ChainService{CurrentJustifiedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
BlobStorage: bs,
BlobStorage: blobStorage,
}
verifiedBlobs, rpcErr := blocker.Blobs(ctx, "justified", nil)
assert.Equal(t, rpcErr == nil, true)
require.Equal(t, 4, len(verifiedBlobs))
verifiedSidecars, rpcErr := blocker.Blobs(ctx, "justified", nil)
require.IsNil(t, rpcErr)
require.Equal(t, blobCount, len(verifiedSidecars))
})
t.Run("root", func(t *testing.T) {
setupDeneb(t)
blocker := &BeaconDbBlocker{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
BlobStorage: bs,
BlobStorage: blobStorage,
}
verifiedBlobs, rpcErr := blocker.Blobs(ctx, hexutil.Encode(blockRoot[:]), nil)
assert.Equal(t, rpcErr == nil, true)
require.Equal(t, 4, len(verifiedBlobs))
verifiedBlobs, rpcErr := blocker.Blobs(ctx, hexutil.Encode(denebBlockRoot[:]), nil)
require.IsNil(t, rpcErr)
require.Equal(t, blobCount, len(verifiedBlobs))
})
t.Run("slot", func(t *testing.T) {
setupDeneb(t)
blocker := &BeaconDbBlocker{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
BlobStorage: bs,
BlobStorage: blobStorage,
}
verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123", nil)
assert.Equal(t, rpcErr == nil, true)
require.Equal(t, 4, len(verifiedBlobs))
require.IsNil(t, rpcErr)
require.Equal(t, blobCount, len(verifiedBlobs))
})
t.Run("one blob only", func(t *testing.T) {
const index = 2
setupDeneb(t)
blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
BlobStorage: bs,
BlobStorage: blobStorage,
}
verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123", []int{2})
assert.Equal(t, rpcErr == nil, true)
require.Equal(t, 1, len(verifiedBlobs))
sidecar := verifiedBlobs[0].BlobSidecar
require.NotNil(t, sidecar)
assert.Equal(t, uint64(2), sidecar.Index)
assert.DeepEqual(t, blobs[2].Blob, sidecar.Blob)
assert.DeepEqual(t, blobs[2].KzgCommitment, sidecar.KzgCommitment)
assert.DeepEqual(t, blobs[2].KzgProof, sidecar.KzgProof)
retrievedVerifiedSidecars, rpcErr := blocker.Blobs(ctx, "123", []int{index})
require.IsNil(t, rpcErr)
require.Equal(t, 1, len(retrievedVerifiedSidecars))
expected := verifiedStoredSidecars[index]
actual := retrievedVerifiedSidecars[0].BlobSidecar
require.NotNil(t, actual)
require.Equal(t, uint64(index), actual.Index)
require.DeepEqual(t, expected.Blob, actual.Blob)
require.DeepEqual(t, expected.KzgCommitment, actual.KzgCommitment)
require.DeepEqual(t, expected.KzgProof, actual.KzgProof)
})
t.Run("no blobs returns an empty array", func(t *testing.T) {
setupDeneb(t)
blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
BlobStorage: filesystem.NewEphemeralBlobStorage(t),
}
verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123", nil)
assert.Equal(t, rpcErr == nil, true)
require.IsNil(t, rpcErr)
require.Equal(t, 0, len(verifiedBlobs))
})
t.Run("no blob at index", func(t *testing.T) {
setupDeneb(t)
blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
BlobStorage: bs,
BlobStorage: blobStorage,
}
noBlobIndex := len(blobs) + 1
noBlobIndex := len(storedBlobSidecars) + 1
_, rpcErr := blocker.Blobs(ctx, "123", []int{0, noBlobIndex})
require.NotNil(t, rpcErr)
assert.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
require.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
})
t.Run("index too big", func(t *testing.T) {
setupDeneb(t)
blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
BlobStorage: bs,
BlobStorage: blobStorage,
}
_, rpcErr := blocker.Blobs(ctx, "123", []int{0, math.MaxInt})
require.NotNil(t, rpcErr)
assert.Equal(t, core.ErrorReason(core.BadRequest), rpcErr.Reason)
require.Equal(t, core.ErrorReason(core.BadRequest), rpcErr.Reason)
})
t.Run("not enough stored data column sidecars", func(t *testing.T) {
setupFulu(t)
_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars[:fieldparams.CellsPerBlob-1])
require.NoError(t, err)
blocker := &BeaconDbBlocker{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
BlobStorage: blobStorage,
DataColumnStorage: dataColumnStorage,
}
_, rpcErr := blocker.Blobs(ctx, hexutil.Encode(fuluBlockRoot[:]), nil)
require.NotNil(t, rpcErr)
require.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
})
t.Run("reconstruction needed", func(t *testing.T) {
setupFulu(t)
_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars[1 : peerdas.MinimumColumnsCountToReconstruct()+1])
require.NoError(t, err)
blocker := &BeaconDbBlocker{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
BlobStorage: blobStorage,
DataColumnStorage: dataColumnStorage,
}
retrievedVerifiedRoBlobs, rpcErr := blocker.Blobs(ctx, hexutil.Encode(fuluBlockRoot[:]), nil)
require.IsNil(t, rpcErr)
require.Equal(t, len(fuluBlobSidecars), len(retrievedVerifiedRoBlobs))
for i, retrievedVerifiedRoBlob := range retrievedVerifiedRoBlobs {
retrievedBlobSidecarPb := retrievedVerifiedRoBlob.BlobSidecar
initialBlobSidecarPb := fuluBlobSidecars[i].BlobSidecar
require.DeepSSZEqual(t, initialBlobSidecarPb, retrievedBlobSidecarPb)
}
})
t.Run("no reconstruction needed", func(t *testing.T) {
setupFulu(t)
_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars)
require.NoError(t, err)
blocker := &BeaconDbBlocker{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
},
BeaconDB: db,
BlobStorage: blobStorage,
DataColumnStorage: dataColumnStorage,
}
retrievedVerifiedRoBlobs, rpcErr := blocker.Blobs(ctx, hexutil.Encode(fuluBlockRoot[:]), nil)
require.IsNil(t, rpcErr)
require.Equal(t, len(fuluBlobSidecars), len(retrievedVerifiedRoBlobs))
for i, retrievedVerifiedRoBlob := range retrievedVerifiedRoBlobs {
retrievedBlobSidecarPb := retrievedVerifiedRoBlob.BlobSidecar
initialBlobSidecarPb := fuluBlobSidecars[i].BlobSidecar
require.DeepSSZEqual(t, initialBlobSidecarPb, retrievedBlobSidecarPb)
}
})
}
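Note on the fork gating these Fulu cases exercise: setupFulu sets FuluForkEpoch to 2, so with (assumed) 32 slots per epoch the first Fulu slot is 64 and the Fulu block generated at that slot takes the data-column path; the Deneb cases never set FuluForkEpoch, so their blocks always fall before the fork and use directly stored blob sidecars. A tiny sketch of the slot check performed in Blobs:

```go
package main

import "fmt"

func main() {
	const slotsPerEpoch = 32 // assumed mainnet value
	const fuluForkEpoch = 2  // as configured by setupFulu above
	fuluForkSlot := uint64(fuluForkEpoch * slotsPerEpoch) // 64

	for _, blockSlot := range []uint64{63, 64} {
		if blockSlot >= fuluForkSlot {
			fmt.Printf("slot %d: blobs served from data column sidecars\n", blockSlot)
		} else {
			fmt.Printf("slot %d: blobs served from stored blob sidecars\n", blockSlot)
		}
	}
}
```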

View File

@@ -12,7 +12,6 @@ import (
)
func TestServer_GetBeaconConfig(t *testing.T) {
t.Skip("this is a weird test")
ctx := t.Context()
bs := &Server{}
res, err := bs.GetBeaconConfig(ctx, &emptypb.Empty{})

View File

@@ -109,8 +109,6 @@ func (ds *Server) getPeer(pid peer.ID) (*ethpb.DebugPeerResponse, error) {
peerInfo.MetadataV0 = metadata.MetadataObjV0()
case metadata.MetadataObjV1() != nil:
peerInfo.MetadataV1 = metadata.MetadataObjV1()
}
}
addresses := peerStore.Addrs(pid)
@@ -129,7 +127,7 @@ func (ds *Server) getPeer(pid peer.ID) (*ethpb.DebugPeerResponse, error) {
if err != nil {
// In the event the chain state is nonexistent, we
// initialize with the zero value.
pStatus = new(ethpb.Status)
}
lastUpdated, err := peers.ChainStateLastUpdated(pid)
if err != nil {
@@ -152,17 +150,6 @@ func (ds *Server) getPeer(pid peer.ID) (*ethpb.DebugPeerResponse, error) {
BehaviourPenalty: float32(bPenalty),
ValidationError: errorToString(peers.Scorers().ValidationError(pid)),
}
return &ethpb.DebugPeerResponse{
ListeningAddresses: stringAddrs,
Direction: pbDirection,
@@ -170,7 +157,7 @@ func (ds *Server) getPeer(pid peer.ID) (*ethpb.DebugPeerResponse, error) {
PeerId: pid.String(),
Enr: enr,
PeerInfo: peerInfo,
PeerStatus: pStatus,
LastUpdated: unixTime,
ScoreInfo: scoreInfo,
}, nil

View File

@@ -1,5 +1,3 @@
# gazelle:ignore
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
@@ -39,7 +37,6 @@ go_library(
"//api/client/builder:go_default_library",
"//async/event:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/builder:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
@@ -50,7 +47,6 @@ go_library(
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
@@ -88,6 +84,7 @@ go_library(
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//network/forks:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
@@ -183,6 +180,7 @@ common_deps = [
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
]
# gazelle:ignore
go_test(
name = "go_default_test",
timeout = "moderate",

View File

@@ -39,13 +39,16 @@ func (vs *Server) constructGenericBeaconBlock(
return vs.constructBellatrixBlock(blockProto, isBlinded, bidStr), nil
case version.Capella:
return vs.constructCapellaBlock(blockProto, isBlinded, bidStr), nil
case version.Deneb:
bundle, ok := blobsBundler.(*enginev1.BlobsBundle)
if blobsBundler != nil && !ok {
return nil, fmt.Errorf("expected *enginev1.BlobsBundle, got %T", blobsBundler)
}
return vs.constructDenebBlock(blockProto, isBlinded, bidStr, bundle), nil
case version.Electra:
bundle, ok := blobsBundler.(*enginev1.BlobsBundle)
if blobsBundler != nil && !ok {
return nil, fmt.Errorf("expected *enginev1.BlobsBundle, got %T", blobsBundler)
}
return vs.constructElectraBlock(blockProto, isBlinded, bidStr, bundle), nil
case version.Fulu:

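Every blob-carrying case in this switch repeats the same guarded assertion: a nil bundler is legal (no blobs), but a non-nil value must be the concrete engine type. A sketch of that narrowing, with assertBundle as a hypothetical helper name:

// assertBundle narrows the optional bundler interface to *enginev1.BlobsBundle.
// A nil input yields a nil bundle; any other concrete type is rejected.
func assertBundle(blobsBundler any) (*enginev1.BlobsBundle, error) {
	bundle, ok := blobsBundler.(*enginev1.BlobsBundle)
	if blobsBundler != nil && !ok {
		return nil, fmt.Errorf("expected *enginev1.BlobsBundle, got %T", blobsBundler)
	}
	return bundle, nil
}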
View File

@@ -29,19 +29,12 @@ func TestConstructGenericBeaconBlock(t *testing.T) {
require.NoError(t, err)
r1, err := eb.Block.HashTreeRoot()
require.NoError(t, err)
result, err := vs.constructGenericBeaconBlock(b, nil, primitives.ZeroWei())
require.NoError(t, err)
r2, err := result.GetFulu().Block.HashTreeRoot()
require.NoError(t, err)
require.Equal(t, r1, r2)
require.Equal(t, result.IsBlinded, false)
})
// Test for Electra version

View File

@@ -261,6 +261,10 @@ func (vs *Server) buildValidatorDuty(
}
func populateCommitteeFields(duty *ethpb.DutiesV2Response_Duty, la *helpers.LiteAssignment) {
if duty == nil || la == nil {
// should never happen: callers are expected to set both values before this point
return
}
duty.CommitteeLength = la.CommitteeLength
duty.CommitteeIndex = la.CommitteeIndex
duty.ValidatorCommitteeIndex = la.ValidatorCommitteeIndex

View File

@@ -15,13 +15,9 @@ import (
blockfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/block"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
coreTime "github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/features"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -62,31 +58,28 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
if err != nil {
log.WithError(err).Error("Could not convert slot to time")
}
log.WithFields(logrus.Fields{
"slot": req.Slot,
"sinceSlotStartTime": time.Since(t),
}).Info("Begin building block")
// A syncing validator should not produce a block.
if vs.SyncChecker.Syncing() {
log.Error("Fail to build block: node is syncing")
return nil, status.Error(codes.Unavailable, "Syncing to latest head, not ready to respond")
}
// An optimistic validator MUST NOT produce a block (i.e., sign across the DOMAIN_BEACON_PROPOSER domain).
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().BellatrixForkEpoch {
if err := vs.optimisticStatus(ctx); err != nil {
log.WithError(err).Error("Fail to build block: node is optimistic")
return nil, status.Errorf(codes.Unavailable, "Validator is not ready to propose: %v", err)
}
}
head, parentRoot, err := vs.getParentState(ctx, req.Slot)
if err != nil {
log.WithError(err).Error("Fail to build block: could not get parent state")
return nil, err
}
sBlk, err := getEmptyBlock(req.Slot)
if err != nil {
log.WithError(err).Error("Fail to build block: could not get empty block")
return nil, status.Errorf(codes.Internal, "Could not prepare block: %v", err)
}
// Set slot, graffiti, randao reveal, and parent root.
@@ -98,7 +91,6 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
// Set proposer index.
idx, err := helpers.BeaconProposerIndex(ctx, head)
if err != nil {
log.WithError(err).Error("Fail to build block: could not calculate proposer index")
return nil, fmt.Errorf("could not calculate proposer index: %w", err)
}
sBlk.SetProposerIndex(idx)
@@ -109,7 +101,7 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
}
resp, err := vs.BuildBlockParallel(ctx, sBlk, head, req.SkipMevBoost, builderBoostFactor)
log := log.WithFields(logrus.Fields{
"slot": req.Slot,
"sinceSlotStartTime": time.Since(t),
"validator": sBlk.Block().ProposerIndex(),
@@ -282,13 +274,7 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
// Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will be eventually removed in favor of REST API.
//
// ProposeBeaconBlock handles the proposal of beacon blocks.
// TODO: Add tests
func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.ProposeBeaconBlock")
defer span.End()
@@ -300,12 +286,12 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "%s: %v", "decode block failed", err)
}
var sidecars []*ethpb.BlobSidecar
if block.IsBlinded() {
block, sidecars, err = vs.handleBlindedBlock(ctx, block)
} else if block.Version() >= version.Deneb {
sidecars, err = vs.blobSidecarsFromUnblindedBlock(block, req)
}
if err != nil {
return nil, status.Errorf(codes.Internal, "%s: %v", "handle block failed", err)
@@ -316,10 +302,9 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
return nil, status.Errorf(codes.Internal, "Could not hash tree root: %v", err)
}
slot := block.Block().Slot()
var wg sync.WaitGroup
errChan := make(chan error, 1)
wg.Add(1)
go func() {
defer wg.Done()
@@ -330,14 +315,8 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
errChan <- nil
}()
if err := vs.broadcastAndReceiveBlobs(ctx, sidecars, root); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive blobs: %v", err)
}
wg.Wait()
@@ -349,75 +328,46 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
}
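The proposal path overlaps its two broadcasts: the block goes out on a dedicated goroutine joined by a WaitGroup and a buffered error channel, while sidecars are broadcast inline before the final wait. A self-contained sketch of that shape, with propose and its two function parameters as illustrative names:

package main

import (
	"errors"
	"fmt"
	"sync"
)

// propose mirrors the broadcast shape above: block broadcast on its own
// goroutine, sidecar broadcast inline, then join on both results.
func propose(broadcastBlock, broadcastSidecars func() error) error {
	var wg sync.WaitGroup
	errChan := make(chan error, 1) // buffered so the goroutine never blocks
	wg.Add(1)
	go func() {
		defer wg.Done()
		errChan <- broadcastBlock()
	}()
	if err := broadcastSidecars(); err != nil {
		return err
	}
	wg.Wait()
	return <-errChan
}

func main() {
	err := propose(
		func() error { return nil },
		func() error { return errors.New("blob broadcast failed") },
	)
	fmt.Println(err) // blob broadcast failed
}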
// handleBlindedBlock processes blinded beacon blocks.
func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.SignedBeaconBlock) (interfaces.SignedBeaconBlock, []*ethpb.BlobSidecar, error) {
if block.Version() < version.Bellatrix {
return nil, nil, errors.New("pre-Bellatrix blinded block")
}
if vs.BlockBuilder == nil || !vs.BlockBuilder.Configured() {
return nil, nil, errors.New("unconfigured block builder")
}
copiedBlock, err := block.Copy()
if err != nil {
return nil, nil, err
}
payload, bundle, err := vs.BlockBuilder.SubmitBlindedBlock(ctx, block)
if err != nil {
return nil, nil, errors.Wrap(err, "submit blinded block failed")
}
if err := copiedBlock.Unblind(payload); err != nil {
return nil, nil, errors.Wrap(err, "unblind failed")
}
sidecars, err := unblindBlobsSidecars(copiedBlock, bundle)
if err != nil {
return nil, nil, errors.Wrap(err, "unblind blobs sidecars: commitment value doesn't match block")
}
return copiedBlock, sidecars, nil
}
func (vs *Server) blobSidecarsFromUnblindedBlock(block interfaces.SignedBeaconBlock, req *ethpb.GenericSignedBeaconBlock) ([]*ethpb.BlobSidecar, error) {
rawBlobs, proofs, err := blobsAndProofs(req)
if err != nil {
return nil, err
}
return BuildBlobSidecars(block, rawBlobs, proofs)
}
// broadcastReceiveBlock broadcasts a block and handles its reception.
func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [32]byte) error {
protoBlock, err := block.Proto()
if err != nil {
return errors.Wrap(err, "protobuf conversion failed")
@@ -433,7 +383,7 @@ func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.Si
}
// broadcastAndReceiveBlobs handles the broadcasting and reception of blob sidecars.
func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethpb.BlobSidecar, root [32]byte) error {
eg, eCtx := errgroup.WithContext(ctx)
for i, sc := range sidecars {
// Copy the iteration instance to a local variable to give each go-routine its own copy to play with.
@@ -462,69 +412,6 @@ func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethp
return eg.Wait()
}
// broadcastAndReceiveDataColumns handles the broadcasting and reception of data column sidecars.
func (vs *Server) broadcastAndReceiveDataColumns(
ctx context.Context,
sidecars []*ethpb.DataColumnSidecar,
root [fieldparams.RootLength]byte,
slot primitives.Slot,
) error {
dataColumnsWithholdCount := features.Get().DataColumnsWithholdCount
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(sidecars))
eg, _ := errgroup.WithContext(ctx)
for _, sd := range sidecars {
roDataColumn, err := blocks.NewRODataColumnWithRoot(sd, root)
if err != nil {
return errors.Wrap(err, "new read-only data column with root")
}
// We build this block ourselves, so we can upgrade the read only data column sidecar into a verified one.
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
// Copy the iteration instance to a local variable to give each go-routine its own copy to play with.
// See https://golang.org/doc/faq#closures_and_goroutines for more details.
sidecar := sd
eg.Go(func() error {
if sidecar.Index < dataColumnsWithholdCount {
log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", root),
"slot": slot,
"index": sidecar.Index,
}).Warning("Withholding data column")
return nil
}
// Compute the subnet index based on the column index.
subnet := peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)
if err := vs.P2P.BroadcastDataColumn(root, subnet, sidecar); err != nil {
return errors.Wrap(err, "broadcast data column")
}
return nil
})
}
if err := eg.Wait(); err != nil {
return errors.Wrap(err, "wait for data columns to be broadcasted")
}
if err := vs.DataColumnReceiver.ReceiveDataColumns(verifiedRODataColumns); err != nil {
return errors.Wrap(err, "receive data column")
}
for _, verifiedRODataColumn := range verifiedRODataColumns {
vs.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.DataColumnSidecarReceived,
Data: &operation.DataColumnSidecarReceivedData{DataColumn: &verifiedRODataColumn}, // #nosec G601
})
}
return nil
}
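Both broadcast helpers share the same errgroup fan-out: one goroutine per sidecar, with the group's context cancelling the rest after the first failure. A stripped-down, runnable sketch of the pattern, where the send parameter stands in for the P2P broadcast plus subnet computation:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// broadcastAll fans out one goroutine per item and returns the first error.
// Sketch only: the real helpers also emit feed events and handle withholding.
func broadcastAll[T any](ctx context.Context, items []T, send func(context.Context, T) error) error {
	eg, eCtx := errgroup.WithContext(ctx)
	for _, item := range items {
		item := item // per-iteration copy, as in the original loop
		eg.Go(func() error { return send(eCtx, item) })
	}
	return eg.Wait()
}

func main() {
	err := broadcastAll(context.Background(), []int{0, 1, 2}, func(_ context.Context, i int) error {
		fmt.Println("broadcast sidecar", i)
		return nil
	})
	fmt.Println("done:", err)
}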
// Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will be eventually removed in favor of REST API.
//
// PrepareBeaconProposer caches and updates the fee recipient for the given proposer.

View File

@@ -19,6 +19,7 @@ import (
"github.com/OffchainLabs/prysm/v6/encoding/ssz"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v6/network/forks"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -219,9 +220,14 @@ func (vs *Server) getPayloadHeaderFromBuilder(
if signedBid == nil || signedBid.IsNil() {
return nil, errors.New("builder returned nil bid")
}
fork, err := forks.Fork(slots.ToEpoch(slot))
if err != nil {
return nil, errors.Wrap(err, "unable to get fork information")
}
forkName, ok := params.BeaconConfig().ForkVersionNames[bytesutil.ToBytes4(fork.CurrentVersion)]
if !ok {
return nil, errors.New("unable to find current fork in schedule")
}
if !strings.EqualFold(version.String(signedBid.Version()), forkName) {
return nil, fmt.Errorf("builder bid response version: %d is different from head block version: %d for epoch %d", signedBid.Version(), b.Version(), slots.ToEpoch(slot))
}
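The check above reduces to: resolve the fork active at the bid's slot, map its version bytes to a canonical name, then compare that name case-insensitively against the bid's version. A compact sketch of the lookup using the same helpers, with forkNameAt as a hypothetical wrapper:

import (
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	"github.com/OffchainLabs/prysm/v6/network/forks"
	"github.com/pkg/errors"
)

// forkNameAt maps an epoch to its canonical fork name via the config's
// fork-version name table, mirroring the handler above.
func forkNameAt(epoch primitives.Epoch) (string, error) {
	fork, err := forks.Fork(epoch)
	if err != nil {
		return "", errors.Wrap(err, "unable to get fork information")
	}
	name, ok := params.BeaconConfig().ForkVersionNames[bytesutil.ToBytes4(fork.CurrentVersion)]
	if !ok {
		return "", errors.New("unable to find current fork in schedule")
	}
	return name, nil
}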

View File

@@ -29,6 +29,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"google.golang.org/grpc/codes"
@@ -66,7 +67,6 @@ type Server struct {
SyncCommitteePool synccommittee.Pool
BlockReceiver blockchain.BlockReceiver
BlobReceiver blockchain.BlobReceiver
DataColumnReceiver blockchain.DataColumnReceiver
MockEth1Votes bool
Eth1BlockFetcher execution.POWBlockFetcher
PendingDepositsFetcher depositsnapshot.PendingDepositsFetcher
@@ -155,7 +155,7 @@ func (vs *Server) ValidatorIndex(ctx context.Context, req *ethpb.ValidatorIndexR
//
// DomainData fetches the current domain version information from the beacon state.
func (vs *Server) DomainData(ctx context.Context, request *ethpb.DomainRequest) (*ethpb.DomainResponse, error) {
fork, err := forks.Fork(request.Epoch)
if err != nil {
return nil, err
}
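DomainData thus boils down to two steps: find the fork in effect at the requested epoch, then fold the domain type, that fork's version, and the genesis validators root into the 32-byte signing domain. A hedged sketch with domainFor as a hypothetical helper (the real handler pulls the genesis validators root from head state):

import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/network/forks"
)

// domainFor computes the signing domain for a domain type at a given epoch.
func domainFor(epoch primitives.Epoch, domainType [4]byte, genesisValidatorsRoot []byte) ([]byte, error) {
	fork, err := forks.Fork(epoch)
	if err != nil {
		return nil, err
	}
	return signing.ComputeDomain(domainType, fork.CurrentVersion, genesisValidatorsRoot)
}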

View File

@@ -22,7 +22,6 @@ import (
"github.com/OffchainLabs/prysm/v6/testing/mock"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
logTest "github.com/sirupsen/logrus/hooks/test"
"go.uber.org/mock/gomock"
"google.golang.org/grpc/codes"
@@ -319,16 +318,23 @@ func TestWaitForChainStart_NotStartedThenLogFired(t *testing.T) {
}
func TestServer_DomainData_Exits(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
[4]byte(cfg.GenesisForkVersion): primitives.Epoch(0),
[4]byte(cfg.AltairForkVersion): primitives.Epoch(5),
[4]byte(cfg.BellatrixForkVersion): primitives.Epoch(10),
[4]byte(cfg.CapellaForkVersion): primitives.Epoch(15),
[4]byte(cfg.DenebForkVersion): primitives.Epoch(20),
}
params.OverrideBeaconConfig(cfg)
beaconState := &ethpb.BeaconStateBellatrix{
Slot: 4000,
}
block := util.NewBeaconBlock()
genesisRoot, err := block.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
s, err := state_native.InitializeFromProtoUnsafeBellatrix(beaconState)
require.NoError(t, err)
vs := &Server{
Ctx: t.Context(),
@@ -336,19 +342,14 @@ func TestServer_DomainData_Exits(t *testing.T) {
HeadFetcher: &mockChain.ChainService{State: s, Root: genesisRoot[:]},
}
reqDomain, err := vs.DomainData(t.Context(), &ethpb.DomainRequest{
Epoch: 100,
Domain: params.BeaconConfig().DomainDeposit[:],
})
assert.NoError(t, err)
wantedDomain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, params.BeaconConfig().DenebForkVersion, make([]byte, 32))
assert.NoError(t, err)
assert.DeepEqual(t, reqDomain.SignatureDomain, wantedDomain)
beaconStateNew := &ethpb.BeaconStateDeneb{
Slot: 4000,
@@ -359,11 +360,11 @@ func TestServer_DomainData_Exits(t *testing.T) {
reqDomain, err = vs.DomainData(t.Context(), &ethpb.DomainRequest{
Epoch: 100,
Domain: params.BeaconConfig().DomainVoluntaryExit[:],
})
require.NoError(t, err)
wantedDomain, err = signing.ComputeDomain(params.BeaconConfig().DomainVoluntaryExit, params.BeaconConfig().CapellaForkVersion, make([]byte, 32))
require.NoError(t, err)
assert.DeepEqual(t, reqDomain.SignatureDomain, wantedDomain)

View File

@@ -89,7 +89,6 @@ type Config struct {
AttestationReceiver blockchain.AttestationReceiver
BlockReceiver blockchain.BlockReceiver
BlobReceiver blockchain.BlobReceiver
DataColumnReceiver blockchain.DataColumnReceiver
ExecutionChainService execution.Chain
ChainStartFetcher execution.ChainStartFetcher
ExecutionChainInfoFetcher execution.ChainInfoFetcher
@@ -121,7 +120,6 @@ type Config struct {
Router *http.ServeMux
ClockWaiter startup.ClockWaiter
BlobStorage *filesystem.BlobStorage
DataColumnStorage *filesystem.DataColumnStorage
TrackedValidatorsCache *cache.TrackedValidatorsCache
PayloadIDCache *cache.PayloadIDCache
LCStore *lightClient.Store
@@ -198,7 +196,6 @@ func NewService(ctx context.Context, cfg *Config) *Service {
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
BlobStorage: s.cfg.BlobStorage,
DataColumnStorage: s.cfg.DataColumnStorage,
}
rewardFetcher := &rewards.BlockRewardService{Replayer: ch, DB: s.cfg.BeaconDB}
coreService := &core.Service{
@@ -239,7 +236,6 @@ func NewService(ctx context.Context, cfg *Config) *Service {
P2P: s.cfg.Broadcaster,
BlockReceiver: s.cfg.BlockReceiver,
BlobReceiver: s.cfg.BlobReceiver,
DataColumnReceiver: s.cfg.DataColumnReceiver,
MockEth1Votes: s.cfg.MockEth1Votes,
Eth1BlockFetcher: s.cfg.ExecutionChainService,
PendingDepositsFetcher: s.cfg.PendingDepositFetcher,

View File

@@ -35,21 +35,12 @@ func (g *Clock) GenesisValidatorsRoot() [32]byte {
return g.vr
}
// GenesisValidatorsRootSlice returns the genesis state validators root as a slice for convenience.
func (g *Clock) GenesisValidatorsRootSlice() []byte {
return g.vr[:]
}
// CurrentSlot returns the current slot relative to the time.Time value that Clock embeds.
func (g *Clock) CurrentSlot() types.Slot {
now := g.now()
return slots.Duration(g.t, now)
}
func (g *Clock) CurrentEpoch() types.Epoch {
return slots.ToEpoch(g.CurrentSlot())
}
// SlotStart computes the time the given slot begins.
func (g *Clock) SlotStart(slot types.Slot) time.Time {
return slots.BeginsAt(slot, g.t)

View File

@@ -3,15 +3,14 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"genesis.go",
"genesis_mainnet.go",
],
embedsrcs = ["mainnet.ssz.snappy"],
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/state/genesis",
visibility = [
"//beacon-chain/db:__subpackages__",
"//config/params:__pkg__",
"//genesis:__pkg__",
],
deps = [
"//beacon-chain/state:go_default_library",

View File

@@ -0,0 +1,34 @@
package genesis
import (
_ "embed"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/golang/snappy"
)
var embeddedStates = map[string]*[]byte{}
// State returns a copy of the genesis state from a hardcoded value.
func State(name string) (state.BeaconState, error) {
sb, exists := embeddedStates[name]
if exists {
return load(*sb)
}
return nil, nil
}
// load a compressed ssz state file into a beacon state struct.
func load(b []byte) (state.BeaconState, error) {
st := &ethpb.BeaconState{}
b, err := snappy.Decode(nil /*dst*/, b)
if err != nil {
return nil, err
}
if err := st.UnmarshalSSZ(b); err != nil {
return nil, err
}
return state_native.InitializeFromProtoUnsafePhase0(st)
}
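Note the lookup contract in State: an unknown name yields (nil, nil) rather than an error, so callers must check both returns. A hedged usage sketch, with loadEmbeddedGenesis as a hypothetical wrapper:

import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state/genesis"
	"github.com/pkg/errors"
)

// loadEmbeddedGenesis wraps genesis.State and turns the (nil, nil)
// "not embedded" case into an explicit error.
func loadEmbeddedGenesis(name string) (state.BeaconState, error) {
	st, err := genesis.State(name)
	if err != nil {
		return nil, errors.Wrap(err, "decode embedded genesis state")
	}
	if st == nil {
		return nil, errors.Errorf("no embedded genesis state for %q", name)
	}
	return st, nil
}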

View File

@@ -1,7 +1,7 @@
//go:build !noMainnetGenesis
// +build !noMainnetGenesis
package genesis
import (
_ "embed"

View File

@@ -1,10 +1,10 @@
package genesis_test
import (
"testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/genesis"
"github.com/OffchainLabs/prysm/v6/config/params"
)
func TestGenesisState(t *testing.T) {
@@ -17,7 +17,7 @@ func TestGenesisState(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
st, err := genesis.State(tt.name)
if err != nil {
t.Fatal(err)
}

View File

@@ -350,7 +350,3 @@ type WriteOnlyDeposits interface {
type WriteOnlyProposerLookahead interface {
SetProposerLookahead([]primitives.ValidatorIndex) error
}
func IsNil(s BeaconState) bool {
return s == nil || s.IsNil()
}

View File

@@ -34,7 +34,3 @@ var fieldMap map[types.FieldIndex]types.DataType
func errNotSupported(funcName string, ver int) error {
return fmt.Errorf("%s is not supported for %s", funcName, version.String(ver))
}
func IsNil(s state.BeaconState) bool {
return s == nil || s.IsNil()
}

View File

@@ -259,7 +259,7 @@ func (s *State) latestAncestor(ctx context.Context, blockRoot [32]byte) (state.B
defer span.End()
if s.isFinalizedRoot(blockRoot) {
finalizedState := s.finalizedState()
if finalizedState != nil {
return finalizedState, nil
}
@@ -297,7 +297,7 @@ func (s *State) latestAncestor(ctx context.Context, blockRoot [32]byte) (state.B
// Does the state exist in finalized info cache.
if s.isFinalizedRoot(parentRoot) {
return s.finalizedState(), nil
}
// Does the state exist in epoch boundary cache.

Some files were not shown because too many files have changed in this diff.