Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 13:58:09 -05:00)

Compare commits: 100 commits (beacon-nod… → blob-proce…)
Commit SHA1s:

edbf5d11d1 1148d96313 daf5e9b286 309a863358 15b88ed7f2 4b4855d65c 03564ee93b 96541c2c6c 0bc55339a7 2618852090
fb3e42c1b3 0e2a9d0d82 6573cccf1d a89727f38c 5e1fb15c40 5de8ec4600 7e88eefc60 cabf3476e7 5a01eecc50 b608c9f711
671bf00c98 cbf6a2752d 642458f037 2a067d5d03 a2f60364ae 45f68fa8d5 f55708b995 00826e8858 76fec1799e 9c938d354d
83932d8e05 beebb56c8e 0920fb1f61 29f8880638 f91efafe24 9387a36b66 65ce27292c 823f8ee3a2 88e1b9edb3 c7e28908f5
7143fe80bc e231d88ca0 0486b64dcc b4847ac9ad bc125a95ae f592bf7f07 bcc23d2ded 6e0715e92a 71fa70ce40 4809da62cc
e10dbaa8b4 71b08a50b7 cb5ce74a23 cc81444e13 bfae7f3c9f 2fc5011091 493a7179d7 b52baba2f1 2f378a045a 58cdb29ef3
cc2b4db582 be9b6ea837 806a394c89 97a99874e8 945b087ca9 b57effd096 867db1aeee 99843688cd a536612c39 c5501f8775
55e4c6e1db 2806326155 d7318ea485 e183d1dff4 a3868e7fc6 af70677778 8eb82dd378 0bd232667b 39072e1b74 66011d5d9c
419dbd57f7 da6ae3c204 44973b0bb3 de0c7e6256 c1c0cd040c 734eb98941 ffaef83634 f9a40ef111 7454041356 f37301c0c0
cf1bfb9d67 9d8dd5c9ad f812bdcf60 58c0899676 4628c19f51 58f23d2302 9e33723b26 2c32a87d17 56f3dafb54 70380660b3
WORKSPACE (30 changed lines)

```diff
@@ -65,10 +65,10 @@ bazel_skylib_workspace()
 http_archive(
     name = "bazel_gazelle",
-    sha256 = "29d5dafc2a5582995488c6735115d1d366fcd6a0fc2e2a153f02988706349825",
+    sha256 = "d3fa66a39028e97d76f9e2db8f1b0c11c099e8e01bf363a923074784e451f809",
     urls = [
-        "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.31.0/bazel-gazelle-v0.31.0.tar.gz",
-        "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.31.0/bazel-gazelle-v0.31.0.tar.gz",
+        "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.33.0/bazel-gazelle-v0.33.0.tar.gz",
+        "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.33.0/bazel-gazelle-v0.33.0.tar.gz",
     ],
 )
@@ -110,10 +110,10 @@ http_archive(
         # Expose internals of go_test for custom build transitions.
         "//third_party:io_bazel_rules_go_test.patch",
     ],
-    sha256 = "bfc5ce70b9d1634ae54f4e7b495657a18a04e0d596785f672d35d5f505ab491a",
+    sha256 = "91585017debb61982f7054c9688857a2ad1fd823fc3f9cb05048b0025c47d023",
     urls = [
-        "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.40.0/rules_go-v0.40.0.zip",
-        "https://github.com/bazelbuild/rules_go/releases/download/v0.40.0/rules_go-v0.40.0.zip",
+        "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.42.0/rules_go-v0.42.0.zip",
+        "https://github.com/bazelbuild/rules_go/releases/download/v0.42.0/rules_go-v0.42.0.zip",
     ],
 )
@@ -206,7 +206,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
 go_rules_dependencies()
 
 go_register_toolchains(
-    go_version = "1.20.7",
+    go_version = "1.20.10",
     nogo = "@//:nogo",
 )
@@ -374,6 +374,22 @@ http_archive(
 )
 
+# External dependencies
+http_archive(
+    name = "googleapis",
+    sha256 = "9d1a930e767c93c825398b8f8692eca3fe353b9aaadedfbcf1fca2282c85df88",
+    strip_prefix = "googleapis-64926d52febbf298cb82a8f472ade4a3969ba922",
+    urls = [
+        "https://github.com/googleapis/googleapis/archive/64926d52febbf298cb82a8f472ade4a3969ba922.zip",
+    ],
+)
+
+load("@googleapis//:repository_rules.bzl", "switched_rules_by_language")
+
+switched_rules_by_language(
+    name = "com_google_googleapis_imports",
+    go = True,
+)
 
 load("//:deps.bzl", "prysm_deps")
 
 # gazelle:repository_macro deps.bzl%prysm_deps
```
```diff
@@ -13,6 +13,7 @@ go_library(
         "//api/client:go_default_library",
         "//beacon-chain/core/helpers:go_default_library",
         "//beacon-chain/rpc/apimiddleware:go_default_library",
         "//beacon-chain/rpc/eth/beacon:go_default_library",
         "//beacon-chain/rpc/eth/shared:go_default_library",
         "//beacon-chain/state:go_default_library",
         "//consensus-types/interfaces:go_default_library",
```
```diff
@@ -14,6 +14,7 @@ import (
 	"text/template"
 
 	"github.com/prysmaticlabs/prysm/v4/api/client"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/beacon"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
 	"github.com/prysmaticlabs/prysm/v4/network/forks"
 	v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
```
```diff
@@ -284,7 +285,7 @@ func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData
 
 // SubmitChangeBLStoExecution calls a beacon API endpoint to set the withdrawal addresses based on the given signed messages.
 // If the API responds with something other than OK there will be failure messages associated to the corresponding request message.
-func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apimiddleware.SignedBLSToExecutionChangeJson) error {
+func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*shared.SignedBLSToExecutionChange) error {
 	u := c.BaseURL().ResolveReference(&url.URL{Path: changeBLStoExecutionPath})
 	body, err := json.Marshal(request)
 	if err != nil {
```
```diff
@@ -323,12 +324,12 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apim
 
 // GetBLStoExecutionChanges gets all the set withdrawal messages in the node's operation pool.
 // Returns a struct representation of json response.
-func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*apimiddleware.BLSToExecutionChangesPoolResponseJson, error) {
+func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*beacon.BLSToExecutionChangesPoolResponse, error) {
 	body, err := c.Get(ctx, changeBLStoExecutionPath)
 	if err != nil {
 		return nil, err
 	}
-	poolResponse := &apimiddleware.BLSToExecutionChangesPoolResponseJson{}
+	poolResponse := &beacon.BLSToExecutionChangesPoolResponse{}
 	err = json.Unmarshal(body, poolResponse)
 	if err != nil {
 		return nil, err
```
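The two methods above now decode into typed response structs from beacon-chain/rpc/eth (beacon, shared) instead of the legacy apimiddleware JSON types. Below is a minimal, self-contained sketch of the same fetch-then-unmarshal pattern against the standard beacon-API pool route; the standalone types and helper names here are illustrative stand-ins, not Prysm's actual client.

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

// BLSToExecutionChangesPoolResponse stands in for the typed struct the client
// now returns; the real field set lives in beacon-chain/rpc/eth/beacon.
type BLSToExecutionChangesPoolResponse struct {
	Data []json.RawMessage `json:"data"`
}

// getBLSToExecutionChanges fetches the node's operation pool and decodes it
// into the typed response, mirroring the updated GetBLStoExecutionChanges.
func getBLSToExecutionChanges(ctx context.Context, baseURL string) (*BLSToExecutionChangesPoolResponse, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, baseURL+"/eth/v1/beacon/pool/bls_to_execution_changes", nil)
	if err != nil {
		return nil, err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	out := &BLSToExecutionChangesPoolResponse{}
	if err := json.Unmarshal(body, out); err != nil {
		return nil, err
	}
	return out, nil
}

func main() {
	pool, err := getBLSToExecutionChanges(context.Background(), "http://localhost:3500") // example node address
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	fmt.Printf("pool has %d pending BLS-to-execution changes\n", len(pool.Data))
}
```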
```diff
@@ -876,7 +876,7 @@ func testSignedBlindedBeaconBlockAndBlobsDeneb(t *testing.T) *eth.SignedBlindedB
 		},
 		SyncAggregate: &eth.SyncAggregate{
 			SyncCommitteeSignature: make([]byte, 96),
-			SyncCommitteeBits:      bitfield.Bitvector512(ezDecode(t, "0x6451e9f951ebf05edc01de67e593484b672877054f055903ff0df1a1a945cf30ca26bb4d4b154f94a1bc776bcf5d0efb3603e1f9b8ee2499ccdcfe2a18cef458")),
+			SyncCommitteeBits:      ezDecode(t, "0x6451e9f951ebf05edc01de67e593484b672877054f055903ff0df1a1a945cf30ca26bb4d4b154f94a1bc776bcf5d0efb3603e1f9b8ee2499ccdcfe2a18cef458"),
 		},
 		ExecutionPayloadHeader: &v1.ExecutionPayloadHeaderDeneb{
 			ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
```
```diff
@@ -7,6 +7,7 @@ go_library(
     visibility = ["//visibility:public"],
     deps = [
         "//api/client:go_default_library",
+        "//validator/rpc:go_default_library",
         "//validator/rpc/apimiddleware:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
     ],
```
```diff
@@ -8,6 +8,7 @@ import (
 
 	"github.com/pkg/errors"
 	"github.com/prysmaticlabs/prysm/v4/api/client"
+	"github.com/prysmaticlabs/prysm/v4/validator/rpc"
 	"github.com/prysmaticlabs/prysm/v4/validator/rpc/apimiddleware"
 )
```
```diff
@@ -41,7 +42,7 @@ func (c *Client) GetValidatorPubKeys(ctx context.Context) ([]string, error) {
 	if err != nil {
 		return nil, err
 	}
-	if len(jsonlocal.Keystores) == 0 && len(jsonremote.Keystores) == 0 {
+	if len(jsonlocal.Keystores) == 0 && len(jsonremote.Data) == 0 {
 		return nil, errors.New("there are no local keys or remote keys on the validator")
 	}
```
```diff
@@ -50,8 +51,8 @@ func (c *Client) GetValidatorPubKeys(ctx context.Context) ([]string, error) {
 	for index := range jsonlocal.Keystores {
 		hexKeys[jsonlocal.Keystores[index].ValidatingPubkey] = true
 	}
-	for index := range jsonremote.Keystores {
-		hexKeys[jsonremote.Keystores[index].Pubkey] = true
+	for index := range jsonremote.Data {
+		hexKeys[jsonremote.Data[index].Pubkey] = true
 	}
 	keys := make([]string, 0)
 	for k := range hexKeys {
```
```diff
@@ -74,14 +75,14 @@ func (c *Client) GetLocalValidatorKeys(ctx context.Context) (*apimiddleware.List
 }
 
 // GetRemoteValidatorKeys calls the keymanager APIs for web3signer validator keys
-func (c *Client) GetRemoteValidatorKeys(ctx context.Context) (*apimiddleware.ListRemoteKeysResponseJson, error) {
+func (c *Client) GetRemoteValidatorKeys(ctx context.Context) (*rpc.ListRemoteKeysResponse, error) {
 	remoteBytes, err := c.Get(ctx, remoteKeysPath, client.WithAuthorizationToken(c.Token()))
 	if err != nil {
 		if !strings.Contains(err.Error(), "Prysm Wallet is not of type Web3Signer") {
 			return nil, err
 		}
 	}
-	jsonremote := &apimiddleware.ListRemoteKeysResponseJson{}
+	jsonremote := &rpc.ListRemoteKeysResponse{}
 	if len(remoteBytes) != 0 {
 		if err := json.Unmarshal(remoteBytes, jsonremote); err != nil {
 			return nil, errors.Wrap(err, "failed to parse remote keystore list")
```
```diff
@@ -107,13 +108,13 @@ func (c *Client) GetFeeRecipientAddresses(ctx context.Context, validators []stri
 }
 
 // GetFeeRecipientAddress takes a public key and calls the keymanager API to return its fee recipient.
-func (c *Client) GetFeeRecipientAddress(ctx context.Context, pubkey string) (*apimiddleware.GetFeeRecipientByPubkeyResponseJson, error) {
+func (c *Client) GetFeeRecipientAddress(ctx context.Context, pubkey string) (*rpc.GetFeeRecipientByPubkeyResponse, error) {
 	path := strings.Replace(feeRecipientPath, "{pubkey}", pubkey, 1)
 	b, err := c.Get(ctx, path, client.WithAuthorizationToken(c.Token()))
 	if err != nil {
 		return nil, err
 	}
-	feejson := &apimiddleware.GetFeeRecipientByPubkeyResponseJson{}
+	feejson := &rpc.GetFeeRecipientByPubkeyResponse{}
 	if err := json.Unmarshal(b, feejson); err != nil {
 		return nil, errors.Wrap(err, "failed to parse fee recipient")
 	}
```
```diff
@@ -4,6 +4,7 @@ const (
 	VersionHeader                 = "Eth-Consensus-Version"
 	ExecutionPayloadBlindedHeader = "Eth-Execution-Payload-Blinded"
 	ExecutionPayloadValueHeader   = "Eth-Execution-Payload-Value"
 	ConsensusBlockValueHeader     = "Eth-Consensus-Block-Value"
 	JsonMediaType                 = "application/json"
 	OctetStreamMediaType          = "application/octet-stream"
 )
```
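These constants name the HTTP headers and media types Prysm's beacon API attaches to block responses. A hedged sketch of a handler fragment that sets them follows; the handler, route, fork name, and values are illustrative only.

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// Header names mirrored from the constants above.
const (
	versionHeader             = "Eth-Consensus-Version"
	executionPayloadBlinded   = "Eth-Execution-Payload-Blinded"
	consensusBlockValueHeader = "Eth-Consensus-Block-Value"
	jsonMediaType             = "application/json"
)

// blockHandler is an illustrative handler that tags a JSON block response
// with consensus metadata; the fork name and header values are example data.
func blockHandler(w http.ResponseWriter, _ *http.Request) {
	w.Header().Set("Content-Type", jsonMediaType)
	w.Header().Set(versionHeader, "deneb")
	w.Header().Set(executionPayloadBlinded, "false")
	w.Header().Set(consensusBlockValueHeader, "0")
	fmt.Fprintln(w, `{}`)
}

func main() {
	rec := httptest.NewRecorder()
	blockHandler(rec, httptest.NewRequest(http.MethodGet, "/eth/v2/beacon/blocks/head", nil))
	fmt.Println(rec.Header().Get(versionHeader)) // prints "deneb"
}
```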
api/server/BUILD.bazel (new file, 24 lines)

```
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "middleware.go",
        "util.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v4/api/server",
    visibility = ["//visibility:public"],
)

go_test(
    name = "go_default_test",
    srcs = [
        "middleware_test.go",
        "util_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
    ],
)
```
api/server/middleware.go (new file, 15 lines)

```go
package server

import (
	"net/http"
)

func NormalizeQueryValuesHandler(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		query := r.URL.Query()
		NormalizeQueryValues(query)
		r.URL.RawQuery = query.Encode()

		next.ServeHTTP(w, r)
	})
}
```
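NormalizeQueryValuesHandler rewrites comma-separated query values (e.g. ?key=a,b,c) into repeated parameters (?key=a&key=b&key=c) before the wrapped handler runs, as the test below exercises. A sketch of wiring it around a mux; the splitting logic is a stand-in, since the real NormalizeQueryValues lives in api/server/util.go and is not shown in full here.

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"net/url"
	"strings"
)

// normalizeQueryValues is a stand-in for server.NormalizeQueryValues: it
// splits comma-separated values into repeated parameters.
func normalizeQueryValues(queryParams url.Values) {
	for key, vals := range queryParams {
		split := make([]string, 0, len(vals))
		for _, v := range vals {
			split = append(split, strings.Split(v, ",")...)
		}
		queryParams[key] = split
	}
}

// normalizeQueryValuesHandler mirrors server.NormalizeQueryValuesHandler.
func normalizeQueryValuesHandler(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		query := r.URL.Query()
		normalizeQueryValues(query)
		r.URL.RawQuery = query.Encode()
		next.ServeHTTP(w, r)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/states", func(w http.ResponseWriter, r *http.Request) {
		// With the middleware in place, ?id=a,b,c arrives as id=a&id=b&id=c.
		fmt.Fprintf(w, "ids: %v\n", r.URL.Query()["id"])
	})
	log.Fatal(http.ListenAndServe(":8080", normalizeQueryValuesHandler(mux))) // example address
}
```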
api/server/middleware_test.go (new file, 54 lines)

```go
package server

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestNormalizeQueryValuesHandler(t *testing.T) {
	nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, err := w.Write([]byte("next handler"))
		require.NoError(t, err)
	})

	handler := NormalizeQueryValuesHandler(nextHandler)

	tests := []struct {
		name          string
		inputQuery    string
		expectedQuery string
	}{
		{
			name:          "3 values",
			inputQuery:    "key=value1,value2,value3",
			expectedQuery: "key=value1&key=value2&key=value3", // replace with expected normalized value
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			req, err := http.NewRequest("GET", "/test?"+test.inputQuery, nil)
			if err != nil {
				t.Fatal(err)
			}

			rr := httptest.NewRecorder()
			handler.ServeHTTP(rr, req)

			if rr.Code != http.StatusOK {
				t.Errorf("handler returned wrong status code: got %v want %v", rr.Code, http.StatusOK)
			}

			if req.URL.RawQuery != test.expectedQuery {
				t.Errorf("query not normalized: got %v want %v", req.URL.RawQuery, test.expectedQuery)
			}

			if rr.Body.String() != "next handler" {
				t.Errorf("next handler was not executed")
			}
		})
	}
}
```
```diff
@@ -1,4 +1,4 @@
-package helpers
+package server
 
 import (
 	"net/url"
@@ -1,4 +1,4 @@
-package helpers
+package server
 
 import (
 	"testing"
```
```diff
@@ -12,6 +12,7 @@ go_library(
         "head.go",
         "head_sync_committee_info.go",
         "init_sync_process_block.go",
+        "lightclient.go",
         "log.go",
         "merge_ascii_art.go",
         "metrics.go",
@@ -48,6 +49,7 @@ go_library(
         "//beacon-chain/core/signing:go_default_library",
         "//beacon-chain/core/time:go_default_library",
         "//beacon-chain/core/transition:go_default_library",
+        "//beacon-chain/das:go_default_library",
         "//beacon-chain/db:go_default_library",
         "//beacon-chain/db/filters:go_default_library",
         "//beacon-chain/db/kv:go_default_library",
@@ -77,6 +79,8 @@ go_library(
         "//monitoring/tracing:go_default_library",
         "//proto/engine/v1:go_default_library",
         "//proto/eth/v1:go_default_library",
         "//proto/eth/v2:go_default_library",
         "//proto/migration:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "//proto/prysm/v1alpha1/attestation:go_default_library",
         "//runtime/version:go_default_library",
```
```diff
@@ -70,11 +70,8 @@ func IsInvalidBlock(e error) bool {
 	if e == nil {
 		return false
 	}
-	_, ok := e.(invalidBlockError)
-	if !ok {
-		return IsInvalidBlock(errors.Unwrap(e))
-	}
-	return true
+	var d invalidBlockError
+	return errors.As(e, &d)
 }
 
 // InvalidBlockLVH returns the invalid block last valid hash root. If the error
@@ -83,7 +80,8 @@ func InvalidBlockLVH(e error) [32]byte {
 	if e == nil {
 		return [32]byte{}
 	}
-	d, ok := e.(invalidBlockError)
+	var d invalidBlockError
+	ok := errors.As(e, &d)
 	if !ok {
 		return [32]byte{}
 	}
@@ -96,7 +94,8 @@ func InvalidBlockRoot(e error) [32]byte {
 	if e == nil {
 		return [32]byte{}
 	}
-	d, ok := e.(invalidBlockError)
+	var d invalidBlockError
+	ok := errors.As(e, &d)
 	if !ok {
 		return [32]byte{}
 	}
@@ -108,7 +107,8 @@ func InvalidAncestorRoots(e error) [][32]byte {
 	if e == nil {
 		return [][32]byte{}
 	}
-	d, ok := e.(invalidBlockError)
+	var d invalidBlockError
+	ok := errors.As(e, &d)
 	if !ok {
 		return [][32]byte{}
 	}
```
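The refactor above swaps manual type assertions and recursive errors.Unwrap calls for errors.As, which walks the wrap chain itself; that is what lets the wrapped-error assertions added to the tests below pass. A small self-contained illustration (the error type here is a stand-in, not Prysm's invalidBlockError):

```go
package main

import (
	"errors"
	"fmt"
)

// badBlockError is an illustrative error type standing in for invalidBlockError.
type badBlockError struct{ root [32]byte }

func (e badBlockError) Error() string { return "invalid block" }

// isBadBlock matches even when the error has been wrapped, because
// errors.As unwraps the chain for us.
func isBadBlock(err error) bool {
	var d badBlockError
	return errors.As(err, &d)
}

func main() {
	base := badBlockError{root: [32]byte{'a'}}
	wrapped := fmt.Errorf("processing batch: %w", base)

	// A direct type assertion on the wrapped error fails...
	_, ok := wrapped.(badBlockError)
	fmt.Println("type assertion on wrapped error:", ok) // false

	// ...while errors.As still finds the underlying badBlockError.
	fmt.Println("errors.As on wrapped error:", isBadBlock(wrapped)) // true
}
```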
```diff
@@ -24,6 +24,9 @@ func TestInvalidBlockRoot(t *testing.T) {
 	err := invalidBlock{error: ErrInvalidPayload, root: [32]byte{'a'}}
 	require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(err))
 	require.DeepEqual(t, [][32]byte(nil), InvalidAncestorRoots(err))
+
+	newErr := errors.Wrap(err, "wrap me")
+	require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(newErr))
 }
 
 func TestInvalidRoots(t *testing.T) {
@@ -33,4 +36,9 @@ func TestInvalidRoots(t *testing.T) {
 	require.Equal(t, true, IsInvalidBlock(err))
 	require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(err))
 	require.DeepEqual(t, roots, InvalidAncestorRoots(err))
+
+	newErr := errors.Wrap(err, "wrap me")
+	require.Equal(t, true, IsInvalidBlock(err))
+	require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(newErr))
+	require.DeepEqual(t, roots, InvalidAncestorRoots(newErr))
 }
```
```diff
@@ -390,7 +390,7 @@ func (s *Service) removeInvalidBlockAndState(ctx context.Context, blkRoots [][32
 			return err
 		}
 		// No op if the sidecar does not exist.
-		if err := s.cfg.BeaconDB.DeleteBlobSidecar(ctx, root); err != nil {
+		if err := s.cfg.BeaconDB.DeleteBlobSidecars(ctx, root); err != nil {
 			return err
 		}
 	}
```
```diff
@@ -2,8 +2,10 @@ package kzg
 
 import (
+	"fmt"
+	"strings"
 
 	GoKZG "github.com/crate-crypto/go-kzg-4844"
 	"github.com/pkg/errors"
 	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
 )
@@ -31,6 +33,61 @@ func IsDataAvailable(commitments [][]byte, sidecars []*ethpb.BlobSidecar) error
 	return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs)
 }
 
+var ErrKzgProofFailed = errors.New("failed to prove commitment to BlobSidecar Blob data")
+
+type KzgProofError struct {
+	failed [][48]byte
+}
+
+func NewKzgProofError(failed [][48]byte) *KzgProofError {
+	return &KzgProofError{failed: failed}
+}
+
+func (e *KzgProofError) Error() string {
+	cmts := make([]string, len(e.failed))
+	for i := range e.failed {
+		cmts[i] = fmt.Sprintf("%#x", e.failed[i])
+	}
+	return fmt.Sprintf("%s: bad commitments=%s", ErrKzgProofFailed.Error(), strings.Join(cmts, ","))
+}
+
+func (e *KzgProofError) Failed() [][48]byte {
+	return e.failed
+}
+
+func (e *KzgProofError) Unwrap() error {
+	return ErrKzgProofFailed
+}
+
+// BisectBlobSidecarKzgProofs tries to batch prove the given sidecars against their own specified commitment.
+// The caller is responsible for ensuring that the commitments match those specified by the block.
+// If the batch fails, it will then try to verify the proofs one-by-one.
+// If an error is returned, it will be a custom error of type KzgProofError that provides access
+// to the list of commitments that failed.
+func BisectBlobSidecarKzgProofs(sidecars []*ethpb.BlobSidecar) error {
+	if len(sidecars) == 0 {
+		return nil
+	}
+	blobs := make([]GoKZG.Blob, len(sidecars))
+	cmts := make([]GoKZG.KZGCommitment, len(sidecars))
+	proofs := make([]GoKZG.KZGProof, len(sidecars))
+	for i, sidecar := range sidecars {
+		blobs[i] = bytesToBlob(sidecar.Blob)
+		cmts[i] = bytesToCommitment(sidecar.KzgCommitment)
+		proofs[i] = bytesToKZGProof(sidecar.KzgProof)
+	}
+	if err := kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs); err == nil {
+		return nil
+	}
+	failed := make([][48]byte, 0, len(blobs))
+	for i := range blobs {
+		if err := kzgContext.VerifyBlobKZGProof(blobs[i], cmts[i], proofs[i]); err != nil {
+			failed = append(failed, cmts[i])
+		}
+	}
+	return NewKzgProofError(failed)
+}
+
 func bytesToBlob(blob []byte) (ret GoKZG.Blob) {
 	copy(ret[:], blob)
 	return
```
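Because KzgProofError implements Unwrap, callers of BisectBlobSidecarKzgProofs can detect the sentinel with errors.Is and recover the failing commitments with errors.As. A hedged sketch of that calling pattern follows; the import path for the kzg package is assumed from the repository layout, and the pruning helper is illustrative.

```go
package example

import (
	"errors"

	// Import path assumed; the package name "kzg" comes from the hunk above.
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/kzg"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

// failedCommitments verifies a batch of sidecars and, when the batch fails,
// returns only the commitments that failed the one-by-one re-check.
func failedCommitments(sidecars []*ethpb.BlobSidecar) ([][48]byte, error) {
	err := kzg.BisectBlobSidecarKzgProofs(sidecars)
	if err == nil {
		return nil, nil
	}
	// The sentinel check works through KzgProofError's Unwrap method.
	if !errors.Is(err, kzg.ErrKzgProofFailed) {
		return nil, err // some unrelated failure
	}
	var proofErr *kzg.KzgProofError
	if errors.As(err, &proofErr) {
		return proofErr.Failed(), nil
	}
	return nil, err
}
```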
beacon-chain/blockchain/lightclient.go (new file, 245 lines)

```go
package blockchain

import (
	"bytes"
	"context"
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
	ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
	ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
	"github.com/prysmaticlabs/prysm/v4/proto/migration"
	"github.com/prysmaticlabs/prysm/v4/time/slots"
)

const (
	finalityBranchNumOfLeaves = 6
)

// CreateLightClientFinalityUpdate - implements https://github.com/ethereum/consensus-specs/blob/3d235740e5f1e641d3b160c8688f26e7dc5a1894/specs/altair/light-client/full-node.md#create_light_client_finality_update
// def create_light_client_finality_update(update: LightClientUpdate) -> LightClientFinalityUpdate:
//
//	return LightClientFinalityUpdate(
//	    attested_header=update.attested_header,
//	    finalized_header=update.finalized_header,
//	    finality_branch=update.finality_branch,
//	    sync_aggregate=update.sync_aggregate,
//	    signature_slot=update.signature_slot,
//	)
func CreateLightClientFinalityUpdate(update *ethpbv2.LightClientUpdate) *ethpbv2.LightClientFinalityUpdate {
	return &ethpbv2.LightClientFinalityUpdate{
		AttestedHeader:  update.AttestedHeader,
		FinalizedHeader: update.FinalizedHeader,
		FinalityBranch:  update.FinalityBranch,
		SyncAggregate:   update.SyncAggregate,
		SignatureSlot:   update.SignatureSlot,
	}
}

// CreateLightClientOptimisticUpdate - implements https://github.com/ethereum/consensus-specs/blob/3d235740e5f1e641d3b160c8688f26e7dc5a1894/specs/altair/light-client/full-node.md#create_light_client_optimistic_update
// def create_light_client_optimistic_update(update: LightClientUpdate) -> LightClientOptimisticUpdate:
//
//	return LightClientOptimisticUpdate(
//	    attested_header=update.attested_header,
//	    sync_aggregate=update.sync_aggregate,
//	    signature_slot=update.signature_slot,
//	)
func CreateLightClientOptimisticUpdate(update *ethpbv2.LightClientUpdate) *ethpbv2.LightClientOptimisticUpdate {
	return &ethpbv2.LightClientOptimisticUpdate{
		AttestedHeader: update.AttestedHeader,
		SyncAggregate:  update.SyncAggregate,
		SignatureSlot:  update.SignatureSlot,
	}
}

func NewLightClientOptimisticUpdateFromBeaconState(
	ctx context.Context,
	state state.BeaconState,
	block interfaces.ReadOnlySignedBeaconBlock,
	attestedState state.BeaconState) (*ethpbv2.LightClientUpdate, error) {
	// assert compute_epoch_at_slot(attested_state.slot) >= ALTAIR_FORK_EPOCH
	attestedEpoch := slots.ToEpoch(attestedState.Slot())
	if attestedEpoch < params.BeaconConfig().AltairForkEpoch {
		return nil, fmt.Errorf("invalid attested epoch %d", attestedEpoch)
	}

	// assert sum(block.message.body.sync_aggregate.sync_committee_bits) >= MIN_SYNC_COMMITTEE_PARTICIPANTS
	syncAggregate, err := block.Block().Body().SyncAggregate()
	if err != nil {
		return nil, fmt.Errorf("could not get sync aggregate %v", err)
	}

	if syncAggregate.SyncCommitteeBits.Count() < params.BeaconConfig().MinSyncCommitteeParticipants {
		return nil, fmt.Errorf("invalid sync committee bits count %d", syncAggregate.SyncCommitteeBits.Count())
	}

	// assert state.slot == state.latest_block_header.slot
	if state.Slot() != state.LatestBlockHeader().Slot {
		return nil, fmt.Errorf("state slot %d not equal to latest block header slot %d", state.Slot(), state.LatestBlockHeader().Slot)
	}

	// assert hash_tree_root(header) == hash_tree_root(block.message)
	header := state.LatestBlockHeader()
	stateRoot, err := state.HashTreeRoot(ctx)
	if err != nil {
		return nil, fmt.Errorf("could not get state root %v", err)
	}
	header.StateRoot = stateRoot[:]

	headerRoot, err := header.HashTreeRoot()
	if err != nil {
		return nil, fmt.Errorf("could not get header root %v", err)
	}

	blockRoot, err := block.Block().HashTreeRoot()
	if err != nil {
		return nil, fmt.Errorf("could not get block root %v", err)
	}

	if headerRoot != blockRoot {
		return nil, fmt.Errorf("header root %#x not equal to block root %#x", headerRoot, blockRoot)
	}

	// assert attested_state.slot == attested_state.latest_block_header.slot
	if attestedState.Slot() != attestedState.LatestBlockHeader().Slot {
		return nil, fmt.Errorf("attested state slot %d not equal to attested latest block header slot %d", attestedState.Slot(), attestedState.LatestBlockHeader().Slot)
	}

	// attested_header = attested_state.latest_block_header.copy()
	attestedHeader := attestedState.LatestBlockHeader()

	// attested_header.state_root = hash_tree_root(attested_state)
	attestedStateRoot, err := attestedState.HashTreeRoot(ctx)
	if err != nil {
		return nil, fmt.Errorf("could not get attested state root %v", err)
	}
	attestedHeader.StateRoot = attestedStateRoot[:]

	// assert hash_tree_root(attested_header) == block.message.parent_root
	attestedHeaderRoot, err := attestedHeader.HashTreeRoot()
	if err != nil {
		return nil, fmt.Errorf("could not get attested header root %v", err)
	}

	if attestedHeaderRoot != block.Block().ParentRoot() {
		return nil, fmt.Errorf("attested header root %#x not equal to block parent root %#x", attestedHeaderRoot, block.Block().ParentRoot())
	}

	// Return result
	attestedHeaderResult := &ethpbv1.BeaconBlockHeader{
		Slot:          attestedHeader.Slot,
		ProposerIndex: attestedHeader.ProposerIndex,
		ParentRoot:    attestedHeader.ParentRoot,
		StateRoot:     attestedHeader.StateRoot,
		BodyRoot:      attestedHeader.BodyRoot,
	}

	syncAggregateResult := &ethpbv1.SyncAggregate{
		SyncCommitteeBits:      syncAggregate.SyncCommitteeBits,
		SyncCommitteeSignature: syncAggregate.SyncCommitteeSignature,
	}

	result := &ethpbv2.LightClientUpdate{
		AttestedHeader: attestedHeaderResult,
		SyncAggregate:  syncAggregateResult,
		SignatureSlot:  block.Block().Slot(),
	}

	return result, nil
}

func NewLightClientFinalityUpdateFromBeaconState(
	ctx context.Context,
	state state.BeaconState,
	block interfaces.ReadOnlySignedBeaconBlock,
	attestedState state.BeaconState,
	finalizedBlock interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.LightClientUpdate, error) {
	result, err := NewLightClientOptimisticUpdateFromBeaconState(
		ctx,
		state,
		block,
		attestedState,
	)
	if err != nil {
		return nil, err
	}

	// Indicate finality whenever possible
	var finalizedHeader *ethpbv1.BeaconBlockHeader
	var finalityBranch [][]byte

	if finalizedBlock != nil && !finalizedBlock.IsNil() {
		if finalizedBlock.Block().Slot() != 0 {
			tempFinalizedHeader, err := finalizedBlock.Header()
			if err != nil {
				return nil, fmt.Errorf("could not get finalized header %v", err)
			}
			finalizedHeader = migration.V1Alpha1SignedHeaderToV1(tempFinalizedHeader).GetMessage()

			finalizedHeaderRoot, err := finalizedHeader.HashTreeRoot()
			if err != nil {
				return nil, fmt.Errorf("could not get finalized header root %v", err)
			}

			if finalizedHeaderRoot != bytesutil.ToBytes32(attestedState.FinalizedCheckpoint().Root) {
				return nil, fmt.Errorf("finalized header root %#x not equal to attested finalized checkpoint root %#x", finalizedHeaderRoot, bytesutil.ToBytes32(attestedState.FinalizedCheckpoint().Root))
			}
		} else {
			if !bytes.Equal(attestedState.FinalizedCheckpoint().Root, make([]byte, 32)) {
				return nil, fmt.Errorf("invalid finalized header root %v", attestedState.FinalizedCheckpoint().Root)
			}

			finalizedHeader = &ethpbv1.BeaconBlockHeader{
				Slot:          0,
				ProposerIndex: 0,
				ParentRoot:    make([]byte, 32),
				StateRoot:     make([]byte, 32),
				BodyRoot:      make([]byte, 32),
			}
		}

		var bErr error
		finalityBranch, bErr = attestedState.FinalizedRootProof(ctx)
		if bErr != nil {
			return nil, fmt.Errorf("could not get finalized root proof %v", bErr)
		}
	} else {
		finalizedHeader = &ethpbv1.BeaconBlockHeader{
			Slot:          0,
			ProposerIndex: 0,
			ParentRoot:    make([]byte, 32),
			StateRoot:     make([]byte, 32),
			BodyRoot:      make([]byte, 32),
		}

		finalityBranch = make([][]byte, finalityBranchNumOfLeaves)
		for i := 0; i < finalityBranchNumOfLeaves; i++ {
			finalityBranch[i] = make([]byte, 32)
		}
	}

	result.FinalizedHeader = finalizedHeader
	result.FinalityBranch = finalityBranch
	return result, nil
}

func NewLightClientUpdateFromFinalityUpdate(update *ethpbv2.LightClientFinalityUpdate) *ethpbv2.LightClientUpdate {
	return &ethpbv2.LightClientUpdate{
		AttestedHeader:  update.AttestedHeader,
		FinalizedHeader: update.FinalizedHeader,
		FinalityBranch:  update.FinalityBranch,
		SyncAggregate:   update.SyncAggregate,
		SignatureSlot:   update.SignatureSlot,
	}
}

func NewLightClientUpdateFromOptimisticUpdate(update *ethpbv2.LightClientOptimisticUpdate) *ethpbv2.LightClientUpdate {
	return &ethpbv2.LightClientUpdate{
		AttestedHeader: update.AttestedHeader,
		SyncAggregate:  update.SyncAggregate,
		SignatureSlot:  update.SignatureSlot,
	}
}
```
beacon-chain/blockchain/lightclient_test.go (new file, 160 lines)

```go
package blockchain

import (
	"context"
	"testing"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
	ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
	"github.com/prysmaticlabs/prysm/v4/testing/util"
)

type testlc struct {
	t              *testing.T
	ctx            context.Context
	state          state.BeaconState
	block          interfaces.ReadOnlySignedBeaconBlock
	attestedState  state.BeaconState
	attestedHeader *ethpb.BeaconBlockHeader
}

func newTestLc(t *testing.T) *testlc {
	return &testlc{t: t}
}

func (l *testlc) setupTest() *testlc {
	ctx := context.Background()

	slot := primitives.Slot(params.BeaconConfig().AltairForkEpoch * primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)).Add(1)

	attestedState, err := util.NewBeaconStateCapella()
	require.NoError(l.t, err)
	err = attestedState.SetSlot(slot)
	require.NoError(l.t, err)

	parent := util.NewBeaconBlockCapella()
	parent.Block.Slot = slot

	signedParent, err := blocks.NewSignedBeaconBlock(parent)
	require.NoError(l.t, err)

	parentHeader, err := signedParent.Header()
	require.NoError(l.t, err)
	attestedHeader := parentHeader.Header

	err = attestedState.SetLatestBlockHeader(attestedHeader)
	require.NoError(l.t, err)
	attestedStateRoot, err := attestedState.HashTreeRoot(ctx)
	require.NoError(l.t, err)

	// get a new signed block so the root is updated with the new state root
	parent.Block.StateRoot = attestedStateRoot[:]
	signedParent, err = blocks.NewSignedBeaconBlock(parent)
	require.NoError(l.t, err)

	state, err := util.NewBeaconStateCapella()
	require.NoError(l.t, err)
	err = state.SetSlot(slot)
	require.NoError(l.t, err)

	parentRoot, err := signedParent.Block().HashTreeRoot()
	require.NoError(l.t, err)

	block := util.NewBeaconBlockCapella()
	block.Block.Slot = slot
	block.Block.ParentRoot = parentRoot[:]

	for i := uint64(0); i < params.BeaconConfig().MinSyncCommitteeParticipants; i++ {
		block.Block.Body.SyncAggregate.SyncCommitteeBits.SetBitAt(i, true)
	}

	signedBlock, err := blocks.NewSignedBeaconBlock(block)
	require.NoError(l.t, err)

	h, err := signedBlock.Header()
	require.NoError(l.t, err)

	err = state.SetLatestBlockHeader(h.Header)
	require.NoError(l.t, err)
	stateRoot, err := state.HashTreeRoot(ctx)
	require.NoError(l.t, err)

	// get a new signed block so the root is updated with the new state root
	block.Block.StateRoot = stateRoot[:]
	signedBlock, err = blocks.NewSignedBeaconBlock(block)
	require.NoError(l.t, err)

	l.state = state
	l.attestedState = attestedState
	l.attestedHeader = attestedHeader
	l.block = signedBlock
	l.ctx = ctx

	return l
}

func (l *testlc) checkAttestedHeader(update *ethpbv2.LightClientUpdate) {
	require.Equal(l.t, l.attestedHeader.Slot, update.AttestedHeader.Slot, "Attested header slot is not equal")
	require.Equal(l.t, l.attestedHeader.ProposerIndex, update.AttestedHeader.ProposerIndex, "Attested header proposer index is not equal")
	require.DeepSSZEqual(l.t, l.attestedHeader.ParentRoot, update.AttestedHeader.ParentRoot, "Attested header parent root is not equal")
	require.DeepSSZEqual(l.t, l.attestedHeader.BodyRoot, update.AttestedHeader.BodyRoot, "Attested header body root is not equal")

	attestedStateRoot, err := l.attestedState.HashTreeRoot(l.ctx)
	require.NoError(l.t, err)
	require.DeepSSZEqual(l.t, attestedStateRoot[:], update.AttestedHeader.StateRoot, "Attested header state root is not equal")
}

func (l *testlc) checkSyncAggregate(update *ethpbv2.LightClientUpdate) {
	syncAggregate, err := l.block.Block().Body().SyncAggregate()
	require.NoError(l.t, err)
	require.DeepSSZEqual(l.t, syncAggregate.SyncCommitteeBits, update.SyncAggregate.SyncCommitteeBits, "SyncAggregate bits is not equal")
	require.DeepSSZEqual(l.t, syncAggregate.SyncCommitteeSignature, update.SyncAggregate.SyncCommitteeSignature, "SyncAggregate signature is not equal")
}

func TestLightClient_NewLightClientOptimisticUpdateFromBeaconState(t *testing.T) {
	l := newTestLc(t).setupTest()

	update, err := NewLightClientOptimisticUpdateFromBeaconState(l.ctx, l.state, l.block, l.attestedState)
	require.NoError(t, err)
	require.NotNil(t, update, "update is nil")

	require.Equal(t, l.block.Block().Slot(), update.SignatureSlot, "Signature slot is not equal")

	l.checkSyncAggregate(update)
	l.checkAttestedHeader(update)

	require.Equal(t, (*v1.BeaconBlockHeader)(nil), update.FinalizedHeader, "Finalized header is not nil")
	require.DeepSSZEqual(t, ([][]byte)(nil), update.FinalityBranch, "Finality branch is not nil")
}

func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
	l := newTestLc(t).setupTest()

	update, err := NewLightClientFinalityUpdateFromBeaconState(l.ctx, l.state, l.block, l.attestedState, nil)
	require.NoError(t, err)
	require.NotNil(t, update, "update is nil")

	require.Equal(t, l.block.Block().Slot(), update.SignatureSlot, "Signature slot is not equal")

	l.checkSyncAggregate(update)
	l.checkAttestedHeader(update)

	zeroHash := params.BeaconConfig().ZeroHash[:]
	require.NotNil(t, update.FinalizedHeader, "Finalized header is nil")
	require.Equal(t, primitives.Slot(0), update.FinalizedHeader.Slot, "Finalized header slot is not zero")
	require.Equal(t, primitives.ValidatorIndex(0), update.FinalizedHeader.ProposerIndex, "Finalized header proposer index is not zero")
	require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.ParentRoot, "Finalized header parent root is not zero")
	require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.StateRoot, "Finalized header state root is not zero")
	require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.BodyRoot, "Finalized header body root is not zero")
	require.Equal(t, finalityBranchNumOfLeaves, len(update.FinalityBranch), "Invalid finality branch leaves")
	for _, leaf := range update.FinalityBranch {
		require.DeepSSZEqual(t, zeroHash, leaf, "Leaf is not zero")
	}
}
```
```diff
@@ -16,7 +16,7 @@ func TestReportEpochMetrics_BadHeadState(t *testing.T) {
 	require.NoError(t, err)
 	require.NoError(t, h.SetValidators(nil))
 	err = reportEpochMetrics(context.Background(), s, h)
-	require.ErrorContains(t, "failed to initialize precompute: nil validators in state", err)
+	require.ErrorContains(t, "failed to initialize precompute: state has nil validator slice", err)
 }
 
 func TestReportEpochMetrics_BadAttestation(t *testing.T) {
```
```diff
@@ -13,6 +13,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
 	coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/das"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
 	forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
@@ -162,7 +163,7 @@ func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionD
 	return preStateVersion, preStateHeader, nil
 }
 
-func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock) error {
+func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, avs das.AvailabilityStore) error {
 	ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
 	defer span.End()
```
```diff
@@ -265,7 +266,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
 			return err
 		}
 	}
-	if err := s.databaseDACheck(ctx, b); err != nil {
+	if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {
 		return errors.Wrap(err, "could not validate blob data availability")
 	}
 	args := &forkchoicetypes.BlockAndCheckpoints{Block: b.Block(),
```
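Batch processing now delegates the blob data-availability check to an injected das.AvailabilityStore (the service-local databaseDACheck is removed below), which is what lets the updated tests pass a das.MockAvailabilityStore. Below is a sketch of the dependency as inferred from this call site; Prysm's real das.AvailabilityStore interface may declare more methods.

```go
package example

import (
	"context"

	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)

// AvailabilityStore mirrors the shape inferred from
// avs.IsDataAvailable(ctx, s.CurrentSlot(), b); the real interface lives in
// beacon-chain/das and may differ.
type AvailabilityStore interface {
	IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
}

// alwaysAvailable is an illustrative stand-in, similar in spirit to the
// das.MockAvailabilityStore used by the updated tests: it treats every
// block's blob data as available.
type alwaysAvailable struct{}

func (alwaysAvailable) IsDataAvailable(_ context.Context, _ primitives.Slot, _ blocks.ROBlock) error {
	return nil
}

// Compile-time check that the stub satisfies the sketched interface.
var _ AvailabilityStore = alwaysAvailable{}
```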
```diff
@@ -333,33 +334,6 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
 	return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
 }
 
-func commitmentsToCheck(b consensusblocks.ROBlock, current primitives.Slot) [][]byte {
-	if b.Version() < version.Deneb {
-		return nil
-	}
-	// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
-	if !params.WithinDAPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(current)) {
-		return nil
-	}
-	kzgCommitments, err := b.Block().Body().BlobKzgCommitments()
-	if err != nil {
-		return nil
-	}
-	return kzgCommitments
-}
-
-func (s *Service) databaseDACheck(ctx context.Context, b consensusblocks.ROBlock) error {
-	commitments := commitmentsToCheck(b, s.CurrentSlot())
-	if len(commitments) == 0 {
-		return nil
-	}
-	sidecars, err := s.cfg.BeaconDB.BlobSidecarsByRoot(ctx, b.Root())
-	if err != nil {
-		return errors.Wrap(err, "could not get blob sidecars")
-	}
-	return kzg.IsDataAvailable(commitments, sidecars)
-}
-
 func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
 	e := coreTime.CurrentEpoch(st)
 	if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
@@ -564,6 +538,10 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
 		if len(sidecars) >= expected {
 			s.blobNotifiers.delete(root)
 			if err := kzg.IsDataAvailable(kzgCommitments, sidecars); err != nil {
+				log.WithField("root", fmt.Sprintf("%#x", root)).Warn("removing blob sidecars with invalid proofs")
+				if err2 := s.cfg.BeaconDB.DeleteBlobSidecars(ctx, root); err2 != nil {
+					log.WithError(err2).Error("could not delete sidecars")
+				}
 				return err
 			}
 			logBlobSidecar(sidecars, t)
@@ -594,6 +572,10 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
 		return errors.Wrap(err, "could not get blob sidecars")
 	}
 	if err := kzg.IsDataAvailable(kzgCommitments, sidecars); err != nil {
+		log.WithField("root", fmt.Sprintf("%#x", root)).Warn("removing blob sidecars with invalid proofs")
+		if err2 := s.cfg.BeaconDB.DeleteBlobSidecars(ctx, root); err2 != nil {
+			log.WithError(err2).Error("could not delete sidecars")
+		}
 		return err
 	}
 	logBlobSidecar(sidecars, t)
@@ -649,11 +631,13 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
 		return
 	}
 	s.headLock.RUnlock()
+	s.cfg.ForkChoiceStore.RLock()
 	_, err = s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
 		headState: headState,
 		headRoot:  headRoot,
 		headBlock: headBlock.Block(),
 	})
+	s.cfg.ForkChoiceStore.RUnlock()
 	if err != nil {
 		log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
 	}
```
```diff
@@ -1,7 +1,6 @@
 package blockchain
 
 import (
-	"bytes"
 	"context"
 	"fmt"
 	"math/big"
@@ -17,6 +16,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/das"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
 	testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
@@ -39,7 +39,6 @@ import (
 	"github.com/prysmaticlabs/prysm/v4/testing/require"
 	"github.com/prysmaticlabs/prysm/v4/testing/util"
 	prysmTime "github.com/prysmaticlabs/prysm/v4/time"
-	"github.com/prysmaticlabs/prysm/v4/time/slots"
 	logTest "github.com/sirupsen/logrus/hooks/test"
 )
@@ -68,7 +67,7 @@ func TestStore_OnBlockBatch(t *testing.T) {
 		require.NoError(t, err)
 		blks = append(blks, rwsb)
 	}
-	err := service.onBlockBatch(ctx, blks)
+	err := service.onBlockBatch(ctx, blks, &das.MockAvailabilityStore{})
 	require.NoError(t, err)
 	jcp := service.CurrentJustifiedCheckpt()
 	jroot := bytesutil.ToBytes32(jcp.Root)
@@ -98,7 +97,7 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
 		require.NoError(t, service.saveInitSyncBlock(ctx, rwsb.Root(), wsb))
 		blks = append(blks, rwsb)
 	}
-	require.NoError(t, service.onBlockBatch(ctx, blks))
+	require.NoError(t, service.onBlockBatch(ctx, blks, &das.MockAvailabilityStore{}))
 }
 
 func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
```
```diff
@@ -1103,12 +1102,15 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
 	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
 	var wg sync.WaitGroup
 	wg.Add(4)
+	var lock sync.Mutex
 	go func() {
 		preState, err := service.getBlockPreState(ctx, wsb1.Block())
 		require.NoError(t, err)
 		postState, err := service.validateStateTransition(ctx, preState, wsb1)
 		require.NoError(t, err)
+		lock.Lock()
 		require.NoError(t, service.postBlockProcess(ctx, wsb1, r1, postState, true))
+		lock.Unlock()
 		wg.Done()
 	}()
 	go func() {
@@ -1116,7 +1118,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
 		require.NoError(t, err)
 		postState, err := service.validateStateTransition(ctx, preState, wsb2)
 		require.NoError(t, err)
+		lock.Lock()
 		require.NoError(t, service.postBlockProcess(ctx, wsb2, r2, postState, true))
+		lock.Unlock()
 		wg.Done()
 	}()
 	go func() {
@@ -1124,7 +1128,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
 		require.NoError(t, err)
 		postState, err := service.validateStateTransition(ctx, preState, wsb3)
 		require.NoError(t, err)
+		lock.Lock()
 		require.NoError(t, service.postBlockProcess(ctx, wsb3, r3, postState, true))
+		lock.Unlock()
 		wg.Done()
 	}()
 	go func() {
@@ -1132,7 +1138,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
 		require.NoError(t, err)
 		postState, err := service.validateStateTransition(ctx, preState, wsb4)
 		require.NoError(t, err)
+		lock.Lock()
 		require.NoError(t, service.postBlockProcess(ctx, wsb4, r4, postState, true))
+		lock.Unlock()
 		wg.Done()
 	}()
 	wg.Wait()
@@ -1936,7 +1944,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
 	rwsb, err := consensusblocks.NewROBlock(wsb)
 	require.NoError(t, err)
 	// We use onBlockBatch here because the valid chain is missing in forkchoice
-	require.NoError(t, service.onBlockBatch(ctx, []consensusblocks.ROBlock{rwsb}))
+	require.NoError(t, service.onBlockBatch(ctx, []consensusblocks.ROBlock{rwsb}, &das.MockAvailabilityStore{}))
 	// Check that the head is now VALID and the node is not optimistic
 	require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot()))
 	headRoot, err = service.HeadRoot(ctx)
```
```diff
@@ -2038,71 +2046,3 @@ func driftGenesisTime(s *Service, slot, delay int64) {
 	offset := slot*int64(params.BeaconConfig().SecondsPerSlot) - delay
 	s.SetGenesisTime(time.Unix(time.Now().Unix()-offset, 0))
 }
-
-func Test_commitmentsToCheck(t *testing.T) {
-	windowSlots, err := slots.EpochEnd(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest)
-	require.NoError(t, err)
-	commits := [][]byte{
-		bytesutil.PadTo([]byte("a"), 48),
-		bytesutil.PadTo([]byte("b"), 48),
-		bytesutil.PadTo([]byte("c"), 48),
-		bytesutil.PadTo([]byte("d"), 48),
-	}
-	cases := []struct {
-		name    string
-		commits [][]byte
-		block   func(*testing.T) consensusblocks.ROBlock
-		slot    primitives.Slot
-	}{
-		{
-			name: "pre deneb",
-			block: func(t *testing.T) consensusblocks.ROBlock {
-				bb := util.NewBeaconBlockBellatrix()
-				sb, err := consensusblocks.NewSignedBeaconBlock(bb)
-				require.NoError(t, err)
-				rb, err := consensusblocks.NewROBlock(sb)
-				require.NoError(t, err)
-				return rb
-			},
-		},
-		{
-			name: "commitments within da",
-			block: func(t *testing.T) consensusblocks.ROBlock {
-				d := util.NewBeaconBlockDeneb()
-				d.Block.Body.BlobKzgCommitments = commits
-				d.Block.Slot = 100
-				sb, err := consensusblocks.NewSignedBeaconBlock(d)
-				require.NoError(t, err)
-				rb, err := consensusblocks.NewROBlock(sb)
-				require.NoError(t, err)
-				return rb
-			},
-			commits: commits,
-			slot:    100,
-		},
-		{
-			name: "commitments outside da",
-			block: func(t *testing.T) consensusblocks.ROBlock {
-				d := util.NewBeaconBlockDeneb()
-				// block is from slot 0, "current slot" is window size +1 (so outside the window)
-				d.Block.Body.BlobKzgCommitments = commits
-				sb, err := consensusblocks.NewSignedBeaconBlock(d)
-				require.NoError(t, err)
-				rb, err := consensusblocks.NewROBlock(sb)
-				require.NoError(t, err)
-				return rb
-			},
-			slot: windowSlots + 1,
-		},
-	}
-	for _, c := range cases {
-		t.Run(c.name, func(t *testing.T) {
-			b := c.block(t)
-			co := commitmentsToCheck(b, c.slot)
-			require.Equal(t, len(c.commits), len(co))
-			for i := 0; i < len(c.commits); i++ {
-				require.Equal(t, true, bytes.Equal(c.commits[i], co[i]))
-			}
-		})
-	}
-}
```
```diff
@@ -3,6 +3,7 @@ package blockchain
 import (
 	"bytes"
 	"context"
+	"fmt"
 	"time"
 
 	"github.com/pkg/errors"
@@ -11,6 +12,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
 	coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/das"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
 	"github.com/prysmaticlabs/prysm/v4/config/features"
 	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -33,7 +35,7 @@ var epochsSinceFinalitySaveHotStateDB = primitives.Epoch(100)
 // BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
 type BlockReceiver interface {
 	ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error
-	ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock) error
+	ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error
 	HasBlock(ctx context.Context, root [32]byte) bool
 	RecentBlockSlot(root [32]byte) (primitives.Slot, error)
 	BlockBeingSynced([32]byte) bool
@@ -58,6 +60,11 @@ type SlashingReceiver interface {
 func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error {
 	ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlock")
 	defer span.End()
+	// Return early if the block has been synced
+	if s.InForkchoice(blockRoot) {
+		log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Debug("Ignoring already synced block")
+		return nil
+	}
 	receivedTime := time.Now()
 	s.blockBeingSynced.set(blockRoot)
 	defer s.blockBeingSynced.unset(blockRoot)
@@ -185,7 +192,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
 // ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear ,transitioning
 // the state, performing batch verification of all collected signatures and then performing the appropriate
 // actions for a block post-transition.
-func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock) error {
+func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error {
 	ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch")
 	defer span.End()
 
@@ -193,7 +200,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock
 	defer s.cfg.ForkChoiceStore.Unlock()
 
 	// Apply state transition on the incoming newly received block batches, one by one.
-	if err := s.onBlockBatch(ctx, blocks); err != nil {
+	if err := s.onBlockBatch(ctx, blocks, avs); err != nil {
 		err := errors.Wrap(err, "could not process block in batch")
 		tracing.AnnotateError(span, err)
 		return err
```
```diff
@@ -7,6 +7,7 @@ import (
 	"time"
 
 	blockchainTesting "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/das"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
 	"github.com/prysmaticlabs/prysm/v4/config/params"
 	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -240,7 +241,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
 			require.NoError(t, err)
 			rwsb, err := blocks.NewROBlock(wsb)
 			require.NoError(t, err)
-			err = s.ReceiveBlockBatch(ctx, []blocks.ROBlock{rwsb})
+			err = s.ReceiveBlockBatch(ctx, []blocks.ROBlock{rwsb}, &das.MockAvailabilityStore{})
 			if tt.wantedErr != "" {
 				assert.ErrorContains(t, tt.wantedErr, err)
 			} else {
```
```diff
@@ -2,6 +2,7 @@ package blockchain
 
 import (
 	"context"
+	"sync"
 	"testing"
 
 	"github.com/prysmaticlabs/prysm/v4/async/event"
@@ -23,10 +24,13 @@ import (
 
 type mockBeaconNode struct {
 	stateFeed *event.Feed
+	mu        sync.Mutex
 }
 
 // StateFeed mocks the same method in the beacon node.
 func (mbn *mockBeaconNode) StateFeed() *event.Feed {
+	mbn.mu.Lock()
+	defer mbn.mu.Unlock()
 	if mbn.stateFeed == nil {
 		mbn.stateFeed = new(event.Feed)
 	}
```
```diff
@@ -17,6 +17,7 @@ go_library(
         "//beacon-chain/core/feed/operation:go_default_library",
         "//beacon-chain/core/feed/state:go_default_library",
         "//beacon-chain/core/helpers:go_default_library",
+        "//beacon-chain/das:go_default_library",
         "//beacon-chain/db:go_default_library",
         "//beacon-chain/forkchoice:go_default_library",
         "//beacon-chain/state:go_default_library",
@@ -16,6 +16,7 @@ import (
 	opfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
 	statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/das"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
 	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
@@ -72,6 +73,7 @@ type ChainService struct {
 	OptimisticRoots map[[32]byte]bool
 	BlockSlot       primitives.Slot
 	SyncingRoot     [32]byte
+	Blobs           []*ethpb.BlobSidecar
 }
 
 func (s *ChainService) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
@@ -206,7 +208,7 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block interf
 }
 
 // ReceiveBlockBatch processes blocks in batches from initial-sync.
-func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock) error {
+func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, avs das.AvailabilityStore) error {
 	if s.State == nil {
 		return ErrNilState
 	}
@@ -612,6 +614,7 @@ func (c *ChainService) BlockBeingSynced(root [32]byte) bool {
 }
 
 // ReceiveBlob implements the same method in the chain service
-func (*ChainService) ReceiveBlob(_ context.Context, _ *ethpb.BlobSidecar) error {
+func (c *ChainService) ReceiveBlob(_ context.Context, b *ethpb.BlobSidecar) error {
+	c.Blobs = append(c.Blobs, b)
 	return nil
 }
```
beacon-chain/cache/checkpoint_state.go (vendored, 4 changes)
@@ -42,7 +42,7 @@ func NewCheckpointStateCache() *CheckpointStateCache {
|
||||
// StateByCheckpoint fetches state by checkpoint. Returns true with a
|
||||
// reference to the CheckpointState info, if exists. Otherwise returns false, nil.
|
||||
func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (state.BeaconState, error) {
|
||||
h, err := hash.HashProto(cp)
|
||||
h, err := hash.Proto(cp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -62,7 +62,7 @@ func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (state.Be
|
||||
// AddCheckpointState adds CheckpointState object to the cache. This method also trims the least
|
||||
// recently added CheckpointState object if the cache size has reached the max cache size limit.
|
||||
func (c *CheckpointStateCache) AddCheckpointState(cp *ethpb.Checkpoint, s state.ReadOnlyBeaconState) error {
|
||||
h, err := hash.HashProto(cp)
|
||||
h, err := hash.Proto(cp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -98,7 +98,7 @@ func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Depos
|
||||
return
|
||||
}
|
||||
|
||||
depRoot, err := hash.HashProto(d)
|
||||
depRoot, err := hash.Proto(d)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not remove deposit")
|
||||
return
|
||||
@@ -109,7 +109,7 @@ func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Depos
|
||||
|
||||
idx := -1
|
||||
for i, ctnr := range dc.pendingDeposits {
|
||||
h, err := hash.HashProto(ctnr.Deposit)
|
||||
h, err := hash.Proto(ctnr.Deposit)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not hash deposit")
|
||||
continue
|
||||
|
||||
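The rename from hash.HashProto to hash.Proto keeps the call shape: pass any protobuf message and get back its digest plus an error, as the checkpoint-cache and deposit-cache call sites above show. A hedged sketch (the Checkpoint value is only an example):

cp := &ethpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)}
// Used as a cache key above; Proto hashes the serialized message.
key, err := hash.Proto(cp)
if err != nil {
	return err
}
_ = key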
@@ -769,7 +769,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
ctrs := []*ethpb.DepositContainer{}
|
||||
var ctrs []*ethpb.DepositContainer
|
||||
for i := 0; i < 2000; i++ {
|
||||
ctrs = append(ctrs, generateCtr(uint64(10+(i/2)), int64(i)))
|
||||
}
|
||||
|
||||
@@ -31,7 +31,7 @@ func Test_BaseReward(t *testing.T) {
|
||||
valIdx: 2,
|
||||
st: genState(1),
|
||||
want: 0,
|
||||
errString: "index 2 out of range",
|
||||
errString: "validator index 2 does not exist",
|
||||
},
|
||||
{
|
||||
name: "active balance is 32eth",
|
||||
@@ -89,7 +89,7 @@ func Test_BaseRewardWithTotalBalance(t *testing.T) {
|
||||
valIdx: 2,
|
||||
activeBalance: 1,
|
||||
want: 0,
|
||||
errString: "index 2 out of range",
|
||||
errString: "validator index 2 does not exist",
|
||||
},
|
||||
{
|
||||
name: "active balance is 1",
|
||||
|
||||
@@ -28,10 +28,10 @@ func TestSyncCommitteeIndices_CanGet(t *testing.T) {
|
||||
}
|
||||
}
|
||||
st, err := state_native.InitializeFromProtoAltair(ðpb.BeaconStateAltair{
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetValidators(validators))
|
||||
return st
|
||||
}
|
||||
|
||||
@@ -69,6 +69,15 @@ func TestSyncCommitteeIndices_CanGet(t *testing.T) {
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "no active validators, epoch 100",
|
||||
args: args{
|
||||
state: getState(t, 0), // Regression test for divide by zero. Issue #13051.
|
||||
epoch: 100,
|
||||
},
|
||||
wantErr: true,
|
||||
errString: "no active validator indices",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
||||
@@ -42,7 +42,6 @@ go_library(
|
||||
"//math:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/eth/v2:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//proto/prysm/v1alpha1/slashings:go_default_library",
|
||||
@@ -100,7 +99,6 @@ go_test(
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/migration:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation/aggregation:go_default_library",
|
||||
|
||||
@@ -55,7 +55,7 @@ func ProcessVoluntaryExits(
|
||||
if len(exits) == 0 {
|
||||
return beaconState, nil
|
||||
}
|
||||
maxExitEpoch, churn := v.ValidatorsMaxExitEpochAndChurn(beaconState)
|
||||
maxExitEpoch, churn := v.MaxExitEpochAndChurn(beaconState)
|
||||
var exitEpoch primitives.Epoch
|
||||
for idx, exit := range exits {
|
||||
if exit == nil || exit.Exit == nil {
|
||||
|
||||
@@ -82,7 +82,7 @@ func ProcessRandaoNoVerify(
|
||||
for i, x := range blockRandaoReveal {
|
||||
latestMixSlice[i] ^= x
|
||||
}
|
||||
if err := beaconState.UpdateRandaoMixesAtIndex(uint64(currentEpoch%latestMixesLength), latestMixSlice); err != nil {
|
||||
if err := beaconState.UpdateRandaoMixesAtIndex(uint64(currentEpoch%latestMixesLength), [32]byte(latestMixSlice)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return beaconState, nil
|
||||
|
||||
@@ -16,7 +16,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/ssz"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
)
|
||||
@@ -236,7 +235,7 @@ func BLSChangesSignatureBatch(
|
||||
return nil, errors.Wrap(err, "could not convert bytes to public key")
|
||||
}
|
||||
batch.PublicKeys[i] = publicKey
|
||||
htr, err := signing.SigningData(change.Message.HashTreeRoot, domain)
|
||||
htr, err := signing.Data(change.Message.HashTreeRoot, domain)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute BLSToExecutionChange signing data")
|
||||
}
|
||||
@@ -251,7 +250,7 @@ func BLSChangesSignatureBatch(
|
||||
// is from a previous fork.
|
||||
func VerifyBLSChangeSignature(
|
||||
st state.ReadOnlyBeaconState,
|
||||
change *ethpbv2.SignedBLSToExecutionChange,
|
||||
change *ethpb.SignedBLSToExecutionChange,
|
||||
) error {
|
||||
c := params.BeaconConfig()
|
||||
domain, err := signing.ComputeDomain(c.DomainBLSToExecutionChange, c.GenesisForkVersion, st.GenesisValidatorsRoot())
|
||||
|
||||
@@ -18,7 +18,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/ssz"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/migration"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
@@ -152,7 +151,7 @@ func TestProcessBLSToExecutionChange(t *testing.T) {
|
||||
}
|
||||
|
||||
_, err = blocks.ProcessBLSToExecutionChange(st, signed)
|
||||
require.ErrorContains(t, "out of range", err)
|
||||
require.ErrorContains(t, "out of bounds", err)
|
||||
})
|
||||
|
||||
t.Run("signature does not verify", func(t *testing.T) {
|
||||
@@ -1209,8 +1208,7 @@ func TestBLSChangesSignatureBatch(t *testing.T) {
|
||||
require.Equal(t, true, verify)
|
||||
|
||||
// Verify a single change
|
||||
change := migration.V1Alpha1SignedBLSToExecChangeToV2(signedChanges[0])
|
||||
require.NoError(t, blocks.VerifyBLSChangeSignature(st, change))
|
||||
require.NoError(t, blocks.VerifyBLSChangeSignature(st, signedChanges[0]))
|
||||
}
|
||||
|
||||
func TestBLSChangesSignatureBatchWrongFork(t *testing.T) {
|
||||
@@ -1274,8 +1272,7 @@ func TestBLSChangesSignatureBatchWrongFork(t *testing.T) {
|
||||
require.Equal(t, false, verify)
|
||||
|
||||
// Verify a single change
|
||||
change := migration.V1Alpha1SignedBLSToExecChangeToV2(signedChanges[0])
|
||||
require.ErrorIs(t, signing.ErrSigFailedToVerify, blocks.VerifyBLSChangeSignature(st, change))
|
||||
require.ErrorIs(t, signing.ErrSigFailedToVerify, blocks.VerifyBLSChangeSignature(st, signedChanges[0]))
|
||||
}
|
||||
|
||||
func TestBLSChangesSignatureBatchFromBellatrix(t *testing.T) {
|
||||
@@ -1362,7 +1359,6 @@ func TestBLSChangesSignatureBatchFromBellatrix(t *testing.T) {
|
||||
require.Equal(t, true, verify)
|
||||
|
||||
// Verify a single change
|
||||
change := migration.V1Alpha1SignedBLSToExecChangeToV2(signedChanges[0])
|
||||
require.NoError(t, blocks.VerifyBLSChangeSignature(st, change))
|
||||
require.NoError(t, blocks.VerifyBLSChangeSignature(st, signedChanges[0]))
|
||||
params.OverrideBeaconConfig(savedConfig)
|
||||
}
|
||||
|
||||
@@ -112,7 +112,7 @@ func ProcessRegistryUpdates(ctx context.Context, state state.BeaconState) (state
|
||||
if isActive && belowEjectionBalance {
|
||||
// It is fine to do a quadratic loop here since this should
|
||||
// barely happen
|
||||
maxExitEpoch, churn := validators.ValidatorsMaxExitEpochAndChurn(state)
|
||||
maxExitEpoch, churn := validators.MaxExitEpochAndChurn(state)
|
||||
state, _, err = validators.InitiateValidatorExit(ctx, state, primitives.ValidatorIndex(idx), maxExitEpoch, churn)
|
||||
if err != nil && !errors.Is(err, validators.ValidatorAlreadyExitedErr) {
|
||||
return nil, errors.Wrapf(err, "could not initiate exit for validator %d", idx)
|
||||
@@ -137,16 +137,10 @@ func ProcessRegistryUpdates(ctx context.Context, state state.BeaconState) (state
|
||||
return nil, errors.Wrap(err, "could not get active validator count")
|
||||
}
|
||||
|
||||
churnLimit, err := helpers.ValidatorChurnLimit(activeValidatorCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get churn limit")
|
||||
}
|
||||
churnLimit := helpers.ValidatorActivationChurnLimit(activeValidatorCount)
|
||||
|
||||
if state.Version() >= version.Deneb {
|
||||
// Cap churn limit to max per epoch churn limit. New in EIP7514.
|
||||
if churnLimit > params.BeaconConfig().MaxPerEpochActivationChurnLimit {
|
||||
churnLimit = params.BeaconConfig().MaxPerEpochActivationChurnLimit
|
||||
}
|
||||
churnLimit = helpers.ValidatorActivationChurnLimitDeneb(activeValidatorCount)
|
||||
}
|
||||
|
||||
// Prevent churn limit cause index out of bound.
|
||||
@@ -355,7 +349,7 @@ func ProcessRandaoMixesReset(state state.BeaconState) (state.BeaconState, error)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := state.UpdateRandaoMixesAtIndex(uint64(nextEpoch%randaoMixLength), mix); err != nil {
|
||||
if err := state.UpdateRandaoMixesAtIndex(uint64(nextEpoch%randaoMixLength), [32]byte(mix)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
||||
@@ -311,8 +311,7 @@ func TestProcessRegistryUpdates_EligibleToActivate(t *testing.T) {
|
||||
Slot: 5 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Epoch: 6, Root: make([]byte, fieldparams.RootLength)},
|
||||
}
|
||||
limit, err := helpers.ValidatorChurnLimit(0)
|
||||
require.NoError(t, err)
|
||||
limit := helpers.ValidatorActivationChurnLimit(0)
|
||||
for i := uint64(0); i < limit+10; i++ {
|
||||
base.Validators = append(base.Validators, ðpb.Validator{
|
||||
ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
|
||||
@@ -99,7 +99,7 @@ func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
ok, err := IsCurrentPeriodSyncCommittee(state, 12390192)
|
||||
require.ErrorContains(t, "index 12390192 out of range", err)
|
||||
require.ErrorContains(t, "validator index 12390192 does not exist", err)
|
||||
require.Equal(t, false, ok)
|
||||
}
|
||||
|
||||
@@ -182,7 +182,7 @@ func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
ok, err := IsNextPeriodSyncCommittee(state, 120391029)
|
||||
require.ErrorContains(t, "index 120391029 out of range", err)
|
||||
require.ErrorContains(t, "validator index 120391029 does not exist", err)
|
||||
require.Equal(t, false, ok)
|
||||
}
|
||||
|
||||
@@ -282,7 +282,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
index, err := CurrentPeriodSyncSubcommitteeIndices(state, 129301923)
|
||||
require.ErrorContains(t, "index 129301923 out of range", err)
|
||||
require.ErrorContains(t, "validator index 129301923 does not exist", err)
|
||||
require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
|
||||
}
|
||||
|
||||
@@ -367,7 +367,7 @@ func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
index, err := NextPeriodSyncSubcommitteeIndices(state, 21093019)
|
||||
require.ErrorContains(t, "index 21093019 out of range", err)
|
||||
require.ErrorContains(t, "validator index 21093019 does not exist", err)
|
||||
require.DeepEqual(t, []primitives.CommitteeIndex(nil), index)
|
||||
}
|
||||
|
||||
|
||||
@@ -136,6 +136,10 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(indices) == 0 {
|
||||
return nil, errors.New("no active validator indices")
|
||||
}
|
||||
|
||||
if err := UpdateCommitteeCache(ctx, s, epoch); err != nil {
|
||||
return nil, errors.Wrap(err, "could not update committee cache")
|
||||
}
|
||||
@@ -206,10 +210,7 @@ func ActivationExitEpoch(epoch primitives.Epoch) primitives.Epoch {
|
||||
return epoch + 1 + params.BeaconConfig().MaxSeedLookahead
|
||||
}
|
||||
|
||||
// ValidatorChurnLimit returns the number of validators that are allowed to
|
||||
// enter and exit validator pool for an epoch.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// calculateChurnLimit computes the churn limit based on the formula in the spec.
|
||||
//
|
||||
// def get_validator_churn_limit(state: BeaconState) -> uint64:
|
||||
// """
|
||||
@@ -217,12 +218,32 @@ func ActivationExitEpoch(epoch primitives.Epoch) primitives.Epoch {
|
||||
// """
|
||||
// active_validator_indices = get_active_validator_indices(state, get_current_epoch(state))
|
||||
// return max(MIN_PER_EPOCH_CHURN_LIMIT, uint64(len(active_validator_indices)) // CHURN_LIMIT_QUOTIENT)
|
||||
func ValidatorChurnLimit(activeValidatorCount uint64) (uint64, error) {
|
||||
func calculateChurnLimit(activeValidatorCount uint64) uint64 {
|
||||
churnLimit := activeValidatorCount / params.BeaconConfig().ChurnLimitQuotient
|
||||
if churnLimit < params.BeaconConfig().MinPerEpochChurnLimit {
|
||||
churnLimit = params.BeaconConfig().MinPerEpochChurnLimit
|
||||
return params.BeaconConfig().MinPerEpochChurnLimit
|
||||
}
|
||||
return churnLimit, nil
|
||||
return churnLimit
|
||||
}
|
||||
|
||||
// ValidatorActivationChurnLimit returns the maximum number of validators that can be activated per epoch.
|
||||
func ValidatorActivationChurnLimit(activeValidatorCount uint64) uint64 {
|
||||
return calculateChurnLimit(activeValidatorCount)
|
||||
}
|
||||
|
||||
// ValidatorExitChurnLimit returns the maximum number of validators that can be exited per epoch.
|
||||
func ValidatorExitChurnLimit(activeValidatorCount uint64) uint64 {
|
||||
return calculateChurnLimit(activeValidatorCount)
|
||||
}
|
||||
|
||||
// ValidatorActivationChurnLimitDeneb returns the maximum number of validators that can be activated per epoch post Deneb.
|
||||
func ValidatorActivationChurnLimitDeneb(activeValidatorCount uint64) uint64 {
|
||||
limit := calculateChurnLimit(activeValidatorCount)
|
||||
// New in Deneb.
|
||||
if limit > params.BeaconConfig().MaxPerEpochActivationChurnLimit {
|
||||
return params.BeaconConfig().MaxPerEpochActivationChurnLimit
|
||||
}
|
||||
return limit
|
||||
}
|
||||
|
||||
// BeaconProposerIndex returns proposer index of a current slot.
|
||||
|
||||
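For concreteness, a worked example of the refactored churn helpers. The constants assumed here are the mainnet values (MinPerEpochChurnLimit 4, ChurnLimitQuotient 65536, MaxPerEpochActivationChurnLimit 8 from EIP-7514); they are not shown in this diff.

// With 500,000 active validators: max(4, 500000/65536) = 7, so both the exit and
// activation churn limits are 7, and the Deneb activation cap of 8 does not bite.
// With 1,000,000 active validators: max(4, 1000000/65536) = 15.
exits := helpers.ValidatorExitChurnLimit(1_000_000)                  // 15
activations := helpers.ValidatorActivationChurnLimitDeneb(1_000_000) // min(15, 8) = 8
_, _ = exits, activations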
@@ -367,9 +367,50 @@ func TestChurnLimit_OK(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
|
||||
require.NoError(t, err)
|
||||
resultChurn, err := ValidatorChurnLimit(validatorCount)
|
||||
resultChurn := ValidatorActivationChurnLimit(validatorCount)
|
||||
assert.Equal(t, test.wantedChurn, resultChurn, "ValidatorActivationChurnLimit(%d)", test.validatorCount)
|
||||
}
|
||||
}
|
||||
|
||||
func TestChurnLimitDeneb_OK(t *testing.T) {
|
||||
tests := []struct {
|
||||
validatorCount int
|
||||
wantedChurn uint64
|
||||
}{
|
||||
{1000, 4},
|
||||
{100000, 4},
|
||||
{1000000, params.BeaconConfig().MaxPerEpochActivationChurnLimit},
|
||||
{2000000, params.BeaconConfig().MaxPerEpochActivationChurnLimit},
|
||||
}
|
||||
|
||||
defer ClearCache()
|
||||
|
||||
for _, test := range tests {
|
||||
ClearCache()
|
||||
|
||||
// Create validators
|
||||
validators := make([]*ethpb.Validator, test.validatorCount)
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize beacon state
|
||||
beaconState, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
Slot: 1,
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, test.wantedChurn, resultChurn, "ValidatorChurnLimit(%d)", test.validatorCount)
|
||||
|
||||
// Get active validator count
|
||||
validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test churn limit calculation
|
||||
resultChurn := ValidatorActivationChurnLimitDeneb(validatorCount)
|
||||
assert.Equal(t, test.wantedChurn, resultChurn)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -519,12 +560,24 @@ func TestActiveValidatorIndices(t *testing.T) {
|
||||
},
|
||||
want: []primitives.ValidatorIndex{0, 2, 3},
|
||||
},
|
||||
{
|
||||
name: "impossible_zero_validators", // Regression test for issue #13051
|
||||
args: args{
|
||||
state: ðpb.BeaconState{
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
Validators: make([]*ethpb.Validator, 0),
|
||||
},
|
||||
epoch: 10,
|
||||
},
|
||||
wantedErr: "no active validator indices",
|
||||
},
|
||||
}
|
||||
defer ClearCache()
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s, err := state_native.InitializeFromProtoPhase0(tt.args.state)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.SetValidators(tt.args.state.Validators))
|
||||
got, err := ActiveValidatorIndices(context.Background(), s, tt.args.epoch)
|
||||
if tt.wantedErr != "" {
|
||||
assert.ErrorContains(t, tt.wantedErr, err)
|
||||
|
||||
@@ -80,10 +80,7 @@ func ComputeWeakSubjectivityPeriod(ctx context.Context, st state.ReadOnlyBeaconS
|
||||
T := cfg.MaxEffectiveBalance / cfg.GweiPerEth
|
||||
|
||||
// Validator churn limit.
|
||||
delta, err := ValidatorChurnLimit(N)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot obtain active validator churn limit: %w", err)
|
||||
}
|
||||
delta := ValidatorExitChurnLimit(N)
|
||||
|
||||
// Balance top-ups.
|
||||
Delta := uint64(cfg.SlotsPerEpoch.Mul(cfg.MaxDeposits))
|
||||
|
||||
@@ -92,12 +92,12 @@ func ComputeDomainAndSign(st state.ReadOnlyBeaconState, epoch primitives.Epoch,
|
||||
// domain=domain,
|
||||
// ))
|
||||
func ComputeSigningRoot(object fssz.HashRoot, domain []byte) ([32]byte, error) {
|
||||
return SigningData(object.HashTreeRoot, domain)
|
||||
return Data(object.HashTreeRoot, domain)
|
||||
}
|
||||
|
||||
// SigningData computes the signing data by utilising the provided root function and then
|
||||
// Data computes the signing data by utilising the provided root function and then
|
||||
// returning the signing data of the container object.
|
||||
func SigningData(rootFunc func() ([32]byte, error), domain []byte) ([32]byte, error) {
|
||||
func Data(rootFunc func() ([32]byte, error), domain []byte) ([32]byte, error) {
|
||||
objRoot, err := rootFunc()
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
@@ -152,7 +152,7 @@ func VerifyBlockHeaderSigningRoot(blkHdr *ethpb.BeaconBlockHeader, pub, signatur
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not convert bytes to signature")
|
||||
}
|
||||
root, err := SigningData(blkHdr.HashTreeRoot, domain)
|
||||
root, err := Data(blkHdr.HashTreeRoot, domain)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not compute signing root")
|
||||
}
|
||||
@@ -191,7 +191,7 @@ func BlockSignatureBatch(pub, signature, domain []byte, rootFunc func() ([32]byt
|
||||
return nil, errors.Wrap(err, "could not convert bytes to public key")
|
||||
}
|
||||
// utilize custom block hashing function
|
||||
root, err := SigningData(rootFunc, domain)
|
||||
root, err := Data(rootFunc, domain)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute signing root")
|
||||
}
|
||||
|
||||
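The signing.SigningData to signing.Data rename is mechanical; callers still pass the object's HashTreeRoot method and a precomputed domain. A sketch mirroring the BLSChangesSignatureBatch call site above (st is a beacon state and change a signed BLS-to-execution change, both assumed to be in scope):

c := params.BeaconConfig()
domain, err := signing.ComputeDomain(c.DomainBLSToExecutionChange, c.GenesisForkVersion, st.GenesisValidatorsRoot())
if err != nil {
	return err
}
// signing.ComputeSigningRoot(change.Message, domain) is equivalent; it now wraps Data.
root, err := signing.Data(change.Message.HashTreeRoot, domain)
if err != nil {
	return err
}
_ = root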
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stateutil"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/container/trie"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
@@ -221,6 +222,18 @@ func OptimizedGenesisBeaconState(genesisTime uint64, preState state.BeaconState,
|
||||
|
||||
// EmptyGenesisState returns an empty beacon state object.
|
||||
func EmptyGenesisState() (state.BeaconState, error) {
|
||||
blockRoots := make([][]byte, fieldparams.BlockRootsLength)
|
||||
for i := range blockRoots {
|
||||
blockRoots[i] = make([]byte, fieldparams.RootLength)
|
||||
}
|
||||
stateRoots := make([][]byte, fieldparams.StateRootsLength)
|
||||
for i := range stateRoots {
|
||||
stateRoots[i] = make([]byte, fieldparams.RootLength)
|
||||
}
|
||||
mixes := make([][]byte, fieldparams.RandaoMixesLength)
|
||||
for i := range mixes {
|
||||
mixes[i] = make([]byte, fieldparams.RootLength)
|
||||
}
|
||||
st := ðpb.BeaconState{
|
||||
// Misc fields.
|
||||
Slot: 0,
|
||||
@@ -229,6 +242,9 @@ func EmptyGenesisState() (state.BeaconState, error) {
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
Epoch: 0,
|
||||
},
|
||||
BlockRoots: blockRoots,
|
||||
StateRoots: stateRoots,
|
||||
RandaoMixes: mixes,
|
||||
// Validator registry fields.
|
||||
Validators: []*ethpb.Validator{},
|
||||
Balances: []uint64{},
|
||||
|
||||
@@ -103,8 +103,8 @@ func TestGenesisState_HashEquality(t *testing.T) {
|
||||
pbstate, err := state_native.ProtobufBeaconStatePhase0(state.ToProto())
|
||||
require.NoError(t, err)
|
||||
|
||||
root1, err1 := hash.HashProto(pbState1)
|
||||
root2, err2 := hash.HashProto(pbstate)
|
||||
root1, err1 := hash.Proto(pbState1)
|
||||
root2, err2 := hash.Proto(pbstate)
|
||||
|
||||
if err1 != nil || err2 != nil {
|
||||
t.Fatalf("Failed to marshal state to bytes: %v %v", err1, err2)
|
||||
|
||||
@@ -382,10 +382,18 @@ func TestProcessEpochPrecompute_CanProcess(t *testing.T) {
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
|
||||
JustificationBits: bitfield.Bitvector4{0x00},
|
||||
CurrentJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
|
||||
Validators: []*ethpb.Validator{
|
||||
{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MinDepositAmount,
|
||||
},
|
||||
},
|
||||
Balances: []uint64{
|
||||
params.BeaconConfig().MinDepositAmount,
|
||||
},
|
||||
}
|
||||
s, err := state_native.InitializeFromProtoPhase0(base)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.SetValidators([]*ethpb.Validator{}))
|
||||
newState, err := transition.ProcessEpochPrecompute(context.Background(), s)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, uint64(0), newState.Slashings()[2], "Unexpected slashed balance")
|
||||
|
||||
@@ -21,9 +21,9 @@ import (
|
||||
// an already exited validator
|
||||
var ValidatorAlreadyExitedErr = errors.New("validator already exited")
|
||||
|
||||
// ValidatorsMaxExitEpochAndChurn returns the maximum non-FAR_FUTURE_EPOCH exit
|
||||
// MaxExitEpochAndChurn returns the maximum non-FAR_FUTURE_EPOCH exit
|
||||
// epoch and the number of them
|
||||
func ValidatorsMaxExitEpochAndChurn(s state.BeaconState) (maxExitEpoch primitives.Epoch, churn uint64) {
|
||||
func MaxExitEpochAndChurn(s state.BeaconState) (maxExitEpoch primitives.Epoch, churn uint64) {
|
||||
farFutureEpoch := params.BeaconConfig().FarFutureEpoch
|
||||
err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
|
||||
e := val.ExitEpoch()
|
||||
@@ -82,10 +82,7 @@ func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primiti
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrap(err, "could not get active validator count")
|
||||
}
|
||||
currentChurn, err := helpers.ValidatorChurnLimit(activeValidatorCount)
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrap(err, "could not get churn limit")
|
||||
}
|
||||
currentChurn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
|
||||
|
||||
if churn >= currentChurn {
|
||||
exitQueueEpoch, err = exitQueueEpoch.SafeAdd(1)
|
||||
@@ -137,7 +134,7 @@ func SlashValidator(
|
||||
slashedIdx primitives.ValidatorIndex,
|
||||
penaltyQuotient uint64,
|
||||
proposerRewardQuotient uint64) (state.BeaconState, error) {
|
||||
maxExitEpoch, churn := ValidatorsMaxExitEpochAndChurn(s)
|
||||
maxExitEpoch, churn := MaxExitEpochAndChurn(s)
|
||||
s, _, err := InitiateValidatorExit(ctx, s, slashedIdx, maxExitEpoch, churn)
|
||||
if err != nil && !errors.Is(err, ValidatorAlreadyExitedErr) {
|
||||
return nil, errors.Wrapf(err, "could not initiate validator %d exit", slashedIdx)
|
||||
@@ -235,10 +232,7 @@ func ExitedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validato
|
||||
exitQueueChurn++
|
||||
}
|
||||
}
|
||||
churn, err := helpers.ValidatorChurnLimit(activeValidatorCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get churn limit")
|
||||
}
|
||||
churn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
|
||||
if churn < exitQueueChurn {
|
||||
exitQueueEpoch++
|
||||
}
|
||||
@@ -276,10 +270,7 @@ func EjectedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validat
|
||||
exitQueueChurn++
|
||||
}
|
||||
}
|
||||
churn, err := helpers.ValidatorChurnLimit(activeValidatorCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get churn limit")
|
||||
}
|
||||
churn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
|
||||
if churn < exitQueueChurn {
|
||||
exitQueueEpoch++
|
||||
}
|
||||
|
||||
@@ -410,7 +410,7 @@ func TestValidatorMaxExitEpochAndChurn(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
s, err := state_native.InitializeFromProtoPhase0(tt.state)
|
||||
require.NoError(t, err)
|
||||
epoch, churn := ValidatorsMaxExitEpochAndChurn(s)
|
||||
epoch, churn := MaxExitEpochAndChurn(s)
|
||||
require.Equal(t, tt.wantedEpoch, epoch)
|
||||
require.Equal(t, tt.wantedChurn, churn)
|
||||
}
|
||||
|
||||
beacon-chain/das/BUILD.bazel (new file, 50 lines)
@@ -0,0 +1,50 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"availability.go",
|
||||
"blocking.go",
|
||||
"cache.go",
|
||||
"error.go",
|
||||
"iface.go",
|
||||
"mock.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/das",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"availability_test.go",
|
||||
"cache_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
beacon-chain/das/availability.go (new file, 158 lines)
@@ -0,0 +1,158 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
errors "github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type kzgBatch func([][]byte, []*ethpb.BlobSidecar) error
|
||||
|
||||
type LazilyPersistentStore struct {
|
||||
db BlobsDB
|
||||
cache *cache
|
||||
verifyKZG kzgBatch
|
||||
}
|
||||
|
||||
var _ AvailabilityStore = &LazilyPersistentStore{}
|
||||
|
||||
func NewLazilyPersistentStore(db BlobsDB) *LazilyPersistentStore {
|
||||
return &LazilyPersistentStore{
|
||||
db: db,
|
||||
cache: newCache(),
|
||||
verifyKZG: kzg.IsDataAvailable,
|
||||
}
|
||||
}
|
||||
|
||||
// Persist adds blobs to the working blob cache (in-memory or disk backed is an implementation
|
||||
// detail). Blobs stored in this cache will be persisted for at least as long as the node is
|
||||
// running. Once IsDataAvailable succeeds, all blobs referenced by the given block are guaranteed
|
||||
// to be persisted for the remainder of the retention period.
|
||||
func (s *LazilyPersistentStore) Persist(ctx context.Context, current primitives.Slot, sc ...*ethpb.BlobSidecar) ([]*ethpb.BlobSidecar, error) {
|
||||
if len(sc) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
if len(sc) == 1 {
|
||||
|
||||
}
|
||||
var key cacheKey
|
||||
var entry *cacheEntry
|
||||
persisted := make([]*ethpb.BlobSidecar, 0, len(sc))
|
||||
for i := range sc {
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(sc[i].Slot), slots.ToEpoch(current)) {
|
||||
continue
|
||||
}
|
||||
if sc[i].Index >= fieldparams.MaxBlobsPerBlock {
|
||||
log.WithField("block_root", sc[i].BlockRoot).WithField("index", sc[i].Index).Error("discarding BlobSidecar with index >= MaxBlobsPerBlock")
|
||||
continue
|
||||
}
|
||||
skey := keyFromSidecar(sc[i])
|
||||
if key != skey {
|
||||
key = skey
|
||||
entry = s.cache.ensure(key)
|
||||
}
|
||||
if entry.stash(sc[i]) {
|
||||
persisted = append(persisted, sc[i])
|
||||
}
|
||||
}
|
||||
return persisted, nil
|
||||
}
|
||||
|
||||
// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
|
||||
// - BlobSidecars already in the db are assumed to have been previously verified against the block.
|
||||
// - BlobSidecars waiting for verification in the cache will be persisted to the db after verification.
|
||||
// - When BlobSidecars are written to the db, their cache entries are cleared.
|
||||
// - BlobSidecar cache entries with commitments that do not match the block will be evicted.
|
||||
// - BlobSidecar cache entries with commitments that fail proof verification will be evicted.
|
||||
func (s *LazilyPersistentStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
|
||||
blockCommitments := commitmentsToCheck(b, current)
|
||||
if len(blockCommitments) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
key := keyFromBlock(b)
|
||||
entry := s.cache.ensure(key)
|
||||
// holding the lock over the course of the DA check simplifies everything
|
||||
entry.Lock()
|
||||
defer entry.Unlock()
|
||||
if err := s.daCheck(ctx, b.Root(), blockCommitments, entry); err != nil {
|
||||
return err
|
||||
}
|
||||
// If there is no error, DA has been successful, so we can clean up the cache.
|
||||
s.cache.delete(key)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *LazilyPersistentStore) daCheck(ctx context.Context, root [32]byte, blockCommitments [][]byte, entry *cacheEntry) error {
|
||||
sidecars, cacheErr := entry.filter(root, blockCommitments)
|
||||
if cacheErr == nil {
|
||||
if err := s.verifyKZG(blockCommitments, sidecars); err != nil {
|
||||
s.cache.delete(keyFromSidecar(sidecars[0]))
|
||||
return err
|
||||
}
|
||||
// We have all the committed sidecars in cache, and they all have valid proofs.
|
||||
// If flushing them to backing storage succeeds, then we can confirm DA.
|
||||
return s.db.SaveBlobSidecar(ctx, sidecars)
|
||||
}
|
||||
|
||||
// Before returning the cache error, check if we have the data in the db.
|
||||
dbidx, err := s.persisted(ctx, root, entry)
|
||||
// persisted() accounts for db.ErrNotFound, so this is a real database error.
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
notInDb, err := dbidx.missing(blockCommitments)
|
||||
// This is a database integrity sanity check - it should never fail.
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// All commitments were found in the db, due to a previous successful DA check.
|
||||
if len(notInDb) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return cacheErr
|
||||
}
|
||||
|
||||
// persisted populates the db cache, which contains a mapping from Index->KzgCommitment for BlobSidecars previously verified
|
||||
// (proof verification) and saved to the backend.
|
||||
func (s *LazilyPersistentStore) persisted(ctx context.Context, root [32]byte, entry *cacheEntry) (dbidx, error) {
|
||||
if entry.dbidxInitialized() {
|
||||
return entry.dbidx(), nil
|
||||
}
|
||||
sidecars, err := s.db.BlobSidecarsByRoot(ctx, root)
|
||||
if err != nil {
|
||||
if errors.Is(err, db.ErrNotFound) {
|
||||
// No BlobSidecars, initialize with empty idx.
|
||||
return entry.ensureDbidx(), nil
|
||||
}
|
||||
return entry.dbidx(), err
|
||||
}
|
||||
// Ensure all sidecars found in the db are represented in the cache and return the cache value.
|
||||
return entry.ensureDbidx(sidecars...), nil
|
||||
}
|
||||
|
||||
func commitmentsToCheck(b blocks.ROBlock, current primitives.Slot) [][]byte {
|
||||
if b.Version() < version.Deneb {
|
||||
return nil
|
||||
}
|
||||
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(current)) {
|
||||
return nil
|
||||
}
|
||||
kzgCommitments, err := b.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return kzgCommitments
|
||||
}
|
||||
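Taken together, availability.go is intended to be used roughly as follows; this is a hedged sketch, with blobDB, currentSlot, incomingSidecars, and roBlock standing in for values the caller would already have:

avs := das.NewLazilyPersistentStore(blobDB) // blobDB satisfies das.BlobsDB

// 1. Stash sidecars as they arrive; out-of-retention slots and out-of-bounds
//    indices are silently skipped by Persist.
if _, err := avs.Persist(ctx, currentSlot, incomingSidecars...); err != nil {
	return err
}

// 2. Before importing the block, run the DA check. On success the cached sidecars
//    have been KZG-verified and flushed to the db, and the cache entry is cleared.
if err := avs.IsDataAvailable(ctx, currentSlot, roBlock); err != nil {
	return errors.Wrap(err, "data not available for block")
}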
beacon-chain/das/availability_test.go (new file, 318 lines)
@@ -0,0 +1,318 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
errors "github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
)
|
||||
|
||||
func Test_commitmentsToCheck(t *testing.T) {
|
||||
windowSlots, err := slots.EpochEnd(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest)
|
||||
require.NoError(t, err)
|
||||
commits := [][]byte{
|
||||
bytesutil.PadTo([]byte("a"), 48),
|
||||
bytesutil.PadTo([]byte("b"), 48),
|
||||
bytesutil.PadTo([]byte("c"), 48),
|
||||
bytesutil.PadTo([]byte("d"), 48),
|
||||
}
|
||||
cases := []struct {
|
||||
name string
|
||||
commits [][]byte
|
||||
block func(*testing.T) blocks.ROBlock
|
||||
slot primitives.Slot
|
||||
}{
|
||||
{
|
||||
name: "pre deneb",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
bb := util.NewBeaconBlockBellatrix()
|
||||
sb, err := blocks.NewSignedBeaconBlock(bb)
|
||||
require.NoError(t, err)
|
||||
rb, err := blocks.NewROBlock(sb)
|
||||
require.NoError(t, err)
|
||||
return rb
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "commitments within da",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
d := util.NewBeaconBlockDeneb()
|
||||
d.Block.Body.BlobKzgCommitments = commits
|
||||
d.Block.Slot = 100
|
||||
sb, err := blocks.NewSignedBeaconBlock(d)
|
||||
require.NoError(t, err)
|
||||
rb, err := blocks.NewROBlock(sb)
|
||||
require.NoError(t, err)
|
||||
return rb
|
||||
},
|
||||
commits: commits,
|
||||
slot: 100,
|
||||
},
|
||||
{
|
||||
name: "commitments outside da",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
d := util.NewBeaconBlockDeneb()
|
||||
// block is from slot 0, "current slot" is window size +1 (so outside the window)
|
||||
d.Block.Body.BlobKzgCommitments = commits
|
||||
sb, err := blocks.NewSignedBeaconBlock(d)
|
||||
require.NoError(t, err)
|
||||
rb, err := blocks.NewROBlock(sb)
|
||||
require.NoError(t, err)
|
||||
return rb
|
||||
},
|
||||
slot: windowSlots + 1,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
b := c.block(t)
|
||||
co := commitmentsToCheck(b, c.slot)
|
||||
require.Equal(t, len(c.commits), len(co))
|
||||
for i := 0; i < len(c.commits); i++ {
|
||||
require.Equal(t, true, bytes.Equal(c.commits[i], co[i]))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func daAlwaysSucceeds(_ [][]byte, _ []*ethpb.BlobSidecar) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type mockDA struct {
|
||||
t *testing.T
|
||||
cmts [][]byte
|
||||
scs []*ethpb.BlobSidecar
|
||||
err error
|
||||
}
|
||||
|
||||
func (m *mockDA) expectedArguments(gotC [][]byte, gotSC []*ethpb.BlobSidecar) error {
|
||||
require.Equal(m.t, len(m.cmts), len(gotC))
|
||||
require.Equal(m.t, len(gotSC), len(m.scs))
|
||||
for i := range m.cmts {
|
||||
require.Equal(m.t, true, bytes.Equal(m.cmts[i], gotC[i]))
|
||||
}
|
||||
for i := range m.scs {
|
||||
require.Equal(m.t, m.scs[i], gotSC[i])
|
||||
}
|
||||
return m.err
|
||||
}
|
||||
|
||||
func TestLazilyPersistent_Missing(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := &mockBlobsDB{}
|
||||
as := NewLazilyPersistentStore(db)
|
||||
|
||||
blk, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
|
||||
|
||||
expectedCommitments, err := blk.Block().Body().BlobKzgCommitments()
|
||||
require.NoError(t, err)
|
||||
vf := &mockDA{
|
||||
t: t,
|
||||
cmts: expectedCommitments,
|
||||
scs: scs,
|
||||
}
|
||||
as.verifyKZG = vf.expectedArguments
|
||||
|
||||
// Only one commitment persisted, should return error with other indices
|
||||
_, err = as.Persist(ctx, 1, scs[2])
|
||||
require.NoError(t, err)
|
||||
err = as.IsDataAvailable(ctx, 1, blk)
|
||||
require.NotNil(t, err)
|
||||
missingErr := MissingIndicesError{}
|
||||
require.Equal(t, true, errors.As(err, &missingErr))
|
||||
require.DeepEqual(t, []uint64{0, 1}, missingErr.Missing())
|
||||
|
||||
// All but one persisted, return missing idx
|
||||
_, err = as.Persist(ctx, 1, scs[0])
|
||||
require.NoError(t, err)
|
||||
err = as.IsDataAvailable(ctx, 1, blk)
|
||||
require.NotNil(t, err)
|
||||
require.Equal(t, true, errors.As(err, &missingErr))
|
||||
require.DeepEqual(t, []uint64{1}, missingErr.Missing())
|
||||
|
||||
// All persisted, return nil
|
||||
_, err = as.Persist(ctx, 1, scs[1])
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, as.IsDataAvailable(ctx, 1, blk))
|
||||
}
|
||||
|
||||
func TestLazilyPersistent_Mismatch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := &mockBlobsDB{}
|
||||
as := NewLazilyPersistentStore(db)
|
||||
|
||||
blk, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
|
||||
|
||||
vf := &mockDA{
|
||||
t: t,
|
||||
err: errors.New("kzg check should not run"),
|
||||
}
|
||||
as.verifyKZG = vf.expectedArguments
|
||||
scs[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
|
||||
|
||||
// Only one commitment persisted, should return error with other indices
|
||||
_, err := as.Persist(ctx, 1, scs[0])
|
||||
require.NoError(t, err)
|
||||
err = as.IsDataAvailable(ctx, 1, blk)
|
||||
require.NotNil(t, err)
|
||||
mismatchErr := CommitmentMismatchError{}
|
||||
require.Equal(t, true, errors.As(err, &mismatchErr))
|
||||
require.DeepEqual(t, []uint64{0}, mismatchErr.Mismatch())
|
||||
|
||||
// the next time we call the DA check, the mismatched commitment should be evicted
|
||||
err = as.IsDataAvailable(ctx, 1, blk)
|
||||
require.NotNil(t, err)
|
||||
missingErr := MissingIndicesError{}
|
||||
require.Equal(t, true, errors.As(err, &missingErr))
|
||||
require.DeepEqual(t, []uint64{0, 1, 2}, missingErr.Missing())
|
||||
}
|
||||
|
||||
func TestPersisted(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
blk, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
|
||||
db := &mockBlobsDB{
|
||||
BlobSidecarsByRootCallback: func(ctx context.Context, beaconBlockRoot [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
|
||||
return scs, nil
|
||||
},
|
||||
}
|
||||
vf := &mockDA{
|
||||
t: t,
|
||||
err: errors.New("kzg check should not run"),
|
||||
}
|
||||
as := NewLazilyPersistentStore(db)
|
||||
as.verifyKZG = vf.expectedArguments
|
||||
entry := &cacheEntry{}
|
||||
dbidx, err := as.persisted(ctx, blk.Root(), entry)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, dbidx[0] == nil)
|
||||
for i := range scs {
|
||||
require.Equal(t, *dbidx[i], bytesutil.ToBytes48(scs[i].KzgCommitment))
|
||||
}
|
||||
|
||||
expectedCommitments, err := blk.Block().Body().BlobKzgCommitments()
|
||||
require.NoError(t, err)
|
||||
missing, err := dbidx.missing(expectedCommitments)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(missing))
|
||||
|
||||
// test that the caching is working by returning the wrong set of sidecars
|
||||
// and making sure that dbidx still thinks none are missing
|
||||
db = &mockBlobsDB{
|
||||
BlobSidecarsByRootCallback: func(ctx context.Context, beaconBlockRoot [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
|
||||
return scs[1:], nil
|
||||
},
|
||||
}
|
||||
as = NewLazilyPersistentStore(db)
|
||||
// note, using the same entry value
|
||||
dbidx, err = as.persisted(ctx, blk.Root(), entry)
|
||||
require.NoError(t, err)
|
||||
// same assertions should pass as when all sidecars returned by db
|
||||
missing, err = dbidx.missing(expectedCommitments)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(missing))
|
||||
|
||||
// do it again, but with a fresh cache entry - we should see a missing sidecar
|
||||
newEntry := &cacheEntry{}
|
||||
dbidx, err = as.persisted(ctx, blk.Root(), newEntry)
|
||||
require.NoError(t, err)
|
||||
missing, err = dbidx.missing(expectedCommitments)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(missing))
|
||||
// only element in missing should be the zero index
|
||||
require.Equal(t, uint64(0), missing[0])
|
||||
}
|
||||
|
||||
func TestLazilyPersistent_DBFallback(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
blk, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
|
||||
// Generate the same sidecar at index 0 so we can mess with its commitment
|
||||
_, scscp := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 1)
|
||||
db := &mockBlobsDB{
|
||||
BlobSidecarsByRootCallback: func(ctx context.Context, beaconBlockRoot [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
|
||||
return scs, nil
|
||||
},
|
||||
}
|
||||
vf := &mockDA{
|
||||
t: t,
|
||||
err: errors.New("kzg check should not run"),
|
||||
}
|
||||
as := NewLazilyPersistentStore(db)
|
||||
as.verifyKZG = vf.expectedArguments
|
||||
|
||||
// Set up the mismatched commit, but we don't expect this to error because
|
||||
// the db contains the sidecars.
|
||||
scscp[0].BlockRoot = scs[0].BlockRoot
|
||||
scscp[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
|
||||
_, err := as.Persist(ctx, 1, scscp[0])
|
||||
require.NoError(t, err)
|
||||
|
||||
// This should pass since the db is giving us all the right sidecars
|
||||
require.NoError(t, as.IsDataAvailable(ctx, 1, blk))
|
||||
|
||||
// now using an empty db, we should fail
|
||||
as.db = &mockBlobsDB{}
|
||||
|
||||
// but we should have pruned, so we'll get a missing error, not mismatch
|
||||
err = as.IsDataAvailable(ctx, 1, blk)
|
||||
require.NotNil(t, err)
|
||||
missingErr := MissingIndicesError{}
|
||||
require.Equal(t, true, errors.As(err, &missingErr))
|
||||
require.DeepEqual(t, []uint64{0, 1, 2}, missingErr.Missing())
|
||||
|
||||
// put the bad value back in the cache
|
||||
persisted, err := as.Persist(ctx, 1, scscp[0])
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(persisted))
|
||||
// now we'll get a mismatch error
|
||||
err = as.IsDataAvailable(ctx, 1, blk)
|
||||
require.NotNil(t, err)
|
||||
mismatchErr := CommitmentMismatchError{}
|
||||
require.Equal(t, true, errors.As(err, &mismatchErr))
|
||||
require.DeepEqual(t, []uint64{0}, mismatchErr.Mismatch())
|
||||
}
|
||||
|
||||
func TestLazyPersistOnceCommitted(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
_, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 6)
|
||||
as := NewLazilyPersistentStore(&mockBlobsDB{})
|
||||
// stashes as expected
|
||||
p, err := as.Persist(ctx, 1, scs...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 6, len(p))
|
||||
// ignores duplicates
|
||||
p, err = as.Persist(ctx, 1, scs...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(p))
|
||||
|
||||
// ignores index out of bound
|
||||
scs[0].Index = 6
|
||||
p, err = as.Persist(ctx, 1, scs[0])
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(p))
|
||||
|
||||
_, more := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 4)
|
||||
// ignores sidecars before the retention period
|
||||
slotOOB, err := slots.EpochStart(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest)
|
||||
require.NoError(t, err)
|
||||
p, err = as.Persist(ctx, 32+slotOOB, more[0])
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(p))
|
||||
|
||||
// doesn't ignore new sidecars with a different block root
|
||||
p, err = as.Persist(ctx, 1, more...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 4, len(p))
|
||||
}
|
||||
beacon-chain/das/blocking.go (new file, 93 lines)
@@ -0,0 +1,93 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
errors "github.com/pkg/errors"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// AsyncStore wraps an AvailabilityStore and blocks until IsDataAvailable is ready.
|
||||
// If the context given to IsDataAvailable is cancelled, the result of IsDataAvailable will be ctx.Err().
|
||||
type AsyncStore struct {
|
||||
notif *idxNotifiers
|
||||
s AvailabilityStore
|
||||
}
|
||||
|
||||
func (bs *AsyncStore) PersistOnceCommitted(ctx context.Context, current primitives.Slot, sc ...*ethpb.BlobSidecar) ([]*ethpb.BlobSidecar, error) {
|
||||
if len(sc) < 1 {
|
||||
return nil, nil
|
||||
}
|
||||
seen := bs.notif.ensure(keyFromSidecar(sc[0]))
|
||||
persisted, err := bs.s.Persist(ctx, current, sc...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for i := range persisted {
|
||||
seen <- persisted[i].Index
|
||||
}
|
||||
return persisted, nil
|
||||
}
|
||||
|
||||
func (bs *AsyncStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
|
||||
key := keyFromBlock(b)
|
||||
for {
|
||||
err := bs.s.IsDataAvailable(ctx, current, b)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
mie := MissingIndicesError{}
|
||||
if !errors.As(err, &mie) {
|
||||
return err
|
||||
}
|
||||
waitFor := make(map[uint64]struct{})
|
||||
for _, m := range mie.Missing() {
|
||||
waitFor[m] = struct{}{}
|
||||
}
|
||||
if err := waitForIndices(ctx, bs.notif.ensure(key), waitFor); err != nil {
|
||||
return err
|
||||
}
|
||||
bs.notif.reset(key)
|
||||
}
|
||||
}
|
||||
|
||||
func waitForIndices(ctx context.Context, idxSeen chan uint64, waitFor map[uint64]struct{}) error {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case idx := <-idxSeen:
|
||||
delete(waitFor, idx)
|
||||
if len(waitFor) == 0 {
|
||||
return nil
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type idxNotifiers struct {
|
||||
sync.RWMutex
|
||||
entries map[cacheKey]chan uint64
|
||||
}
|
||||
|
||||
func (c *idxNotifiers) ensure(key cacheKey) chan uint64 {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
e, ok := c.entries[key]
|
||||
if !ok {
|
||||
e = make(chan uint64, fieldparams.MaxBlobsPerBlock)
|
||||
c.entries[key] = e
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
func (c *idxNotifiers) reset(key cacheKey) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
delete(c.entries, key)
|
||||
}
|
||||
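A short in-package sketch of the blocking flow AsyncStore adds: IsDataAvailable retries after each batch of missing indices is announced on the per-block channel that PersistOnceCommitted feeds. The struct literal below is an assumption; this change set defines the type but no constructor.

async := &AsyncStore{
	notif: &idxNotifiers{entries: make(map[cacheKey]chan uint64)},
	s:     NewLazilyPersistentStore(blobDB), // blobDB is a placeholder BlobsDB
}

go func() {
	// Blocks until every commitment in roBlock has been persisted, or ctx is done.
	if err := async.IsDataAvailable(ctx, currentSlot, roBlock); err != nil {
		log.WithError(err).Error("DA check failed")
	}
}()

// Meanwhile, e.g. from gossip handling, each persisted sidecar index unblocks the waiter.
if _, err := async.PersistOnceCommitted(ctx, currentSlot, sidecar); err != nil {
	log.WithError(err).Error("could not persist sidecar")
}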
beacon-chain/das/cache.go (new file, 184 lines)
@@ -0,0 +1,184 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
errors "github.com/pkg/errors"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
errDBCommitmentMismatch = errors.New("blob/block commitment mismatch")
|
||||
)
|
||||
|
||||
// cacheKey includes the slot so that we can easily iterate through the cache and compare
|
||||
// slots for eviction purposes. Whether the input is the block or the sidecar, we always have
|
||||
// the root+slot when interacting with the cache, so it isn't an inconvenience to use both.
|
||||
type cacheKey struct {
|
||||
slot primitives.Slot
|
||||
root [32]byte
|
||||
}
|
||||
|
||||
type cache struct {
|
||||
sync.RWMutex
|
||||
entries map[cacheKey]*cacheEntry
|
||||
}
|
||||
|
||||
func newCache() *cache {
|
||||
return &cache{entries: make(map[cacheKey]*cacheEntry)}
|
||||
}
|
||||
|
||||
// keyFromSidecar is a convenience method for constructing a cacheKey from a BlobSidecar value.
|
||||
func keyFromSidecar(sc *ethpb.BlobSidecar) cacheKey {
|
||||
return cacheKey{slot: sc.Slot, root: bytesutil.ToBytes32(sc.BlockRoot)}
|
||||
}
|
||||
|
||||
// keyFromBlock is a convenience method for constructing a cacheKey from a ROBlock value.
|
||||
func keyFromBlock(b blocks.ROBlock) cacheKey {
|
||||
return cacheKey{slot: b.Block().Slot(), root: b.Root()}
|
||||
}
|
||||
|
||||
// ensure returns the entry for the given key, creating it if it isn't already present.
|
||||
func (c *cache) ensure(key cacheKey) *cacheEntry {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
e, ok := c.entries[key]
|
||||
if !ok {
|
||||
e = &cacheEntry{}
|
||||
c.entries[key] = e
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// delete removes the cache entry from the cache.
|
||||
func (c *cache) delete(key cacheKey) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
delete(c.entries, key)
|
||||
}
|
||||
|
||||
// dbidx is a compact representation of the set of BlobSidecars in the database for a given root,
|
||||
// organized as a map from BlobSidecar.Index->BlobSidecar.KzgCommitment.
|
||||
// This representation is convenient for comparison to a block's commitments.
|
||||
type dbidx [fieldparams.MaxBlobsPerBlock]*[48]byte
|
||||
|
||||
// missing compares the set of BlobSidecars observed in the backing store to the set of commitments
|
||||
// observed in a block - cmts is the BlobKzgCommitments field from a block.
|
||||
func (idx dbidx) missing(cmts [][]byte) ([]uint64, error) {
|
||||
m := make([]uint64, 0, len(cmts))
|
||||
for i := range cmts {
|
||||
if idx[i] == nil {
|
||||
m = append(m, uint64(i))
|
||||
continue
|
||||
}
|
||||
c := *idx[i]
|
||||
if c != bytesutil.ToBytes48(cmts[i]) {
|
||||
return nil, errors.Wrapf(errDBCommitmentMismatch,
|
||||
"index=%d, db=%#x, block=%#x", i, c, cmts[i])
|
||||
}
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// cacheEntry represents 2 different types of caches for a given block.
|
||||
// scs is a fixed-length cache of BlobSidecars.
|
||||
// dbx is a compact representation of BlobSidecars observed in the backing store.
|
||||
// dbx assumes that all writes to the backing store go through the same cache.
|
||||
type cacheEntry struct {
|
||||
sync.RWMutex
|
||||
scs [fieldparams.MaxBlobsPerBlock]*ethpb.BlobSidecar
|
||||
dbx dbidx
|
||||
dbRead bool
|
||||
}
|
||||
|
||||
// stash adds an item to the in-memory cache of BlobSidecars.
|
||||
// Only the first BlobSidecar of a given Index will be kept in the cache.
|
||||
// The return value represents whether the given BlobSidecar was stashed.
|
||||
// A false value means there was already a BlobSidecar with the given Index.
|
||||
func (e *cacheEntry) stash(sc *ethpb.BlobSidecar) bool {
|
||||
if e.scs[sc.Index] == nil {
|
||||
e.scs[sc.Index] = sc
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (e *cacheEntry) dbidx() dbidx {
|
||||
return e.dbx
|
||||
}
|
||||
|
||||
func (e *cacheEntry) dbidxInitialized() bool {
|
||||
return e.dbRead
|
||||
}
|
||||
|
||||
// filter evicts sidecars that are not committed to by the block and returns custom
|
||||
// errors if the cache is missing any of the commitments, or if the commitments in
|
||||
// the cache do not match those found in the block. If err is nil, then all expected
|
||||
// commitments were found in the cache and the sidecar slice return value can be used
|
||||
// to perform a DA check against the cached sidecars.
|
||||
func (e *cacheEntry) filter(root [32]byte, blkCmts [][]byte) ([]*ethpb.BlobSidecar, error) {
|
||||
// Evict any blobs that are out of range.
|
||||
for i := len(blkCmts); i < fieldparams.MaxBlobsPerBlock; i++ {
|
||||
if e.scs[i] == nil {
|
||||
continue
|
||||
}
|
||||
log.WithField("block_root", root).
|
||||
WithField("index", i).
|
||||
WithField("cached_commitment", fmt.Sprintf("%#x", e.scs[i].KzgCommitment)).
|
||||
Warn("Evicting BlobSidecar with index > maximum blob commitment")
|
||||
e.scs[i] = nil
|
||||
}
|
||||
// Generate a MissingIndicesError for any missing indices.
|
||||
// Generate a CommitmentMismatchError for any mismatched commitments.
|
||||
missing := make([]uint64, 0, len(blkCmts))
|
||||
mismatch := make([]uint64, 0, len(blkCmts))
|
||||
for i := range blkCmts {
|
||||
if e.scs[i] == nil {
|
||||
missing = append(missing, uint64(i))
|
||||
continue
|
||||
}
|
||||
if !bytes.Equal(blkCmts[i], e.scs[i].KzgCommitment) {
|
||||
mismatch = append(mismatch, uint64(i))
|
||||
log.WithField("block_root", root).
|
||||
WithField("index", i).
|
||||
WithField("expected_commitment", fmt.Sprintf("%#x", blkCmts[i])).
|
||||
WithField("cached_commitment", fmt.Sprintf("%#x", e.scs[i].KzgCommitment)).
|
||||
Error("Evicting BlobSidecar with incorrect commitment")
|
||||
e.scs[i] = nil
|
||||
continue
|
||||
}
|
||||
}
|
||||
if len(mismatch) > 0 {
|
||||
return nil, NewCommitmentMismatchError(mismatch)
|
||||
}
|
||||
if len(missing) > 0 {
|
||||
return nil, NewMissingIndicesError(missing)
|
||||
}
|
||||
return e.scs[0:len(blkCmts)], nil
|
||||
}
|
||||
|
||||
// ensureDbidx updates the db cache representation to include the given BlobSidecars.
|
||||
func (e *cacheEntry) ensureDbidx(scs ...*ethpb.BlobSidecar) dbidx {
|
||||
if e.dbRead == false {
|
||||
e.dbRead = true
|
||||
}
|
||||
for i := range scs {
|
||||
if scs[i].Index >= fieldparams.MaxBlobsPerBlock {
|
||||
continue
|
||||
}
|
||||
// Don't overwrite.
|
||||
if e.dbx[scs[i].Index] != nil {
|
||||
continue
|
||||
}
|
||||
c := bytesutil.ToBytes48(scs[i].KzgCommitment)
|
||||
e.dbx[scs[i].Index] = &c
|
||||
}
|
||||
return e.dbx
|
||||
}
|
||||
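Taken together, the helpers above imply a per-block lifecycle: build a key, ensure an entry, stash sidecars as they arrive, and filter against the block's commitments before running the DA check. The sketch below illustrates that flow; it is not part of the diff, and the commitments(blk) helper for pulling BlobKzgCommitments out of the block body is an assumption.

```go
// daCheckSketch is a hypothetical illustration of how the cache helpers compose.
func daCheckSketch(c *cache, blk blocks.ROBlock, incoming []*ethpb.BlobSidecar) error {
    key := keyFromBlock(blk)
    entry := c.ensure(key)
    defer c.delete(key) // drop the entry once the block has been handled

    // Stash whatever sidecars we already have; duplicates by Index are ignored.
    for _, sc := range incoming {
        entry.stash(sc)
    }

    // commitments is assumed to return the block's BlobKzgCommitments field.
    cmts := commitments(blk)

    // filter returns the cached sidecars in commitment order, or a
    // MissingIndicesError / CommitmentMismatchError describing what is wrong.
    scs, err := entry.filter(blk.Root(), cmts)
    if err != nil {
        return err
    }
    _ = scs // a real caller would now run KZG verification over scs
    return nil
}
```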
beacon-chain/das/cache_test.go (new file, 122 lines)
@@ -0,0 +1,122 @@
package das

import (
    "testing"

    fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestCacheEnsureDelete(t *testing.T) {
    c := newCache()
    require.Equal(t, 0, len(c.entries))
    root := bytesutil.ToBytes32([]byte("root"))
    slot := primitives.Slot(1234)
    k := cacheKey{root: root, slot: slot}
    entry := c.ensure(k)
    require.Equal(t, 1, len(c.entries))
    require.Equal(t, c.entries[k], entry)

    c.delete(k)
    require.Equal(t, 0, len(c.entries))
    var nilEntry *cacheEntry
    require.Equal(t, nilEntry, c.entries[k])
}

func TestNewEntry(t *testing.T) {
    entry := &cacheEntry{}
    require.Equal(t, false, entry.dbidxInitialized())
    entry.ensureDbidx()
    require.Equal(t, true, entry.dbidxInitialized())
}

func TestDbidxBounds(t *testing.T) {
    scs := generateMinimalBlobSidecars(2)
    entry := &cacheEntry{}
    entry.ensureDbidx(scs...)
    //require.Equal(t, 2, len(entry.dbidx()))
    for i := range scs {
        require.Equal(t, bytesutil.ToBytes48(scs[i].KzgCommitment), *entry.dbidx()[i])
    }

    var nilPtr *[48]byte
    // test that duplicate sidecars are ignored
    orig := entry.dbidx()
    copy(scs[0].KzgCommitment[0:4], []byte("derp"))
    edited := bytesutil.ToBytes48(scs[0].KzgCommitment)
    require.Equal(t, false, *entry.dbidx()[0] == edited)
    entry.ensureDbidx(scs[0])
    for i := 2; i < fieldparams.MaxBlobsPerBlock; i++ {
        require.Equal(t, entry.dbidx()[i], nilPtr)
    }
    require.Equal(t, entry.dbidx(), orig)

    // test that excess sidecars are discarded
    oob := generateMinimalBlobSidecars(fieldparams.MaxBlobsPerBlock + 1)
    entry = &cacheEntry{}
    entry.ensureDbidx(oob...)
    require.Equal(t, fieldparams.MaxBlobsPerBlock, len(entry.dbidx()))
}

func TestDbidxMissing(t *testing.T) {
    scs := generateMinimalBlobSidecars(6)
    missing := []uint64{0, 1, 2, 3, 4, 5}
    blockCommits := commitsForSidecars(scs)
    cases := []struct {
        name     string
        nMissing int
        err      error
    }{
        {
            name:     "all missing",
            nMissing: len(scs),
        },
        {
            name:     "none missing",
            nMissing: 0,
        },
        {
            name:     "2 missing",
            nMissing: 2,
        },
        {
            name:     "3 missing",
            nMissing: 3,
        },
    }
    for _, c := range cases {
        t.Run(c.name, func(t *testing.T) {
            l := len(scs)
            entry := &cacheEntry{}
            d := entry.ensureDbidx(scs[0 : l-c.nMissing]...)
            m, err := d.missing(blockCommits)
            if c.err == nil {
                require.NoError(t, err)
            }
            require.DeepEqual(t, m, missing[l-c.nMissing:])
            require.Equal(t, c.nMissing, len(m))
        })
    }
}

func commitsForSidecars(scs []*ethpb.BlobSidecar) [][]byte {
    m := make([][]byte, len(scs))
    for i := range scs {
        m[i] = scs[i].KzgCommitment
    }
    return m
}

func generateMinimalBlobSidecars(n int) []*ethpb.BlobSidecar {
    scs := make([]*ethpb.BlobSidecar, n)
    for i := 0; i < n; i++ {
        scs[i] = &ethpb.BlobSidecar{
            Index:         uint64(i),
            KzgCommitment: bytesutil.PadTo([]byte{byte(i)}, 48),
        }
    }
    return scs
}
beacon-chain/das/error.go (new file, 76 lines)
@@ -0,0 +1,76 @@
package das

import (
    "fmt"
    "strings"

    errors "github.com/pkg/errors"
)

var (
    errDAIncomplete  = errors.New("some BlobSidecars are not available at this time")
    errDAEquivocated = errors.New("cache contains BlobSidecars that do not match block commitments")
    errMixedRoots    = errors.New("BlobSidecars must all be for the same block")
)

// The following errors are exported so that gossip verification can use errors.Is to determine the correct pubsub.ValidationResult.
var (
    // ErrInvalidInclusionProof is returned when the inclusion proof check on the BlobSidecar fails.
    ErrInvalidInclusionProof = errors.New("BlobSidecar inclusion proof is invalid")
    // ErrInvalidBlockSignature is returned when the BlobSidecar.SignedBeaconBlockHeader signature cannot be verified against the block root.
    ErrInvalidBlockSignature = errors.New("SignedBeaconBlockHeader signature could not be verified")
    // ErrInvalidCommitment is returned when the kzg_commitment cannot be verified against the kzg_proof and blob.
    ErrInvalidCommitment = errors.New("BlobSidecar.kzg_commitment verification failed")
)

func NewMissingIndicesError(missing []uint64) MissingIndicesError {
    return MissingIndicesError{indices: missing}
}

type MissingIndicesError struct {
    indices []uint64
}

var _ error = MissingIndicesError{}

func (m MissingIndicesError) Error() string {
    is := make([]string, 0, len(m.indices))
    for i := range m.indices {
        is = append(is, fmt.Sprintf("%d", m.indices[i]))
    }
    return fmt.Sprintf("%s at indices %s", errDAIncomplete.Error(), strings.Join(is, ","))
}

func (m MissingIndicesError) Missing() []uint64 {
    return m.indices
}

func (m MissingIndicesError) Unwrap() error {
    return errDAIncomplete
}

func NewCommitmentMismatchError(mismatch []uint64) CommitmentMismatchError {
    return CommitmentMismatchError{mismatch: mismatch}
}

type CommitmentMismatchError struct {
    mismatch []uint64
}

var _ error = CommitmentMismatchError{}

func (m CommitmentMismatchError) Error() string {
    is := make([]string, 0, len(m.mismatch))
    for i := range m.mismatch {
        is = append(is, fmt.Sprintf("%d", m.mismatch[i]))
    }
    return fmt.Sprintf("%s at indices %s", errDAEquivocated.Error(), strings.Join(is, ","))
}

func (m CommitmentMismatchError) Mismatch() []uint64 {
    return m.mismatch
}

func (m CommitmentMismatchError) Unwrap() error {
    return errDAEquivocated
}
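Because both error types unwrap to package-internal sentinels, callers outside the package are expected to branch on the concrete types. A hedged caller-side sketch (not part of the diff), assuming the package's log instance:

```go
// handleDAError shows how the two error types above can be told apart.
func handleDAError(err error) {
    var miss MissingIndicesError
    if errors.As(err, &miss) {
        // Sidecars simply have not arrived yet; callers can retry or fetch miss.Missing().
        log.WithField("missing", miss.Missing()).Debug("DA check incomplete")
        return
    }
    var mismatch CommitmentMismatchError
    if errors.As(err, &mismatch) {
        // Cached sidecars disagree with the block's commitments; treat the data as invalid.
        log.WithField("mismatch", mismatch.Mismatch()).Error("DA check found mismatched commitments")
        return
    }
    log.WithError(err).Error("DA check failed")
}
```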
beacon-chain/das/iface.go (new file, 22 lines)
@@ -0,0 +1,22 @@
package das

import (
    "context"

    "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

// BlobsDB specifies the persistence store methods needed by the AvailabilityStore.
type BlobsDB interface {
    BlobSidecarsByRoot(ctx context.Context, beaconBlockRoot [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error)
    SaveBlobSidecar(ctx context.Context, sidecars []*ethpb.BlobSidecar) error
}

// AvailabilityStore describes a component that can verify and save sidecars for a given block, and confirm previously
// verified and saved sidecars.
type AvailabilityStore interface {
    IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
    Persist(ctx context.Context, current primitives.Slot, sc ...*ethpb.BlobSidecar) ([]*ethpb.BlobSidecar, error)
}
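The interface implies a two-step call order for block processing: persist sidecars as they arrive, then gate block import on IsDataAvailable. A minimal consumer sketch (hypothetical, not part of the diff):

```go
// importWithDA shows the intended call order for an AvailabilityStore implementation.
func importWithDA(ctx context.Context, store AvailabilityStore, current primitives.Slot, blk blocks.ROBlock, scs []*ethpb.BlobSidecar) error {
    // Persist validates and stores the sidecars it is given, returning the subset it kept.
    if _, err := store.Persist(ctx, current, scs...); err != nil {
        return err
    }
    // IsDataAvailable succeeds only once every commitment in the block is
    // backed by a verified sidecar; otherwise it returns an error describing why not.
    return store.IsDataAvailable(ctx, current, blk)
}
```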
beacon-chain/das/mock.go (new file, 51 lines)
@@ -0,0 +1,51 @@
package das

import (
    "context"

    "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

type MockAvailabilityStore struct {
    VerifyAvailabilityCallback func(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
    PersistBlobsCallback       func(ctx context.Context, current primitives.Slot, sc ...*ethpb.BlobSidecar) ([]*ethpb.BlobSidecar, error)
}

var _ AvailabilityStore = &MockAvailabilityStore{}

func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
    if m.VerifyAvailabilityCallback != nil {
        return m.VerifyAvailabilityCallback(ctx, current, b)
    }
    return nil
}

func (m *MockAvailabilityStore) Persist(ctx context.Context, current primitives.Slot, sc ...*ethpb.BlobSidecar) ([]*ethpb.BlobSidecar, error) {
    if m.PersistBlobsCallback != nil {
        return m.PersistBlobsCallback(ctx, current, sc...)
    }
    return sc, nil
}

type mockBlobsDB struct {
    BlobSidecarsByRootCallback func(ctx context.Context, root [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error)
    SaveBlobSidecarCallback    func(ctx context.Context, sidecars []*ethpb.BlobSidecar) error
}

var _ BlobsDB = &mockBlobsDB{}

func (b *mockBlobsDB) BlobSidecarsByRoot(ctx context.Context, root [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
    if b.BlobSidecarsByRootCallback != nil {
        return b.BlobSidecarsByRootCallback(ctx, root, indices...)
    }
    return nil, nil
}

func (b *mockBlobsDB) SaveBlobSidecar(ctx context.Context, sidecars []*ethpb.BlobSidecar) error {
    if b.SaveBlobSidecarCallback != nil {
        return b.SaveBlobSidecarCallback(ctx, sidecars)
    }
    return nil
}
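The callbacks make it straightforward to force specific DA outcomes in tests. A hypothetical example (not part of the diff), using the package's require helpers:

```go
// TestImportFailsWhenDataUnavailable forces the DA check to fail so that a
// caller's error handling can be exercised.
func TestImportFailsWhenDataUnavailable(t *testing.T) {
    store := &MockAvailabilityStore{
        VerifyAvailabilityCallback: func(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
            return NewMissingIndicesError([]uint64{0, 1})
        },
    }
    err := store.IsDataAvailable(context.Background(), primitives.Slot(1), blocks.ROBlock{})
    require.NotNil(t, err)
}
```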
@@ -13,9 +13,9 @@ func NewDB(ctx context.Context, dirPath string) (Database, error) {
|
||||
return kv.NewKVStore(ctx, dirPath)
|
||||
}
|
||||
|
||||
// NewDBFilename uses the KVStoreDatafilePath so that if this layer of
|
||||
// NewFileName uses the KVStoreDatafilePath so that if this layer of
|
||||
// indirection between db.NewDB->kv.NewKVStore ever changes, it will be easy to remember
|
||||
// to also change this filename indirection at the same time.
|
||||
func NewDBFilename(dirPath string) string {
|
||||
return kv.KVStoreDatafilePath(dirPath)
|
||||
func NewFileName(dirPath string) string {
|
||||
return kv.StoreDatafilePath(dirPath)
|
||||
}
|
||||
|
||||
@@ -96,7 +96,7 @@ type NoHeadAccessDatabase interface {
|
||||
|
||||
// Blob operations.
|
||||
SaveBlobSidecar(ctx context.Context, sidecars []*ethpb.BlobSidecar) error
|
||||
DeleteBlobSidecar(ctx context.Context, beaconBlockRoot [32]byte) error
|
||||
DeleteBlobSidecars(ctx context.Context, beaconBlockRoot [32]byte) error
|
||||
|
||||
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
|
||||
}
|
||||
@@ -170,7 +170,7 @@ type SlasherDatabase interface {
|
||||
// Database interface with full access.
|
||||
type Database interface {
|
||||
io.Closer
|
||||
backup.BackupExporter
|
||||
backup.Exporter
|
||||
HeadAccessDatabase
|
||||
|
||||
DatabasePath() string
|
||||
|
||||
@@ -48,7 +48,7 @@ func (rk blobRotatingKey) BlockRoot() []byte {
|
||||
|
||||
// SaveBlobSidecar saves the blobs for a given epoch in the sidecar bucket. When we receive a blob:
|
||||
//
|
||||
// 1. Convert slot using a modulo operator to [0, maxSlots] where maxSlots = MAX_BLOB_EPOCHS*SLOTS_PER_EPOCH
|
||||
// 1. Convert slot using a modulo operator to [0, maxSlots] where maxSlots = MAX_EPOCHS_TO_PERSIST_BLOBS*SLOTS_PER_EPOCH
|
||||
//
|
||||
// 2. Compute key for blob as bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
|
||||
//
|
||||
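A rough sketch of the rotating key layout described in the comment above. The constants and byte ordering here are illustrative stand-ins, not the actual kv implementation:

```go
package sketch

import "encoding/binary"

const (
    slotsPerEpoch           = 32   // SLOTS_PER_EPOCH (mainnet value)
    maxEpochsToPersistBlobs = 4096 // stand-in for MAX_EPOCHS_TO_PERSIST_BLOBS; illustrative only
)

// blobSidecarKey lays out: bytes8(slot % maxSlots) ++ bytes8(slot) ++ block_root.
func blobSidecarKey(slot uint64, blockRoot [32]byte) []byte {
    maxSlots := uint64(maxEpochsToPersistBlobs * slotsPerEpoch)
    key := make([]byte, 8+8+32)
    binary.BigEndian.PutUint64(key[0:8], slot%maxSlots)
    binary.BigEndian.PutUint64(key[8:16], slot)
    copy(key[16:], blockRoot[:])
    return key
}
```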
@@ -121,7 +121,8 @@ func (s *Store) SaveBlobSidecar(ctx context.Context, scs []*ethpb.BlobSidecar) e
|
||||
})
|
||||
}
|
||||
|
||||
// validUniqueSidecars ensures that all sidecars have the same slot, parent root, block root, and proposer index, and no more than MAX_BLOB_EPOCHS.
|
||||
// validUniqueSidecars ensures that all sidecars have the same slot, parent root, block root, and proposer index, and
|
||||
// there are no more than MAX_BLOBS_PER_BLOCK sidecars.
|
||||
func validUniqueSidecars(scs []*ethpb.BlobSidecar) ([]*ethpb.BlobSidecar, error) {
|
||||
if len(scs) == 0 {
|
||||
return nil, errEmptySidecar
|
||||
@@ -206,6 +207,37 @@ func (s *Store) BlobSidecarsByRoot(ctx context.Context, root [32]byte, indices .
|
||||
return filterForIndices(sc, indices...)
|
||||
}
|
||||
|
||||
func (s *Store) BlobIndicesAvailable(ctx context.Context, root [32]byte) ([]uint64, error) {
    ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobIndicesAvailable")
    defer span.End()

    var enc []byte
    if err := s.db.View(func(tx *bolt.Tx) error {
        c := tx.Bucket(blobsBucket).Cursor()
        for k, v := c.First(); k != nil; k, v = c.Next() {
            if bytes.HasSuffix(k, root[:]) {
                enc = v
                break
            }
        }
        return nil
    }); err != nil {
        return nil, err
    }
    if enc == nil {
        return nil, ErrNotFound
    }
    scs := &ethpb.BlobSidecars{}
    if err := decode(ctx, enc, scs); err != nil {
        return nil, err
    }
    idxs := make([]uint64, len(scs.Sidecars))
    for i := range scs.Sidecars {
        idxs[i] = scs.Sidecars[i].Index
    }
    return idxs, nil
}

func filterForIndices(sc *ethpb.BlobSidecars, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
|
||||
if len(indices) == 0 {
|
||||
return sc.Sidecars, nil
|
||||
@@ -225,7 +257,7 @@ func filterForIndices(sc *ethpb.BlobSidecars, indices ...uint64) ([]*ethpb.BlobS
|
||||
}
|
||||
|
||||
// BlobSidecarsBySlot retrieves BlobSidecars for the given slot.
|
||||
// If the `indices` argument is omitted, all blobs for the root will be returned.
|
||||
// If the `indices` argument is omitted, all blobs for the slot will be returned.
|
||||
// Otherwise, the result will be filtered to only include the specified indices.
|
||||
// An error will result if an invalid index is specified.
|
||||
// The bucket size is bounded by 131072 entries. That's the most blobs a node will keep before rotating it out.
|
||||
@@ -260,8 +292,8 @@ func (s *Store) BlobSidecarsBySlot(ctx context.Context, slot types.Slot, indices
|
||||
return filterForIndices(sc, indices...)
|
||||
}
|
||||
|
||||
// DeleteBlobSidecar returns true if the blobs are in the db.
|
||||
func (s *Store) DeleteBlobSidecar(ctx context.Context, beaconBlockRoot [32]byte) error {
|
||||
// DeleteBlobSidecars returns true if the blobs are in the db.
|
||||
func (s *Store) DeleteBlobSidecars(ctx context.Context, beaconBlockRoot [32]byte) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.DeleteBlobSidecar")
|
||||
defer span.End()
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
|
||||
@@ -91,7 +91,7 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
|
||||
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
|
||||
|
||||
// we'll request indices 0 and 3, so make a slice with those indices for comparison
|
||||
expect := make([]*ethpb.BlobSidecar, 2)
|
||||
@@ -108,7 +108,7 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
|
||||
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
|
||||
|
||||
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot), uint64(len(scs)))
|
||||
require.ErrorIs(t, err, ErrNotFound)
|
||||
@@ -127,7 +127,7 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
|
||||
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
|
||||
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, equalBlobSlices(scs, got))
|
||||
@@ -147,7 +147,7 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
|
||||
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
|
||||
|
||||
// we'll request indices 0 and 3, so make a slice with those indices for comparison
|
||||
expect := make([]*ethpb.BlobSidecar, 2)
|
||||
@@ -165,7 +165,7 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
|
||||
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
|
||||
|
||||
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot, uint64(len(scs)))
|
||||
require.ErrorIs(t, err, ErrNotFound)
|
||||
@@ -175,11 +175,11 @@ func TestStore_BlobSidecars(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
|
||||
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
|
||||
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, equalBlobSlices(scs, got))
|
||||
require.NoError(t, db.DeleteBlobSidecar(ctx, bytesutil.ToBytes32(scs[0].BlockRoot)))
|
||||
require.NoError(t, db.DeleteBlobSidecars(ctx, bytesutil.ToBytes32(scs[0].BlockRoot)))
|
||||
got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
|
||||
require.ErrorIs(t, ErrNotFound, err)
|
||||
require.Equal(t, 0, len(got))
|
||||
|
||||
@@ -92,10 +92,10 @@ type Store struct {
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
// KVStoreDatafilePath is the canonical construction of a full
|
||||
// StoreDatafilePath is the canonical construction of a full
|
||||
// database file path from the directory path, so that code outside
|
||||
// this package can find the full path in a consistent way.
|
||||
func KVStoreDatafilePath(dirPath string) string {
|
||||
func StoreDatafilePath(dirPath string) string {
|
||||
return path.Join(dirPath, DatabaseFileName)
|
||||
}
|
||||
|
||||
@@ -146,7 +146,7 @@ func NewKVStore(ctx context.Context, dirPath string) (*Store, error) {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
datafile := KVStoreDatafilePath(dirPath)
|
||||
datafile := StoreDatafilePath(dirPath)
|
||||
log.Infof("Opening Bolt DB at %s", datafile)
|
||||
boltDB, err := bolt.Open(
|
||||
datafile,
|
||||
|
||||
@@ -22,7 +22,7 @@ func Restore(cliCtx *cli.Context) error {
|
||||
targetDir := cliCtx.String(cmd.RestoreTargetDirFlag.Name)
|
||||
|
||||
restoreDir := path.Join(targetDir, kv.BeaconNodeDbDirName)
|
||||
if file.FileExists(path.Join(restoreDir, kv.DatabaseFileName)) {
|
||||
if file.Exists(path.Join(restoreDir, kv.DatabaseFileName)) {
|
||||
resp, err := prompt.ValidatePrompt(
|
||||
os.Stdin, dbExistsYesNoPrompt, prompt.ValidateYesOrNo,
|
||||
)
|
||||
|
||||
@@ -48,6 +48,8 @@ func TestLatestMainchainInfo_OK(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
testAcc.Backend.Commit()
|
||||
|
||||
tickerChan := make(chan time.Time)
|
||||
web3Service.eth1HeadTicker = &time.Ticker{C: tickerChan}
|
||||
exitRoutine := make(chan bool)
|
||||
|
||||
go func() {
|
||||
@@ -58,8 +60,6 @@ func TestLatestMainchainInfo_OK(t *testing.T) {
|
||||
header, err := web3Service.HeaderByNumber(web3Service.ctx, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
tickerChan := make(chan time.Time)
|
||||
web3Service.eth1HeadTicker = &time.Ticker{C: tickerChan}
|
||||
tickerChan <- time.Now()
|
||||
web3Service.cancel()
|
||||
exitRoutine <- true
|
||||
|
||||
@@ -64,10 +64,10 @@ const (
|
||||
GetPayloadMethodV3 = "engine_getPayloadV3"
|
||||
// ExchangeTransitionConfigurationMethod v1 request string for JSON-RPC.
|
||||
ExchangeTransitionConfigurationMethod = "engine_exchangeTransitionConfigurationV1"
|
||||
// ExecutionBlockByHashMethod request string for JSON-RPC.
|
||||
ExecutionBlockByHashMethod = "eth_getBlockByHash"
|
||||
// ExecutionBlockByNumberMethod request string for JSON-RPC.
|
||||
ExecutionBlockByNumberMethod = "eth_getBlockByNumber"
|
||||
// BlockByHashMethod request string for JSON-RPC.
|
||||
BlockByHashMethod = "eth_getBlockByHash"
|
||||
// BlockByNumberMethod request string for JSON-RPC.
|
||||
BlockByNumberMethod = "eth_getBlockByNumber"
|
||||
// GetPayloadBodiesByHashV1 v1 request string for JSON-RPC.
|
||||
GetPayloadBodiesByHashV1 = "engine_getPayloadBodiesByHashV1"
|
||||
// GetPayloadBodiesByRangeV1 v1 request string for JSON-RPC.
|
||||
@@ -89,7 +89,7 @@ type ForkchoiceUpdatedResponse struct {
|
||||
// ExecutionPayloadReconstructor defines a service that can reconstruct a full beacon
|
||||
// block with an execution payload from a signed beacon block and a connection
|
||||
// to an execution client's engine API.
|
||||
type ExecutionPayloadReconstructor interface {
|
||||
type PayloadReconstructor interface {
|
||||
ReconstructFullBlock(
|
||||
ctx context.Context, blindedBlock interfaces.ReadOnlySignedBeaconBlock,
|
||||
) (interfaces.SignedBeaconBlock, error)
|
||||
@@ -463,7 +463,7 @@ func (s *Service) LatestExecutionBlock(ctx context.Context) (*pb.ExecutionBlock,
|
||||
err := s.rpcClient.CallContext(
|
||||
ctx,
|
||||
result,
|
||||
ExecutionBlockByNumberMethod,
|
||||
BlockByNumberMethod,
|
||||
"latest",
|
||||
false, /* no full transaction objects */
|
||||
)
|
||||
@@ -476,7 +476,7 @@ func (s *Service) ExecutionBlockByHash(ctx context.Context, hash common.Hash, wi
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.ExecutionBlockByHash")
|
||||
defer span.End()
|
||||
result := &pb.ExecutionBlock{}
|
||||
err := s.rpcClient.CallContext(ctx, result, ExecutionBlockByHashMethod, hash, withTxs)
|
||||
err := s.rpcClient.CallContext(ctx, result, BlockByHashMethod, hash, withTxs)
|
||||
return result, handleRPCError(err)
|
||||
}
|
||||
|
||||
@@ -495,7 +495,7 @@ func (s *Service) ExecutionBlocksByHashes(ctx context.Context, hashes []common.H
|
||||
blk := &pb.ExecutionBlock{}
|
||||
newH := h
|
||||
elems = append(elems, gethRPC.BatchElem{
|
||||
Method: ExecutionBlockByHashMethod,
|
||||
Method: BlockByHashMethod,
|
||||
Args: []interface{}{newH, withTxs},
|
||||
Result: blk,
|
||||
Error: error(nil),
|
||||
@@ -517,7 +517,7 @@ func (s *Service) ExecutionBlocksByHashes(ctx context.Context, hashes []common.H
|
||||
// HeaderByHash returns the relevant header details for the provided block hash.
|
||||
func (s *Service) HeaderByHash(ctx context.Context, hash common.Hash) (*types.HeaderInfo, error) {
|
||||
var hdr *types.HeaderInfo
|
||||
err := s.rpcClient.CallContext(ctx, &hdr, ExecutionBlockByHashMethod, hash, false /* no transactions */)
|
||||
err := s.rpcClient.CallContext(ctx, &hdr, BlockByHashMethod, hash, false /* no transactions */)
|
||||
if err == nil && hdr == nil {
|
||||
err = ethereum.NotFound
|
||||
}
|
||||
@@ -527,7 +527,7 @@ func (s *Service) HeaderByHash(ctx context.Context, hash common.Hash) (*types.He
|
||||
// HeaderByNumber returns the relevant header details for the provided block number.
|
||||
func (s *Service) HeaderByNumber(ctx context.Context, number *big.Int) (*types.HeaderInfo, error) {
|
||||
var hdr *types.HeaderInfo
|
||||
err := s.rpcClient.CallContext(ctx, &hdr, ExecutionBlockByNumberMethod, toBlockNumArg(number), false /* no transactions */)
|
||||
err := s.rpcClient.CallContext(ctx, &hdr, BlockByNumberMethod, toBlockNumArg(number), false /* no transactions */)
|
||||
if err == nil && hdr == nil {
|
||||
err = ethereum.NotFound
|
||||
}
|
||||
@@ -622,9 +622,9 @@ func (s *Service) ReconstructFullBellatrixBlockBatch(
|
||||
if len(blindedBlocks) == 0 {
|
||||
return []interfaces.SignedBeaconBlock{}, nil
|
||||
}
|
||||
executionHashes := []common.Hash{}
|
||||
validExecPayloads := []int{}
|
||||
zeroExecPayloads := []int{}
|
||||
var executionHashes []common.Hash
|
||||
var validExecPayloads []int
|
||||
var zeroExecPayloads []int
|
||||
for i, b := range blindedBlocks {
|
||||
if err := blocks.BeaconBlockIsNil(b); err != nil {
|
||||
return nil, errors.Wrap(err, "cannot reconstruct bellatrix block from nil data")
|
||||
|
||||
@@ -37,9 +37,9 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
_ = ExecutionPayloadReconstructor(&Service{})
|
||||
_ = PayloadReconstructor(&Service{})
|
||||
_ = EngineCaller(&Service{})
|
||||
_ = ExecutionPayloadReconstructor(&Service{})
|
||||
_ = PayloadReconstructor(&Service{})
|
||||
_ = EngineCaller(&mocks.EngineClient{})
|
||||
)
|
||||
|
||||
@@ -141,14 +141,14 @@ func TestClient_IPC(t *testing.T) {
|
||||
err := srv.ExchangeTransitionConfiguration(ctx, want)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
t.Run(ExecutionBlockByNumberMethod, func(t *testing.T) {
|
||||
t.Run(BlockByNumberMethod, func(t *testing.T) {
|
||||
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
|
||||
require.Equal(t, true, ok)
|
||||
resp, err := srv.LatestExecutionBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want, resp)
|
||||
})
|
||||
t.Run(ExecutionBlockByHashMethod, func(t *testing.T) {
|
||||
t.Run(BlockByHashMethod, func(t *testing.T) {
|
||||
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
|
||||
require.Equal(t, true, ok)
|
||||
arg := common.BytesToHash([]byte("foo"))
|
||||
@@ -644,7 +644,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
require.ErrorIs(t, ErrUnknownPayloadStatus, err)
|
||||
require.DeepEqual(t, []uint8(nil), resp)
|
||||
})
|
||||
t.Run(ExecutionBlockByNumberMethod, func(t *testing.T) {
|
||||
t.Run(BlockByNumberMethod, func(t *testing.T) {
|
||||
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
|
||||
require.Equal(t, true, ok)
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -712,7 +712,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
err = client.ExchangeTransitionConfiguration(ctx, want)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
t.Run(ExecutionBlockByHashMethod, func(t *testing.T) {
|
||||
t.Run(BlockByHashMethod, func(t *testing.T) {
|
||||
arg := common.BytesToHash([]byte("foo"))
|
||||
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
@@ -32,7 +32,7 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
depositEventSignature = hash.HashKeccak256([]byte("DepositEvent(bytes,bytes,bytes,bytes,bytes)"))
|
||||
depositEventSignature = hash.Keccak256([]byte("DepositEvent(bytes,bytes,bytes,bytes,bytes)"))
|
||||
)
|
||||
|
||||
const eth1DataSavingInterval = 1000
|
||||
|
||||
@@ -208,13 +208,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.ensureValidPowchainData(ctx); err != nil {
|
||||
return nil, errors.Wrap(err, "unable to validate powchain data")
|
||||
}
|
||||
|
||||
eth1Data, err := s.cfg.beaconDB.ExecutionChainData(ctx)
|
||||
eth1Data, err := s.validPowchainData(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to retrieve eth1 data")
|
||||
return nil, errors.Wrap(err, "unable to validate powchain data")
|
||||
}
|
||||
if err := s.initializeEth1Data(ctx, eth1Data); err != nil {
|
||||
return nil, err
|
||||
@@ -822,23 +818,22 @@ func validateDepositContainers(ctrs []*ethpb.DepositContainer) bool {
|
||||
|
||||
// Validates the current powchain data is saved and makes sure that any
|
||||
// embedded genesis state is correctly accounted for.
|
||||
func (s *Service) ensureValidPowchainData(ctx context.Context) error {
|
||||
func (s *Service) validPowchainData(ctx context.Context) (*ethpb.ETH1ChainData, error) {
|
||||
genState, err := s.cfg.beaconDB.GenesisState(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Exit early if no genesis state is saved.
|
||||
if genState == nil || genState.IsNil() {
|
||||
return nil
|
||||
return nil, err
|
||||
}
|
||||
eth1Data, err := s.cfg.beaconDB.ExecutionChainData(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to retrieve eth1 data")
|
||||
return nil, errors.Wrap(err, "unable to retrieve eth1 data")
|
||||
}
|
||||
if genState == nil || genState.IsNil() {
|
||||
return eth1Data, nil
|
||||
}
|
||||
if eth1Data == nil || !eth1Data.ChainstartData.Chainstarted || !validateDepositContainers(eth1Data.DepositContainers) {
|
||||
pbState, err := native.ProtobufBeaconStatePhase0(s.preGenesisState.ToProtoUnsafe())
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
s.chainStartData = ðpb.ChainStartData{
|
||||
Chainstarted: true,
|
||||
@@ -856,22 +851,24 @@ func (s *Service) ensureValidPowchainData(ctx context.Context) error {
|
||||
if features.Get().EnableEIP4881 {
|
||||
trie, ok := s.depositTrie.(*depositsnapshot.DepositTree)
|
||||
if !ok {
|
||||
return errors.New("deposit trie was not EIP4881 DepositTree")
|
||||
return nil, errors.New("deposit trie was not EIP4881 DepositTree")
|
||||
}
|
||||
eth1Data.DepositSnapshot, err = trie.ToProto()
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
trie, ok := s.depositTrie.(*trie.SparseMerkleTrie)
|
||||
if !ok {
|
||||
return errors.New("deposit trie was not SparseMerkleTrie")
|
||||
return nil, errors.New("deposit trie was not SparseMerkleTrie")
|
||||
}
|
||||
eth1Data.Trie = trie.ToProto()
|
||||
}
|
||||
return s.cfg.beaconDB.SaveExecutionChainData(ctx, eth1Data)
|
||||
if err := s.cfg.beaconDB.SaveExecutionChainData(ctx, eth1Data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
return eth1Data, nil
|
||||
}
|
||||
|
||||
func dedupEndpoints(endpoints []string) []string {
|
||||
|
||||
@@ -571,7 +571,8 @@ func TestService_EnsureConsistentPowchainData(t *testing.T) {
|
||||
assert.NoError(t, genState.SetSlot(1000))
|
||||
|
||||
require.NoError(t, s1.cfg.beaconDB.SaveGenesisData(context.Background(), genState))
|
||||
require.NoError(t, s1.ensureValidPowchainData(context.Background()))
|
||||
_, err = s1.validPowchainData(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
eth1Data, err := s1.cfg.beaconDB.ExecutionChainData(context.Background())
|
||||
assert.NoError(t, err)
|
||||
@@ -601,7 +602,8 @@ func TestService_InitializeCorrectly(t *testing.T) {
|
||||
assert.NoError(t, genState.SetSlot(1000))
|
||||
|
||||
require.NoError(t, s1.cfg.beaconDB.SaveGenesisData(context.Background(), genState))
|
||||
require.NoError(t, s1.ensureValidPowchainData(context.Background()))
|
||||
_, err = s1.validPowchainData(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
eth1Data, err := s1.cfg.beaconDB.ExecutionChainData(context.Background())
|
||||
assert.NoError(t, err)
|
||||
@@ -636,7 +638,8 @@ func TestService_EnsureValidPowchainData(t *testing.T) {
|
||||
DepositContainers: []*ethpb.DepositContainer{{Index: 1}},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s1.ensureValidPowchainData(context.Background()))
|
||||
_, err = s1.validPowchainData(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
eth1Data, err := s1.cfg.beaconDB.ExecutionChainData(context.Background())
|
||||
assert.NoError(t, err)
|
||||
|
||||
@@ -13,10 +13,7 @@ func (f *ForkChoice) LastRoot(epoch primitives.Epoch) [32]byte {
|
||||
if err != nil {
|
||||
return [32]byte{}
|
||||
}
|
||||
if headEpoch < epoch {
|
||||
return [32]byte{}
|
||||
}
|
||||
if headEpoch == epoch {
|
||||
if headEpoch <= epoch {
|
||||
return head.root
|
||||
}
|
||||
for head != nil && head.slot > epochEnd {
|
||||
|
||||
@@ -35,5 +35,5 @@ func TestLastRoot(t *testing.T) {
|
||||
require.Equal(t, [32]byte{'6'}, f.store.headNode.root)
|
||||
require.Equal(t, [32]byte{'2'}, f.LastRoot(0))
|
||||
require.Equal(t, [32]byte{'6'}, f.LastRoot(1))
|
||||
require.Equal(t, [32]byte{}, f.LastRoot(2))
|
||||
require.Equal(t, [32]byte{'6'}, f.LastRoot(2))
|
||||
}
|
||||
|
||||
@@ -52,7 +52,6 @@ func DefaultConfig(enableDebugRPCEndpoints bool, httpModules string) MuxConfig {
|
||||
}
|
||||
if flags.EnableHTTPEthAPI(httpModules) {
|
||||
ethRegistrations := []gateway.PbHandlerRegistration{
|
||||
ethpbservice.RegisterBeaconNodeHandler,
|
||||
ethpbservice.RegisterBeaconChainHandler,
|
||||
ethpbservice.RegisterBeaconValidatorHandler,
|
||||
ethpbservice.RegisterEventsHandler,
|
||||
|
||||
@@ -15,7 +15,7 @@ func TestDefaultConfig(t *testing.T) {
|
||||
require.Equal(t, 2, len(cfg.EthPbMux.Patterns))
|
||||
assert.Equal(t, "/internal/eth/v1/", cfg.EthPbMux.Patterns[0])
|
||||
assert.Equal(t, "/internal/eth/v2/", cfg.EthPbMux.Patterns[1])
|
||||
assert.Equal(t, 4, len(cfg.EthPbMux.Registrations))
|
||||
assert.Equal(t, 3, len(cfg.EthPbMux.Registrations))
|
||||
assert.NotNil(t, cfg.V1AlphaPbMux.Mux)
|
||||
require.Equal(t, 2, len(cfg.V1AlphaPbMux.Patterns))
|
||||
assert.Equal(t, "/eth/v1alpha1/", cfg.V1AlphaPbMux.Patterns[0])
|
||||
@@ -29,7 +29,7 @@ func TestDefaultConfig(t *testing.T) {
|
||||
require.Equal(t, 2, len(cfg.EthPbMux.Patterns))
|
||||
assert.Equal(t, "/internal/eth/v1/", cfg.EthPbMux.Patterns[0])
|
||||
assert.Equal(t, "/internal/eth/v2/", cfg.EthPbMux.Patterns[1])
|
||||
assert.Equal(t, 5, len(cfg.EthPbMux.Registrations))
|
||||
assert.Equal(t, 4, len(cfg.EthPbMux.Registrations))
|
||||
assert.NotNil(t, cfg.V1AlphaPbMux.Mux)
|
||||
require.Equal(t, 2, len(cfg.V1AlphaPbMux.Patterns))
|
||||
assert.Equal(t, "/eth/v1alpha1/", cfg.V1AlphaPbMux.Patterns[0])
|
||||
@@ -41,7 +41,7 @@ func TestDefaultConfig(t *testing.T) {
|
||||
assert.NotNil(t, cfg.EthPbMux.Mux)
|
||||
require.Equal(t, 2, len(cfg.EthPbMux.Patterns))
|
||||
assert.Equal(t, "/internal/eth/v1/", cfg.EthPbMux.Patterns[0])
|
||||
assert.Equal(t, 5, len(cfg.EthPbMux.Registrations))
|
||||
assert.Equal(t, 4, len(cfg.EthPbMux.Registrations))
|
||||
assert.Equal(t, (*gateway.PbMux)(nil), cfg.V1AlphaPbMux)
|
||||
})
|
||||
t.Run("Without Eth API", func(t *testing.T) {
|
||||
|
||||
@@ -16,6 +16,7 @@ go_library(
|
||||
],
|
||||
deps = [
|
||||
"//api/gateway:go_default_library",
|
||||
"//api/server:go_default_library",
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/builder:go_default_library",
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/pkg/errors"
|
||||
apigateway "github.com/prysmaticlabs/prysm/v4/api/gateway"
|
||||
"github.com/prysmaticlabs/prysm/v4/api/server"
|
||||
"github.com/prysmaticlabs/prysm/v4/async/event"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/builder"
|
||||
@@ -244,8 +245,12 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debugln("Registering Blockchain Service")
|
||||
if err := beacon.registerBlockchainService(beacon.forkChoicer, synchronizer, beacon.initialSyncComplete); err != nil {
|
||||
bcOpts := []blockchain.Option{
|
||||
blockchain.WithForkChoiceStore(beacon.forkChoicer),
|
||||
blockchain.WithClockSynchronizer(synchronizer),
|
||||
blockchain.WithSyncComplete(beacon.initialSyncComplete),
|
||||
}
|
||||
if err := beacon.registerBlockchainService(bcOpts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -271,6 +276,7 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
|
||||
|
||||
log.Debugln("Registering RPC Service")
|
||||
router := mux.NewRouter()
|
||||
router.Use(server.NormalizeQueryValuesHandler)
|
||||
if err := beacon.registerRPCService(router); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -293,9 +299,9 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
|
||||
}
|
||||
|
||||
// db.DatabasePath is the path to the containing directory
|
||||
// db.NewDBFilename expands that to the canonical full path using
|
||||
// db.NewFileName expands that to the canonical full path using
|
||||
// the same construction as NewDB()
|
||||
c, err := newBeaconNodePromCollector(db.NewDBFilename(beacon.db.DatabasePath()))
|
||||
c, err := newBeaconNodePromCollector(db.NewFileName(beacon.db.DatabasePath()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -512,7 +518,7 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
|
||||
}
|
||||
|
||||
func (b *BeaconNode) startStateGen(ctx context.Context, bfs *backfill.Status, fc forkchoice.ForkChoicer) error {
|
||||
opts := []stategen.StateGenOption{stategen.WithBackfillStatus(bfs)}
|
||||
opts := []stategen.Option{stategen.WithBackfillStatus(bfs)}
|
||||
sg := stategen.New(b.db, fc, opts...)
|
||||
|
||||
cp, err := b.db.FinalizedCheckpoint(ctx)
|
||||
@@ -605,7 +611,8 @@ func (b *BeaconNode) registerAttestationPool() error {
|
||||
return b.services.RegisterService(s)
|
||||
}
|
||||
|
||||
func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *startup.ClockSynchronizer, syncComplete chan struct{}) error {
|
||||
func (b *BeaconNode) registerBlockchainService(required []blockchain.Option) error {
|
||||
log.Debugln("Registering Blockchain Service")
|
||||
var web3Service *execution.Service
|
||||
if err := b.services.FetchService(&web3Service); err != nil {
|
||||
return err
|
||||
@@ -616,10 +623,9 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
|
||||
return err
|
||||
}
|
||||
|
||||
opts := append(b.serviceFlagOpts.blockchainFlagOpts, required...)
|
||||
// skipcq: CRT-D0001
|
||||
opts := append(
|
||||
b.serviceFlagOpts.blockchainFlagOpts,
|
||||
blockchain.WithForkChoiceStore(fc),
|
||||
opts = append(opts,
|
||||
blockchain.WithDatabase(b.db),
|
||||
blockchain.WithDepositCache(b.depositCache),
|
||||
blockchain.WithChainStartFetcher(web3Service),
|
||||
@@ -635,8 +641,6 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
|
||||
blockchain.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
|
||||
blockchain.WithFinalizedStateAtStartUp(b.finalizedStateAtStartUp),
|
||||
blockchain.WithProposerIdsCache(b.proposerIdsCache),
|
||||
blockchain.WithClockSynchronizer(gs),
|
||||
blockchain.WithSyncComplete(syncComplete),
|
||||
)
|
||||
|
||||
blockchainService, err := blockchain.NewService(b.ctx, opts...)
|
||||
@@ -711,9 +715,10 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}) erro
|
||||
regularsync.WithStateGen(b.stateGen),
|
||||
regularsync.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
|
||||
regularsync.WithSlasherBlockHeadersFeed(b.slasherBlockHeadersFeed),
|
||||
regularsync.WithExecutionPayloadReconstructor(web3Service),
|
||||
regularsync.WithPayloadReconstructor(web3Service),
|
||||
regularsync.WithClockWaiter(b.clockWaiter),
|
||||
regularsync.WithInitialSyncComplete(initialSyncComplete),
|
||||
regularsync.WithStateNotifier(b),
|
||||
)
|
||||
return b.services.RegisterService(rs)
|
||||
}
|
||||
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
var hashFn = hash.HashProto
|
||||
var hashFn = hash.Proto
|
||||
|
||||
// AttCaches defines the caches used to satisfy attestation pool interface.
|
||||
// These caches are KV store for various attestations
|
||||
|
||||
@@ -119,7 +119,7 @@ func (s *Service) aggregateAndSaveForkChoiceAtts(atts []*ethpb.Attestation) erro
|
||||
// This checks if the attestation has previously been aggregated for fork choice
|
||||
// return true if yes, false if no.
|
||||
func (s *Service) seen(att *ethpb.Attestation) (bool, error) {
|
||||
attRoot, err := hash.HashProto(att.Data)
|
||||
attRoot, err := hash.Proto(att.Data)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
@@ -74,7 +74,8 @@ func (s *Store) SaveSyncCommitteeMessage(msg *ethpb.SyncCommitteeMessage) error
|
||||
}
|
||||
|
||||
// SyncCommitteeMessages returns sync committee messages by slot from the priority queue.
|
||||
// Upon retrieval, the message is removed from the queue.
|
||||
// When calling this method a copy is avoided as the caller is assumed to be only reading the
|
||||
// messages from the store rather than modifying it.
|
||||
func (s *Store) SyncCommitteeMessages(slot primitives.Slot) ([]*ethpb.SyncCommitteeMessage, error) {
|
||||
s.messageLock.RLock()
|
||||
defer s.messageLock.RUnlock()
|
||||
|
||||
@@ -461,7 +461,7 @@ func convertToUdpMultiAddr(node *enode.Node) ([]ma.Multiaddr, error) {
|
||||
}
|
||||
|
||||
func peerIdsFromMultiAddrs(addrs []ma.Multiaddr) []peer.ID {
|
||||
peers := []peer.ID{}
|
||||
var peers []peer.ID
|
||||
for _, a := range addrs {
|
||||
info, err := peer.AddrInfoFromP2pAddr(a)
|
||||
if err != nil {
|
||||
|
||||
@@ -118,7 +118,7 @@ func (s *Store) SetTrustedPeers(peers []peer.ID) {
|
||||
// GetTrustedPeers gets our desired trusted peer ids.
|
||||
// Important: it is assumed that store mutex is locked when calling this method.
|
||||
func (s *Store) GetTrustedPeers() []peer.ID {
|
||||
peers := []peer.ID{}
|
||||
var peers []peer.ID
|
||||
for p := range s.trustedPeers {
|
||||
peers = append(peers, p)
|
||||
}
|
||||
|
||||
@@ -788,7 +788,7 @@ func TestPrunePeers_TrustedPeers(t *testing.T) {
|
||||
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
|
||||
}
|
||||
|
||||
trustedPeers := []peer.ID{}
|
||||
var trustedPeers []peer.ID
|
||||
// Set up bad scores for inbound peers.
|
||||
inboundPeers := p.InboundConnected()
|
||||
for i, pid := range inboundPeers {
|
||||
|
||||
@@ -2,6 +2,8 @@ package testing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"google.golang.org/protobuf/proto"
|
||||
@@ -9,33 +11,53 @@ import (
|
||||
|
||||
// MockBroadcaster implements p2p.Broadcaster for testing.
|
||||
type MockBroadcaster struct {
|
||||
BroadcastCalled bool
|
||||
BroadcastCalled atomic.Bool
|
||||
BroadcastMessages []proto.Message
|
||||
BroadcastAttestations []*ethpb.Attestation
|
||||
msgLock sync.Mutex
|
||||
attLock sync.Mutex
|
||||
}
|
||||
|
||||
// Broadcast records a broadcast occurred.
|
||||
func (m *MockBroadcaster) Broadcast(_ context.Context, msg proto.Message) error {
|
||||
m.BroadcastCalled = true
|
||||
m.BroadcastCalled.Store(true)
|
||||
m.msgLock.Lock()
|
||||
defer m.msgLock.Unlock()
|
||||
m.BroadcastMessages = append(m.BroadcastMessages, msg)
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastAttestation records a broadcast occurred.
|
||||
func (m *MockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, a *ethpb.Attestation) error {
|
||||
m.BroadcastCalled = true
|
||||
m.BroadcastCalled.Store(true)
|
||||
m.attLock.Lock()
|
||||
defer m.attLock.Unlock()
|
||||
m.BroadcastAttestations = append(m.BroadcastAttestations, a)
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastSyncCommitteeMessage records a broadcast occurred.
|
||||
func (m *MockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
|
||||
m.BroadcastCalled = true
|
||||
m.BroadcastCalled.Store(true)
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastBlob broadcasts a blob for mock.
|
||||
func (m *MockBroadcaster) BroadcastBlob(context.Context, uint64, *ethpb.SignedBlobSidecar) error {
|
||||
m.BroadcastCalled = true
|
||||
m.BroadcastCalled.Store(true)
|
||||
return nil
|
||||
}
|
||||
|
||||
// NumMessages returns the number of messages broadcasted.
|
||||
func (m *MockBroadcaster) NumMessages() int {
|
||||
m.msgLock.Lock()
|
||||
defer m.msgLock.Unlock()
|
||||
return len(m.BroadcastMessages)
|
||||
}
|
||||
|
||||
// NumAttestations returns the number of attestations broadcasted.
|
||||
func (m *MockBroadcaster) NumAttestations() int {
|
||||
m.attLock.Lock()
|
||||
defer m.attLock.Unlock()
|
||||
return len(m.BroadcastAttestations)
|
||||
}
|
||||
|
||||
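With BroadcastCalled now an atomic.Bool and the message slices guarded by mutexes, tests read the flag through Load and use the accessor methods rather than touching the slices directly. A hypothetical test-side usage (not part of the diff):

```go
func TestBroadcastRecorded(t *testing.T) {
    b := &MockBroadcaster{}
    require.NoError(t, b.Broadcast(context.Background(), &ethpb.Attestation{}))
    require.Equal(t, true, b.BroadcastCalled.Load())
    require.Equal(t, 1, b.NumMessages())
}
```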
@@ -6,6 +6,7 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -41,7 +42,7 @@ type TestP2P struct {
|
||||
BHost host.Host
|
||||
pubsub *pubsub.PubSub
|
||||
joinedTopics map[string]*pubsub.Topic
|
||||
BroadcastCalled bool
|
||||
BroadcastCalled atomic.Bool
|
||||
DelaySend bool
|
||||
Digest [4]byte
|
||||
peers *peers.Status
|
||||
@@ -160,25 +161,25 @@ func (p *TestP2P) ReceivePubSub(topic string, msg proto.Message) {
|
||||
|
||||
// Broadcast a message.
|
||||
func (p *TestP2P) Broadcast(_ context.Context, _ proto.Message) error {
|
||||
p.BroadcastCalled = true
|
||||
p.BroadcastCalled.Store(true)
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastAttestation broadcasts an attestation.
|
||||
func (p *TestP2P) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Attestation) error {
|
||||
p.BroadcastCalled = true
|
||||
p.BroadcastCalled.Store(true)
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastSyncCommitteeMessage broadcasts a sync committee message.
|
||||
func (p *TestP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
|
||||
p.BroadcastCalled = true
|
||||
p.BroadcastCalled.Store(true)
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastBlob broadcasts a blob for mock.
|
||||
func (p *TestP2P) BroadcastBlob(context.Context, uint64, *ethpb.SignedBlobSidecar) error {
|
||||
p.BroadcastCalled = true
|
||||
p.BroadcastCalled.Store(true)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -16,7 +16,6 @@ go_library(
|
||||
"//api/gateway/apimiddleware:go_default_library",
|
||||
"//api/grpc:go_default_library",
|
||||
"//beacon-chain/rpc/eth/events:go_default_library",
|
||||
"//beacon-chain/rpc/eth/shared:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//network/http:go_default_library",
|
||||
@@ -33,6 +32,7 @@ go_test(
|
||||
srcs = [
|
||||
"custom_handlers_test.go",
|
||||
"custom_hooks_test.go",
|
||||
"endpoint_factory_test.go",
|
||||
"structs_marshalling_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
|
||||
@@ -25,51 +25,6 @@ type sszConfig struct {
|
||||
responseJson SszResponse
|
||||
}
|
||||
|
||||
func handleGetBeaconStateSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
|
||||
config := sszConfig{
|
||||
fileName: "beacon_state.ssz",
|
||||
responseJson: &SszResponseJson{},
|
||||
}
|
||||
return handleGetSSZ(m, endpoint, w, req, config)
|
||||
}
|
||||
|
||||
func handleGetBeaconBlockSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
|
||||
config := sszConfig{
|
||||
fileName: "beacon_block.ssz",
|
||||
responseJson: &SszResponseJson{},
|
||||
}
|
||||
return handleGetSSZ(m, endpoint, w, req, config)
|
||||
}
|
||||
|
||||
func handleGetBeaconStateSSZV2(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
|
||||
config := sszConfig{
|
||||
fileName: "beacon_state.ssz",
|
||||
responseJson: &VersionedSSZResponseJson{},
|
||||
}
|
||||
return handleGetSSZ(m, endpoint, w, req, config)
|
||||
}
|
||||
|
||||
func handleGetBeaconBlockSSZV2(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
|
||||
config := sszConfig{
|
||||
fileName: "beacon_block.ssz",
|
||||
responseJson: &VersionedSSZResponseJson{},
|
||||
}
|
||||
return handleGetSSZ(m, endpoint, w, req, config)
|
||||
}
|
||||
|
||||
func handleGetBlindedBeaconBlockSSZ(
|
||||
m *apimiddleware.ApiProxyMiddleware,
|
||||
endpoint apimiddleware.Endpoint,
|
||||
w http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) (handled bool) {
|
||||
config := sszConfig{
|
||||
fileName: "beacon_block.ssz",
|
||||
responseJson: &VersionedSSZResponseJson{},
|
||||
}
|
||||
return handleGetSSZ(m, endpoint, w, req, config)
|
||||
}
|
||||
|
||||
func handleProduceBlockSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
|
||||
config := sszConfig{
|
||||
fileName: "produce_beacon_block.ssz",
|
||||
@@ -313,6 +268,8 @@ func receiveEvents(eventChan <-chan *sse.Event, w http.ResponseWriter, req *http
|
||||
default:
|
||||
return apimiddleware.InternalServerError(errors.New("payload version unsupported"))
|
||||
}
|
||||
case events.BlobSidecarTopic:
|
||||
data = &EventBlobSidecarJson{}
|
||||
case "error":
|
||||
data = &EventErrorJson{}
|
||||
default:
|
||||
|
||||
@@ -19,26 +19,6 @@ import (
|
||||
|
||||
// https://ethereum.github.io/beacon-APIs/?urls.primaryName=dev#/Beacon/submitPoolBLSToExecutionChange
|
||||
// expects posting a top-level array. We make it more proto-friendly by wrapping it in a struct.
|
||||
func wrapBLSChangesArray(
|
||||
endpoint *apimiddleware.Endpoint,
|
||||
_ http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
|
||||
if _, ok := endpoint.PostRequest.(*SubmitBLSToExecutionChangesRequest); !ok {
|
||||
return true, nil
|
||||
}
|
||||
changes := make([]*SignedBLSToExecutionChangeJson, 0)
|
||||
if err := json.NewDecoder(req.Body).Decode(&changes); err != nil {
|
||||
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
|
||||
}
|
||||
j := &SubmitBLSToExecutionChangesRequest{Changes: changes}
|
||||
b, err := json.Marshal(j)
|
||||
if err != nil {
|
||||
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
|
||||
}
|
||||
req.Body = io.NopCloser(bytes.NewReader(b))
|
||||
return true, nil
|
||||
}
|
||||
|
||||
type v1alpha1SignedPhase0Block struct {
|
||||
Block *BeaconBlockJson `json:"block"` // tech debt on phase 0 called this block instead of "message"
|
||||
@@ -299,306 +279,6 @@ func preparePublishedBlindedBlock(endpoint *apimiddleware.Endpoint, _ http.Respo
|
||||
return apimiddleware.InternalServerError(errors.New("unsupported block type"))
|
||||
}
|
||||
|
||||
type tempSyncCommitteesResponseJson struct {
|
||||
Data *tempSyncCommitteeValidatorsJson `json:"data"`
|
||||
}
|
||||
|
||||
type tempSyncCommitteeValidatorsJson struct {
|
||||
Validators []string `json:"validators"`
|
||||
ValidatorAggregates []*tempSyncSubcommitteeValidatorsJson `json:"validator_aggregates"`
|
||||
}
|
||||
|
||||
type tempSyncSubcommitteeValidatorsJson struct {
|
||||
Validators []string `json:"validators"`
|
||||
}
|
||||
|
||||
// https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.0.0#/Beacon/getEpochSyncCommittees returns validator_aggregates as a nested array.
|
||||
// grpc-gateway returns a struct with nested fields which we have to transform into a plain 2D array.
|
||||
func prepareValidatorAggregates(body []byte, responseContainer interface{}) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
|
||||
tempContainer := &tempSyncCommitteesResponseJson{}
|
||||
if err := json.Unmarshal(body, tempContainer); err != nil {
|
||||
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not unmarshal response into temp container")
|
||||
}
|
||||
container, ok := responseContainer.(*SyncCommitteesResponseJson)
|
||||
if !ok {
|
||||
return false, apimiddleware.InternalServerError(errors.New("container is not of the correct type"))
|
||||
}
|
||||
|
||||
container.Data = &SyncCommitteeValidatorsJson{}
|
||||
container.Data.Validators = tempContainer.Data.Validators
|
||||
container.Data.ValidatorAggregates = make([][]string, len(tempContainer.Data.ValidatorAggregates))
|
||||
for i, srcValAgg := range tempContainer.Data.ValidatorAggregates {
|
||||
dstValAgg := make([]string, len(srcValAgg.Validators))
|
||||
copy(dstValAgg, tempContainer.Data.ValidatorAggregates[i].Validators)
|
||||
container.Data.ValidatorAggregates[i] = dstValAgg
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
type phase0BlockResponseJson struct {
    Version             string                 `json:"version" enum:"true"`
    Data                *SignedBeaconBlockJson `json:"data"`
    ExecutionOptimistic bool                   `json:"execution_optimistic"`
    Finalized           bool                   `json:"finalized"`
}

type altairBlockResponseJson struct {
    Version             string                       `json:"version" enum:"true"`
    Data                *SignedBeaconBlockAltairJson `json:"data"`
    ExecutionOptimistic bool                         `json:"execution_optimistic"`
    Finalized           bool                         `json:"finalized"`
}

type bellatrixBlockResponseJson struct {
    Version             string                          `json:"version" enum:"true"`
    Data                *SignedBeaconBlockBellatrixJson `json:"data"`
    ExecutionOptimistic bool                            `json:"execution_optimistic"`
    Finalized           bool                            `json:"finalized"`
}

type capellaBlockResponseJson struct {
    Version             string                        `json:"version"`
    Data                *SignedBeaconBlockCapellaJson `json:"data"`
    ExecutionOptimistic bool                          `json:"execution_optimistic"`
    Finalized           bool                          `json:"finalized"`
}

type denebBlockResponseJson struct {
    Version             string                      `json:"version"`
    Data                *SignedBeaconBlockDenebJson `json:"data"`
    ExecutionOptimistic bool                        `json:"execution_optimistic"`
    Finalized           bool                        `json:"finalized"`
}

type bellatrixBlindedBlockResponseJson struct {
    Version             string                                 `json:"version" enum:"true"`
    Data                *SignedBlindedBeaconBlockBellatrixJson `json:"data"`
    ExecutionOptimistic bool                                   `json:"execution_optimistic"`
    Finalized           bool                                   `json:"finalized"`
}

type capellaBlindedBlockResponseJson struct {
    Version             string                               `json:"version" enum:"true"`
    Data                *SignedBlindedBeaconBlockCapellaJson `json:"data"`
    ExecutionOptimistic bool                                 `json:"execution_optimistic"`
    Finalized           bool                                 `json:"finalized"`
}

type denebBlindedBlockResponseJson struct {
    Version             string                             `json:"version"`
    Data                *SignedBlindedBeaconBlockDenebJson `json:"data"`
    ExecutionOptimistic bool                               `json:"execution_optimistic"`
    Finalized           bool                               `json:"finalized"`
}

func serializeV2Block(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
    respContainer, ok := response.(*BlockV2ResponseJson)
    if !ok {
        return false, nil, apimiddleware.InternalServerError(errors.New("container is not of the correct type"))
    }

    var actualRespContainer interface{}
    switch {
    case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_PHASE0.String())):
        actualRespContainer = &phase0BlockResponseJson{
            Version: respContainer.Version,
            Data: &SignedBeaconBlockJson{
                Message:   respContainer.Data.Phase0Block,
                Signature: respContainer.Data.Signature,
            },
            ExecutionOptimistic: respContainer.ExecutionOptimistic,
            Finalized:           respContainer.Finalized,
        }
    case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_ALTAIR.String())):
        actualRespContainer = &altairBlockResponseJson{
            Version: respContainer.Version,
            Data: &SignedBeaconBlockAltairJson{
                Message:   respContainer.Data.AltairBlock,
                Signature: respContainer.Data.Signature,
            },
            ExecutionOptimistic: respContainer.ExecutionOptimistic,
            Finalized:           respContainer.Finalized,
        }
    case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_BELLATRIX.String())):
        actualRespContainer = &bellatrixBlockResponseJson{
            Version: respContainer.Version,
            Data: &SignedBeaconBlockBellatrixJson{
                Message:   respContainer.Data.BellatrixBlock,
                Signature: respContainer.Data.Signature,
            },
            ExecutionOptimistic: respContainer.ExecutionOptimistic,
            Finalized:           respContainer.Finalized,
        }
    case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_CAPELLA.String())):
        actualRespContainer = &capellaBlockResponseJson{
            Version: respContainer.Version,
            Data: &SignedBeaconBlockCapellaJson{
                Message:   respContainer.Data.CapellaBlock,
                Signature: respContainer.Data.Signature,
            },
            ExecutionOptimistic: respContainer.ExecutionOptimistic,
            Finalized:           respContainer.Finalized,
        }
    case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_DENEB.String())):
        actualRespContainer = &denebBlockResponseJson{
            Version: respContainer.Version,
            Data: &SignedBeaconBlockDenebJson{
                Message:   respContainer.Data.DenebBlock,
                Signature: respContainer.Data.Signature,
            },
            ExecutionOptimistic: respContainer.ExecutionOptimistic,
            Finalized:           respContainer.Finalized,
        }
    default:
        return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
    }

    j, err := json.Marshal(actualRespContainer)
    if err != nil {
        return false, nil, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal response")
    }
    return false, j, nil
}

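One detail worth noting about the switch above: ethpbv2.Version_PHASE0.String() returns the upper-case enum name, while beacon API responses conventionally carry lower-case version strings, so the combination of strings.ToLower and strings.EqualFold accepts either casing. A minimal standalone sketch of that comparison (standard library only, not part of this change):

package main

import (
    "fmt"
    "strings"
)

func main() {
    // Mirrors the comparison in serializeV2Block: both casings match.
    fmt.Println(strings.EqualFold("PHASE0", strings.ToLower("PHASE0"))) // true
    fmt.Println(strings.EqualFold("phase0", strings.ToLower("PHASE0"))) // true
}
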
func serializeBlindedBlock(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
    respContainer, ok := response.(*BlindedBlockResponseJson)
    if !ok {
        return false, nil, apimiddleware.InternalServerError(errors.New("container is not of the correct type"))
    }

    var actualRespContainer interface{}
    switch {
    case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_PHASE0.String())):
        actualRespContainer = &phase0BlockResponseJson{
            Version: respContainer.Version,
            Data: &SignedBeaconBlockJson{
                Message:   respContainer.Data.Phase0Block,
                Signature: respContainer.Data.Signature,
            },
            ExecutionOptimistic: respContainer.ExecutionOptimistic,
            Finalized:           respContainer.Finalized,
        }
    case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_ALTAIR.String())):
        actualRespContainer = &altairBlockResponseJson{
            Version: respContainer.Version,
            Data: &SignedBeaconBlockAltairJson{
                Message:   respContainer.Data.AltairBlock,
                Signature: respContainer.Data.Signature,
            },
            ExecutionOptimistic: respContainer.ExecutionOptimistic,
            Finalized:           respContainer.Finalized,
        }
    case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_BELLATRIX.String())):
        actualRespContainer = &bellatrixBlindedBlockResponseJson{
            Version: respContainer.Version,
            Data: &SignedBlindedBeaconBlockBellatrixJson{
                Message:   respContainer.Data.BellatrixBlock,
                Signature: respContainer.Data.Signature,
            },
            ExecutionOptimistic: respContainer.ExecutionOptimistic,
            Finalized:           respContainer.Finalized,
        }
    case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_CAPELLA.String())):
        actualRespContainer = &capellaBlindedBlockResponseJson{
            Version: respContainer.Version,
            Data: &SignedBlindedBeaconBlockCapellaJson{
                Message:   respContainer.Data.CapellaBlock,
                Signature: respContainer.Data.Signature,
            },
            ExecutionOptimistic: respContainer.ExecutionOptimistic,
            Finalized:           respContainer.Finalized,
        }
    case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_DENEB.String())):
        actualRespContainer = &denebBlindedBlockResponseJson{
            Version: respContainer.Version,
            Data: &SignedBlindedBeaconBlockDenebJson{
                Message:   respContainer.Data.DenebBlock,
                Signature: respContainer.Data.Signature,
            },
            ExecutionOptimistic: respContainer.ExecutionOptimistic,
            Finalized:           respContainer.Finalized,
        }
    default:
        return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
    }

    j, err := json.Marshal(actualRespContainer)
    if err != nil {
        return false, nil, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal response")
    }
    return false, j, nil
}

type phase0StateResponseJson struct {
    Version string           `json:"version" enum:"true"`
    Data    *BeaconStateJson `json:"data"`
}

type altairStateResponseJson struct {
    Version string                 `json:"version" enum:"true"`
    Data    *BeaconStateAltairJson `json:"data"`
}

type bellatrixStateResponseJson struct {
    Version string                    `json:"version" enum:"true"`
    Data    *BeaconStateBellatrixJson `json:"data"`
}

type capellaStateResponseJson struct {
    Version string                  `json:"version" enum:"true"`
    Data    *BeaconStateCapellaJson `json:"data"`
}

type denebStateResponseJson struct {
    Version string                `json:"version" enum:"true"`
    Data    *BeaconStateDenebJson `json:"data"`
}

func serializeV2State(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
    respContainer, ok := response.(*BeaconStateV2ResponseJson)
    if !ok {
        return false, nil, apimiddleware.InternalServerError(errors.New("container is not of the correct type"))
    }

    var actualRespContainer interface{}
    switch {
    case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_PHASE0.String())):
        actualRespContainer = &phase0StateResponseJson{
            Version: respContainer.Version,
            Data:    respContainer.Data.Phase0State,
        }
    case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_ALTAIR.String())):
        actualRespContainer = &altairStateResponseJson{
            Version: respContainer.Version,
            Data:    respContainer.Data.AltairState,
        }
    case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_BELLATRIX.String())):
        actualRespContainer = &bellatrixStateResponseJson{
            Version: respContainer.Version,
            Data:    respContainer.Data.BellatrixState,
        }
    case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_CAPELLA.String())):
        actualRespContainer = &capellaStateResponseJson{
            Version: respContainer.Version,
            Data:    respContainer.Data.CapellaState,
        }
    case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_DENEB.String())):
        actualRespContainer = &denebStateResponseJson{
            Version: respContainer.Version,
            Data:    respContainer.Data.DenebState,
        }
    default:
        return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported state version '%s'", respContainer.Version))
    }

    j, err := json.Marshal(actualRespContainer)
    if err != nil {
        return false, nil, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal response")
    }
    return false, j, nil
}

type phase0ProduceBlockResponseJson struct {
    Version string           `json:"version" enum:"true"`
    Data    *BeaconBlockJson `json:"data"`
@@ -3,7 +3,6 @@ package apimiddleware
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"reflect"
|
||||
"strconv"
|
||||
@@ -19,46 +18,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
)
|
||||
|
||||
func TestWrapBLSChangesArray(t *testing.T) {
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
endpoint := &apimiddleware.Endpoint{
|
||||
PostRequest: &SubmitBLSToExecutionChangesRequest{},
|
||||
}
|
||||
unwrappedChanges := []*SignedBLSToExecutionChangeJson{{Signature: "sig"}}
|
||||
unwrappedChangesJson, err := json.Marshal(unwrappedChanges)
|
||||
require.NoError(t, err)
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err = body.Write(unwrappedChangesJson)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest("POST", "http://foo.example", &body)
|
||||
|
||||
runDefault, errJson := wrapBLSChangesArray(endpoint, nil, request)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
|
||||
wrappedChanges := &SubmitBLSToExecutionChangesRequest{}
|
||||
require.NoError(t, json.NewDecoder(request.Body).Decode(wrappedChanges))
|
||||
require.Equal(t, 1, len(wrappedChanges.Changes), "wrong number of wrapped items")
|
||||
assert.Equal(t, "sig", wrappedChanges.Changes[0].Signature)
|
||||
})
|
||||
|
||||
t.Run("invalid_body", func(t *testing.T) {
|
||||
endpoint := &apimiddleware.Endpoint{
|
||||
PostRequest: &SubmitBLSToExecutionChangesRequest{},
|
||||
}
|
||||
var body bytes.Buffer
|
||||
_, err := body.Write([]byte("invalid"))
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest("POST", "http://foo.example", &body)
|
||||
|
||||
runDefault, errJson := wrapBLSChangesArray(endpoint, nil, request)
|
||||
require.Equal(t, false, errJson == nil)
|
||||
assert.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not decode body"))
|
||||
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
|
||||
})
|
||||
}
|
||||
|
||||
func TestSetInitialPublishBlockPostRequest(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
@@ -401,416 +360,6 @@ func TestPreparePublishedBlindedBlock(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestPrepareValidatorAggregates(t *testing.T) {
|
||||
body := &tempSyncCommitteesResponseJson{
|
||||
Data: &tempSyncCommitteeValidatorsJson{
|
||||
Validators: []string{"1", "2"},
|
||||
ValidatorAggregates: []*tempSyncSubcommitteeValidatorsJson{
|
||||
{
|
||||
Validators: []string{"3", "4"},
|
||||
},
|
||||
{
|
||||
Validators: []string{"5"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
bodyJson, err := json.Marshal(body)
|
||||
require.NoError(t, err)
|
||||
|
||||
container := &SyncCommitteesResponseJson{}
|
||||
runDefault, errJson := prepareValidatorAggregates(bodyJson, container)
|
||||
require.Equal(t, nil, errJson)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
assert.DeepEqual(t, []string{"1", "2"}, container.Data.Validators)
|
||||
require.DeepEqual(t, [][]string{{"3", "4"}, {"5"}}, container.Data.ValidatorAggregates)
|
||||
}
|
||||
|
||||
func TestSerializeV2Block(t *testing.T) {
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
response := &BlockV2ResponseJson{
|
||||
Version: ethpbv2.Version_PHASE0.String(),
|
||||
Data: &SignedBeaconBlockContainerV2Json{
|
||||
Phase0Block: &BeaconBlockJson{
|
||||
Slot: "1",
|
||||
ProposerIndex: "1",
|
||||
ParentRoot: "root",
|
||||
StateRoot: "root",
|
||||
Body: &BeaconBlockBodyJson{},
|
||||
},
|
||||
Signature: "sig",
|
||||
},
|
||||
ExecutionOptimistic: true,
|
||||
}
|
||||
runDefault, j, errJson := serializeV2Block(response)
|
||||
require.Equal(t, nil, errJson)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.NotNil(t, j)
|
||||
resp := &phase0BlockResponseJson{}
|
||||
require.NoError(t, json.Unmarshal(j, resp))
|
||||
require.NotNil(t, resp.Data)
|
||||
require.NotNil(t, resp.Data.Message)
|
||||
beaconBlock := resp.Data.Message
|
||||
assert.Equal(t, "1", beaconBlock.Slot)
|
||||
assert.Equal(t, "1", beaconBlock.ProposerIndex)
|
||||
assert.Equal(t, "root", beaconBlock.ParentRoot)
|
||||
assert.Equal(t, "root", beaconBlock.StateRoot)
|
||||
assert.NotNil(t, beaconBlock.Body)
|
||||
assert.Equal(t, true, resp.ExecutionOptimistic)
|
||||
})
|
||||
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
response := &BlockV2ResponseJson{
|
||||
Version: ethpbv2.Version_ALTAIR.String(),
|
||||
Data: &SignedBeaconBlockContainerV2Json{
|
||||
AltairBlock: &BeaconBlockAltairJson{
|
||||
Slot: "1",
|
||||
ProposerIndex: "1",
|
||||
ParentRoot: "root",
|
||||
StateRoot: "root",
|
||||
Body: &BeaconBlockBodyAltairJson{},
|
||||
},
|
||||
Signature: "sig",
|
||||
},
|
||||
ExecutionOptimistic: true,
|
||||
}
|
||||
runDefault, j, errJson := serializeV2Block(response)
|
||||
require.Equal(t, nil, errJson)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.NotNil(t, j)
|
||||
resp := &altairBlockResponseJson{}
|
||||
require.NoError(t, json.Unmarshal(j, resp))
|
||||
require.NotNil(t, resp.Data)
|
||||
require.NotNil(t, resp.Data.Message)
|
||||
beaconBlock := resp.Data.Message
|
||||
assert.Equal(t, "1", beaconBlock.Slot)
|
||||
assert.Equal(t, "1", beaconBlock.ProposerIndex)
|
||||
assert.Equal(t, "root", beaconBlock.ParentRoot)
|
||||
assert.Equal(t, "root", beaconBlock.StateRoot)
|
||||
assert.NotNil(t, beaconBlock.Body)
|
||||
assert.Equal(t, true, resp.ExecutionOptimistic)
|
||||
})
|
||||
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
response := &BlockV2ResponseJson{
|
||||
Version: ethpbv2.Version_BELLATRIX.String(),
|
||||
Data: &SignedBeaconBlockContainerV2Json{
|
||||
BellatrixBlock: &BeaconBlockBellatrixJson{
|
||||
Slot: "1",
|
||||
ProposerIndex: "1",
|
||||
ParentRoot: "root",
|
||||
StateRoot: "root",
|
||||
Body: &BeaconBlockBodyBellatrixJson{},
|
||||
},
|
||||
Signature: "sig",
|
||||
},
|
||||
ExecutionOptimistic: true,
|
||||
}
|
||||
runDefault, j, errJson := serializeV2Block(response)
|
||||
require.Equal(t, nil, errJson)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.NotNil(t, j)
|
||||
resp := &bellatrixBlockResponseJson{}
|
||||
require.NoError(t, json.Unmarshal(j, resp))
|
||||
require.NotNil(t, resp.Data)
|
||||
require.NotNil(t, resp.Data.Message)
|
||||
beaconBlock := resp.Data.Message
|
||||
assert.Equal(t, "1", beaconBlock.Slot)
|
||||
assert.Equal(t, "1", beaconBlock.ProposerIndex)
|
||||
assert.Equal(t, "root", beaconBlock.ParentRoot)
|
||||
assert.Equal(t, "root", beaconBlock.StateRoot)
|
||||
assert.NotNil(t, beaconBlock.Body)
|
||||
assert.Equal(t, true, resp.ExecutionOptimistic)
|
||||
})
|
||||
|
||||
t.Run("incorrect response type", func(t *testing.T) {
|
||||
response := &types.Empty{}
|
||||
runDefault, j, errJson := serializeV2Block(response)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.Equal(t, 0, len(j))
|
||||
require.NotNil(t, errJson)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "container is not of the correct type"))
|
||||
})
|
||||
|
||||
t.Run("unsupported block version", func(t *testing.T) {
|
||||
response := &BlockV2ResponseJson{
|
||||
Version: "unsupported",
|
||||
}
|
||||
runDefault, j, errJson := serializeV2Block(response)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.Equal(t, 0, len(j))
|
||||
require.NotNil(t, errJson)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "unsupported block version"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestSerializeBlindedBlock(t *testing.T) {
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
response := &BlindedBlockResponseJson{
|
||||
Version: ethpbv2.Version_PHASE0.String(),
|
||||
Data: &SignedBlindedBeaconBlockContainerJson{
|
||||
Phase0Block: &BeaconBlockJson{
|
||||
Slot: "1",
|
||||
ProposerIndex: "1",
|
||||
ParentRoot: "root",
|
||||
StateRoot: "root",
|
||||
Body: &BeaconBlockBodyJson{},
|
||||
},
|
||||
Signature: "sig",
|
||||
},
|
||||
ExecutionOptimistic: true,
|
||||
}
|
||||
runDefault, j, errJson := serializeBlindedBlock(response)
|
||||
require.Equal(t, nil, errJson)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.NotNil(t, j)
|
||||
resp := &phase0BlockResponseJson{}
|
||||
require.NoError(t, json.Unmarshal(j, resp))
|
||||
require.NotNil(t, resp.Data)
|
||||
require.NotNil(t, resp.Data.Message)
|
||||
beaconBlock := resp.Data.Message
|
||||
assert.Equal(t, "1", beaconBlock.Slot)
|
||||
assert.Equal(t, "1", beaconBlock.ProposerIndex)
|
||||
assert.Equal(t, "root", beaconBlock.ParentRoot)
|
||||
assert.Equal(t, "root", beaconBlock.StateRoot)
|
||||
assert.NotNil(t, beaconBlock.Body)
|
||||
assert.Equal(t, true, resp.ExecutionOptimistic)
|
||||
})
|
||||
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
response := &BlindedBlockResponseJson{
|
||||
Version: ethpbv2.Version_ALTAIR.String(),
|
||||
Data: &SignedBlindedBeaconBlockContainerJson{
|
||||
AltairBlock: &BeaconBlockAltairJson{
|
||||
Slot: "1",
|
||||
ProposerIndex: "1",
|
||||
ParentRoot: "root",
|
||||
StateRoot: "root",
|
||||
Body: &BeaconBlockBodyAltairJson{},
|
||||
},
|
||||
Signature: "sig",
|
||||
},
|
||||
ExecutionOptimistic: true,
|
||||
}
|
||||
runDefault, j, errJson := serializeBlindedBlock(response)
|
||||
require.Equal(t, nil, errJson)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.NotNil(t, j)
|
||||
resp := &altairBlockResponseJson{}
|
||||
require.NoError(t, json.Unmarshal(j, resp))
|
||||
require.NotNil(t, resp.Data)
|
||||
require.NotNil(t, resp.Data.Message)
|
||||
beaconBlock := resp.Data.Message
|
||||
assert.Equal(t, "1", beaconBlock.Slot)
|
||||
assert.Equal(t, "1", beaconBlock.ProposerIndex)
|
||||
assert.Equal(t, "root", beaconBlock.ParentRoot)
|
||||
assert.Equal(t, "root", beaconBlock.StateRoot)
|
||||
assert.NotNil(t, beaconBlock.Body)
|
||||
assert.Equal(t, true, resp.ExecutionOptimistic)
|
||||
})
|
||||
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
response := &BlindedBlockResponseJson{
|
||||
Version: ethpbv2.Version_BELLATRIX.String(),
|
||||
Data: &SignedBlindedBeaconBlockContainerJson{
|
||||
BellatrixBlock: &BlindedBeaconBlockBellatrixJson{
|
||||
Slot: "1",
|
||||
ProposerIndex: "1",
|
||||
ParentRoot: "root",
|
||||
StateRoot: "root",
|
||||
Body: &BlindedBeaconBlockBodyBellatrixJson{},
|
||||
},
|
||||
Signature: "sig",
|
||||
},
|
||||
ExecutionOptimistic: true,
|
||||
}
|
||||
runDefault, j, errJson := serializeBlindedBlock(response)
|
||||
require.Equal(t, nil, errJson)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.NotNil(t, j)
|
||||
resp := &bellatrixBlindedBlockResponseJson{}
|
||||
require.NoError(t, json.Unmarshal(j, resp))
|
||||
require.NotNil(t, resp.Data)
|
||||
require.NotNil(t, resp.Data.Message)
|
||||
beaconBlock := resp.Data.Message
|
||||
assert.Equal(t, "1", beaconBlock.Slot)
|
||||
assert.Equal(t, "1", beaconBlock.ProposerIndex)
|
||||
assert.Equal(t, "root", beaconBlock.ParentRoot)
|
||||
assert.Equal(t, "root", beaconBlock.StateRoot)
|
||||
assert.NotNil(t, beaconBlock.Body)
|
||||
assert.Equal(t, true, resp.ExecutionOptimistic)
|
||||
})
|
||||
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
response := &BlindedBlockResponseJson{
|
||||
Version: ethpbv2.Version_CAPELLA.String(),
|
||||
Data: &SignedBlindedBeaconBlockContainerJson{
|
||||
CapellaBlock: &BlindedBeaconBlockCapellaJson{
|
||||
Slot: "1",
|
||||
ProposerIndex: "1",
|
||||
ParentRoot: "root",
|
||||
StateRoot: "root",
|
||||
Body: &BlindedBeaconBlockBodyCapellaJson{
|
||||
ExecutionPayloadHeader: &ExecutionPayloadHeaderCapellaJson{
|
||||
ParentHash: "parent_hash",
|
||||
FeeRecipient: "fee_recipient",
|
||||
StateRoot: "state_root",
|
||||
ReceiptsRoot: "receipts_root",
|
||||
LogsBloom: "logs_bloom",
|
||||
PrevRandao: "prev_randao",
|
||||
BlockNumber: "block_number",
|
||||
GasLimit: "gas_limit",
|
||||
GasUsed: "gas_used",
|
||||
TimeStamp: "time_stamp",
|
||||
ExtraData: "extra_data",
|
||||
BaseFeePerGas: "base_fee_per_gas",
|
||||
BlockHash: "block_hash",
|
||||
TransactionsRoot: "transactions_root",
|
||||
WithdrawalsRoot: "withdrawals_root",
|
||||
},
|
||||
},
|
||||
},
|
||||
Signature: "sig",
|
||||
},
|
||||
ExecutionOptimistic: true,
|
||||
}
|
||||
runDefault, j, errJson := serializeBlindedBlock(response)
|
||||
require.Equal(t, nil, errJson)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.NotNil(t, j)
|
||||
resp := &capellaBlindedBlockResponseJson{}
|
||||
require.NoError(t, json.Unmarshal(j, resp))
|
||||
require.NotNil(t, resp.Data)
|
||||
require.NotNil(t, resp.Data.Message)
|
||||
beaconBlock := resp.Data.Message
|
||||
assert.Equal(t, "1", beaconBlock.Slot)
|
||||
assert.Equal(t, "1", beaconBlock.ProposerIndex)
|
||||
assert.Equal(t, "root", beaconBlock.ParentRoot)
|
||||
assert.Equal(t, "root", beaconBlock.StateRoot)
|
||||
assert.NotNil(t, beaconBlock.Body)
|
||||
payloadHeader := beaconBlock.Body.ExecutionPayloadHeader
|
||||
assert.NotNil(t, payloadHeader)
|
||||
assert.Equal(t, "parent_hash", payloadHeader.ParentHash)
|
||||
assert.Equal(t, "fee_recipient", payloadHeader.FeeRecipient)
|
||||
assert.Equal(t, "state_root", payloadHeader.StateRoot)
|
||||
assert.Equal(t, "receipts_root", payloadHeader.ReceiptsRoot)
|
||||
assert.Equal(t, "logs_bloom", payloadHeader.LogsBloom)
|
||||
assert.Equal(t, "prev_randao", payloadHeader.PrevRandao)
|
||||
assert.Equal(t, "block_number", payloadHeader.BlockNumber)
|
||||
assert.Equal(t, "gas_limit", payloadHeader.GasLimit)
|
||||
assert.Equal(t, "gas_used", payloadHeader.GasUsed)
|
||||
assert.Equal(t, "time_stamp", payloadHeader.TimeStamp)
|
||||
assert.Equal(t, "extra_data", payloadHeader.ExtraData)
|
||||
assert.Equal(t, "base_fee_per_gas", payloadHeader.BaseFeePerGas)
|
||||
assert.Equal(t, "block_hash", payloadHeader.BlockHash)
|
||||
assert.Equal(t, "transactions_root", payloadHeader.TransactionsRoot)
|
||||
assert.Equal(t, "withdrawals_root", payloadHeader.WithdrawalsRoot)
|
||||
assert.Equal(t, true, resp.ExecutionOptimistic)
|
||||
|
||||
})
|
||||
|
||||
t.Run("incorrect response type", func(t *testing.T) {
|
||||
response := &types.Empty{}
|
||||
runDefault, j, errJson := serializeBlindedBlock(response)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.Equal(t, 0, len(j))
|
||||
require.NotNil(t, errJson)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "container is not of the correct type"))
|
||||
})
|
||||
|
||||
t.Run("unsupported block version", func(t *testing.T) {
|
||||
response := &BlindedBlockResponseJson{
|
||||
Version: "unsupported",
|
||||
}
|
||||
runDefault, j, errJson := serializeBlindedBlock(response)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.Equal(t, 0, len(j))
|
||||
require.NotNil(t, errJson)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "unsupported block version"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestSerializeV2State(t *testing.T) {
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
response := &BeaconStateV2ResponseJson{
|
||||
Version: ethpbv2.Version_PHASE0.String(),
|
||||
Data: &BeaconStateContainerV2Json{
|
||||
Phase0State: &BeaconStateJson{},
|
||||
AltairState: nil,
|
||||
},
|
||||
}
|
||||
runDefault, j, errJson := serializeV2State(response)
|
||||
require.Equal(t, nil, errJson)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.NotNil(t, j)
|
||||
require.NoError(t, json.Unmarshal(j, &phase0StateResponseJson{}))
|
||||
})
|
||||
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
response := &BeaconStateV2ResponseJson{
|
||||
Version: ethpbv2.Version_ALTAIR.String(),
|
||||
Data: &BeaconStateContainerV2Json{
|
||||
Phase0State: nil,
|
||||
AltairState: &BeaconStateAltairJson{},
|
||||
},
|
||||
}
|
||||
runDefault, j, errJson := serializeV2State(response)
|
||||
require.Equal(t, nil, errJson)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.NotNil(t, j)
|
||||
require.NoError(t, json.Unmarshal(j, &altairStateResponseJson{}))
|
||||
})
|
||||
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
response := &BeaconStateV2ResponseJson{
|
||||
Version: ethpbv2.Version_BELLATRIX.String(),
|
||||
Data: &BeaconStateContainerV2Json{
|
||||
Phase0State: nil,
|
||||
BellatrixState: &BeaconStateBellatrixJson{},
|
||||
},
|
||||
}
|
||||
runDefault, j, errJson := serializeV2State(response)
|
||||
require.Equal(t, nil, errJson)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.NotNil(t, j)
|
||||
require.NoError(t, json.Unmarshal(j, &bellatrixStateResponseJson{}))
|
||||
})
|
||||
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
response := &BeaconStateV2ResponseJson{
|
||||
Version: ethpbv2.Version_CAPELLA.String(),
|
||||
Data: &BeaconStateContainerV2Json{
|
||||
Phase0State: nil,
|
||||
CapellaState: &BeaconStateCapellaJson{},
|
||||
},
|
||||
}
|
||||
runDefault, j, errJson := serializeV2State(response)
|
||||
require.Equal(t, nil, errJson)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.NotNil(t, j)
|
||||
require.NoError(t, json.Unmarshal(j, &capellaStateResponseJson{}))
|
||||
})
|
||||
|
||||
t.Run("incorrect response type", func(t *testing.T) {
|
||||
runDefault, j, errJson := serializeV2State(&types.Empty{})
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.Equal(t, 0, len(j))
|
||||
require.NotNil(t, errJson)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "container is not of the correct type"))
|
||||
})
|
||||
|
||||
t.Run("unsupported state version", func(t *testing.T) {
|
||||
response := &BeaconStateV2ResponseJson{
|
||||
Version: "unsupported",
|
||||
}
|
||||
runDefault, j, errJson := serializeV2State(response)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.Equal(t, 0, len(j))
|
||||
require.NotNil(t, errJson)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "unsupported state version"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestSerializeProducedV2Block(t *testing.T) {
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
response := &ProduceBlockResponseV2Json{
|
||||
|
||||
@@ -16,27 +16,9 @@ func (f *BeaconEndpointFactory) IsNil() bool {
|
||||
// Paths is a collection of all valid beacon chain API paths.
|
||||
func (_ *BeaconEndpointFactory) Paths() []string {
|
||||
return []string{
|
||||
"/eth/v1/beacon/states/{state_id}/root",
|
||||
"/eth/v1/beacon/states/{state_id}/sync_committees",
|
||||
"/eth/v1/beacon/states/{state_id}/randao",
|
||||
"/eth/v1/beacon/blinded_blocks",
|
||||
"/eth/v1/beacon/blocks/{block_id}",
|
||||
"/eth/v2/beacon/blocks/{block_id}",
|
||||
"/eth/v1/beacon/blocks/{block_id}/attestations",
|
||||
"/eth/v1/beacon/blinded_blocks/{block_id}",
|
||||
"/eth/v1/beacon/pool/attester_slashings",
|
||||
"/eth/v1/beacon/pool/proposer_slashings",
|
||||
"/eth/v1/beacon/pool/bls_to_execution_changes",
|
||||
"/eth/v1/beacon/pool/bls_to_execution_changes",
|
||||
"/eth/v1/beacon/weak_subjectivity",
|
||||
"/eth/v1/node/identity",
|
||||
"/eth/v1/node/peers",
|
||||
"/eth/v1/node/peers/{peer_id}",
|
||||
"/eth/v1/node/peer_count",
|
||||
"/eth/v1/node/version",
|
||||
"/eth/v1/node/health",
|
||||
"/eth/v1/debug/beacon/states/{state_id}",
|
||||
"/eth/v2/debug/beacon/states/{state_id}",
|
||||
"/eth/v1/debug/beacon/heads",
|
||||
"/eth/v2/debug/beacon/heads",
|
||||
"/eth/v1/debug/fork_choice",
|
||||
@@ -53,72 +35,14 @@ func (_ *BeaconEndpointFactory) Paths() []string {
|
||||
func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, error) {
|
||||
endpoint := apimiddleware.DefaultEndpoint()
|
||||
switch path {
|
||||
case "/eth/v1/beacon/states/{state_id}/root":
|
||||
endpoint.GetResponse = &StateRootResponseJson{}
|
||||
case "/eth/v1/beacon/states/{state_id}/sync_committees":
|
||||
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "epoch"}}
|
||||
endpoint.GetResponse = &SyncCommitteesResponseJson{}
|
||||
endpoint.Hooks = apimiddleware.HookCollection{
|
||||
OnPreDeserializeGrpcResponseBodyIntoContainer: prepareValidatorAggregates,
|
||||
}
|
||||
case "/eth/v1/beacon/states/{state_id}/randao":
|
||||
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "epoch"}}
|
||||
endpoint.GetResponse = &RandaoResponseJson{}
|
||||
case "/eth/v1/beacon/blocks/{block_id}":
|
||||
endpoint.GetResponse = &BlockResponseJson{}
|
||||
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleGetBeaconBlockSSZ}
|
||||
case "/eth/v2/beacon/blocks/{block_id}":
|
||||
endpoint.GetResponse = &BlockV2ResponseJson{}
|
||||
endpoint.Hooks = apimiddleware.HookCollection{
|
||||
OnPreSerializeMiddlewareResponseIntoJson: serializeV2Block,
|
||||
}
|
||||
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleGetBeaconBlockSSZV2}
|
||||
case "/eth/v1/beacon/blocks/{block_id}/attestations":
|
||||
endpoint.GetResponse = &BlockAttestationsResponseJson{}
|
||||
case "/eth/v1/beacon/blinded_blocks/{block_id}":
|
||||
endpoint.GetResponse = &BlindedBlockResponseJson{}
|
||||
endpoint.Hooks = apimiddleware.HookCollection{
|
||||
OnPreSerializeMiddlewareResponseIntoJson: serializeBlindedBlock,
|
||||
}
|
||||
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleGetBlindedBeaconBlockSSZ}
|
||||
case "/eth/v1/beacon/pool/attester_slashings":
|
||||
endpoint.PostRequest = &AttesterSlashingJson{}
|
||||
endpoint.GetResponse = &AttesterSlashingsPoolResponseJson{}
|
||||
case "/eth/v1/beacon/pool/proposer_slashings":
|
||||
endpoint.PostRequest = &ProposerSlashingJson{}
|
||||
endpoint.GetResponse = &ProposerSlashingsPoolResponseJson{}
|
||||
case "/eth/v1/beacon/pool/bls_to_execution_changes":
|
||||
endpoint.PostRequest = &SubmitBLSToExecutionChangesRequest{}
|
||||
endpoint.GetResponse = &BLSToExecutionChangesPoolResponseJson{}
|
||||
endpoint.Err = &IndexedVerificationFailureErrorJson{}
|
||||
endpoint.Hooks = apimiddleware.HookCollection{
|
||||
OnPreDeserializeRequestBodyIntoContainer: wrapBLSChangesArray,
|
||||
}
|
||||
case "/eth/v1/beacon/weak_subjectivity":
|
||||
endpoint.GetResponse = &WeakSubjectivityResponse{}
|
||||
case "/eth/v1/node/identity":
|
||||
endpoint.GetResponse = &IdentityResponseJson{}
|
||||
case "/eth/v1/node/peers":
|
||||
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "state", Enum: true}, {Name: "direction", Enum: true}}
|
||||
endpoint.GetResponse = &PeersResponseJson{}
|
||||
case "/eth/v1/node/peers/{peer_id}":
|
||||
endpoint.RequestURLLiterals = []string{"peer_id"}
|
||||
endpoint.GetResponse = &PeerResponseJson{}
|
||||
case "/eth/v1/node/peer_count":
|
||||
endpoint.GetResponse = &PeerCountResponseJson{}
|
||||
case "/eth/v1/node/version":
|
||||
endpoint.GetResponse = &VersionResponseJson{}
|
||||
case "/eth/v1/node/health":
|
||||
// Use default endpoint
|
||||
case "/eth/v1/debug/beacon/states/{state_id}":
|
||||
endpoint.GetResponse = &BeaconStateResponseJson{}
|
||||
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleGetBeaconStateSSZ}
|
||||
case "/eth/v2/debug/beacon/states/{state_id}":
|
||||
endpoint.GetResponse = &BeaconStateV2ResponseJson{}
|
||||
endpoint.Hooks = apimiddleware.HookCollection{
|
||||
OnPreSerializeMiddlewareResponseIntoJson: serializeV2State,
|
||||
}
|
||||
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleGetBeaconStateSSZV2}
|
||||
case "/eth/v1/debug/beacon/heads":
|
||||
endpoint.GetResponse = &ForkChoiceHeadsResponseJson{}
|
||||
case "/eth/v2/debug/beacon/heads":
|
||||
|
||||
beacon-chain/rpc/apimiddleware/endpoint_factory_test.go (new file)
@@ -0,0 +1,17 @@
package apimiddleware_test

import (
    "testing"

    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/apimiddleware"
    "github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestBeaconEndpointFactory_AllPathsRegistered(t *testing.T) {
    f := &apimiddleware.BeaconEndpointFactory{}

    for _, p := range f.Paths() {
        _, err := f.Create(p)
        require.NoError(t, err, "failed to register %s", p)
    }
}

@@ -4,7 +4,6 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
|
||||
)
|
||||
|
||||
@@ -21,60 +20,12 @@ type WeakSubjectivityResponse struct {
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
type StateRootResponseJson struct {
|
||||
Data *StateRootResponse_StateRootJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
}
|
||||
|
||||
type StateRootResponse_StateRootJson struct {
|
||||
StateRoot string `json:"root" hex:"true"`
|
||||
}
|
||||
|
||||
type SyncCommitteesResponseJson struct {
|
||||
Data *SyncCommitteeValidatorsJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
}
|
||||
|
||||
type RandaoResponseJson struct {
|
||||
Data *struct {
|
||||
Randao string `json:"randao" hex:"true"`
|
||||
} `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
}
|
||||
|
||||
type BlockResponseJson struct {
|
||||
Data *SignedBeaconBlockJson `json:"data"`
|
||||
}
|
||||
|
||||
type BlockV2ResponseJson struct {
|
||||
Version string `json:"version" enum:"true"`
|
||||
Data *SignedBeaconBlockContainerV2Json `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
}
|
||||
|
||||
type BlindedBlockResponseJson struct {
|
||||
Version string `json:"version" enum:"true"`
|
||||
Data *SignedBlindedBeaconBlockContainerJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
}
|
||||
|
||||
type BlockRootResponseJson struct {
|
||||
Data *BlockRootContainerJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
}
|
||||
|
||||
type BlockAttestationsResponseJson struct {
|
||||
Data []*AttestationJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
}
|
||||
|
||||
type AttesterSlashingsPoolResponseJson struct {
|
||||
Data []*AttesterSlashingJson `json:"data"`
|
||||
}
|
||||
@@ -83,52 +34,6 @@ type ProposerSlashingsPoolResponseJson struct {
|
||||
Data []*ProposerSlashingJson `json:"data"`
|
||||
}
|
||||
|
||||
type BLSToExecutionChangesPoolResponseJson struct {
|
||||
Data []*SignedBLSToExecutionChangeJson `json:"data"`
|
||||
}
|
||||
|
||||
type IdentityResponseJson struct {
|
||||
Data *IdentityJson `json:"data"`
|
||||
}
|
||||
|
||||
type PeersResponseJson struct {
|
||||
Data []*PeerJson `json:"data"`
|
||||
}
|
||||
|
||||
type PeerResponseJson struct {
|
||||
Data *PeerJson `json:"data"`
|
||||
}
|
||||
|
||||
type PeerCountResponseJson struct {
|
||||
Data PeerCountResponse_PeerCountJson `json:"data"`
|
||||
}
|
||||
|
||||
type PeerCountResponse_PeerCountJson struct {
|
||||
Disconnected string `json:"disconnected"`
|
||||
Connecting string `json:"connecting"`
|
||||
Connected string `json:"connected"`
|
||||
Disconnecting string `json:"disconnecting"`
|
||||
}
|
||||
|
||||
type VersionResponseJson struct {
|
||||
Data *VersionJson `json:"data"`
|
||||
}
|
||||
|
||||
type SyncingResponseJson struct {
|
||||
Data *shared.SyncDetails `json:"data"`
|
||||
}
|
||||
|
||||
type BeaconStateResponseJson struct {
|
||||
Data *BeaconStateJson `json:"data"`
|
||||
}
|
||||
|
||||
type BeaconStateV2ResponseJson struct {
|
||||
Version string `json:"version" enum:"true"`
|
||||
Data *BeaconStateContainerV2Json `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
}
|
||||
|
||||
type ForkChoiceHeadsResponseJson struct {
|
||||
Data []*ForkChoiceHeadJson `json:"data"`
|
||||
}
|
||||
@@ -252,32 +157,6 @@ type BeaconBlockBodyJson struct {
|
||||
VoluntaryExits []*SignedVoluntaryExitJson `json:"voluntary_exits"`
|
||||
}
|
||||
|
||||
type SignedBeaconBlockContainerV2Json struct {
|
||||
Phase0Block *BeaconBlockJson `json:"phase0_block"`
|
||||
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
|
||||
BellatrixBlock *BeaconBlockBellatrixJson `json:"bellatrix_block"`
|
||||
CapellaBlock *BeaconBlockCapellaJson `json:"capella_block"`
|
||||
DenebBlock *BeaconBlockDenebJson `json:"deneb_block"`
|
||||
Signature string `json:"signature" hex:"true"`
|
||||
}
|
||||
|
||||
type SignedBlindedBeaconBlockContainerJson struct {
|
||||
Phase0Block *BeaconBlockJson `json:"phase0_block"`
|
||||
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
|
||||
BellatrixBlock *BlindedBeaconBlockBellatrixJson `json:"bellatrix_block"`
|
||||
CapellaBlock *BlindedBeaconBlockCapellaJson `json:"capella_block"`
|
||||
DenebBlock *BlindedBeaconBlockDenebJson `json:"deneb_block"`
|
||||
Signature string `json:"signature" hex:"true"`
|
||||
}
|
||||
|
||||
type SignedBlindedBeaconBlockContentsContainerJson struct {
|
||||
Phase0Block *SignedBeaconBlockJson `json:"phase0_block"`
|
||||
AltairBlock *SignedBeaconBlockAltairJson `json:"altair_block"`
|
||||
BellatrixBlock *SignedBlindedBeaconBlockBellatrixJson `json:"bellatrix_block"`
|
||||
CapellaBlock *SignedBlindedBeaconBlockCapellaJson `json:"capella_block"`
|
||||
DenebContents *SignedBlindedBeaconBlockContentsDenebJson `json:"deneb_contents"`
|
||||
}
|
||||
|
||||
type BeaconBlockContainerV2Json struct {
|
||||
Phase0Block *BeaconBlockJson `json:"phase0_block"`
|
||||
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
|
||||
@@ -708,10 +587,6 @@ type BLSToExecutionChangeJson struct {
|
||||
ToExecutionAddress string `json:"to_execution_address" hex:"true"`
|
||||
}
|
||||
|
||||
type SubmitBLSToExecutionChangesRequest struct {
|
||||
Changes []*SignedBLSToExecutionChangeJson `json:"changes"`
|
||||
}
|
||||
|
||||
type DepositJson struct {
|
||||
Proof []string `json:"proof" hex:"true"`
|
||||
Data *Deposit_DataJson `json:"data"`
|
||||
@@ -734,31 +609,6 @@ type VoluntaryExitJson struct {
|
||||
ValidatorIndex string `json:"validator_index"`
|
||||
}
|
||||
|
||||
type IdentityJson struct {
|
||||
PeerId string `json:"peer_id"`
|
||||
Enr string `json:"enr"`
|
||||
P2PAddresses []string `json:"p2p_addresses"`
|
||||
DiscoveryAddresses []string `json:"discovery_addresses"`
|
||||
Metadata *MetadataJson `json:"metadata"`
|
||||
}
|
||||
|
||||
type MetadataJson struct {
|
||||
SeqNumber string `json:"seq_number"`
|
||||
Attnets string `json:"attnets" hex:"true"`
|
||||
}
|
||||
|
||||
type PeerJson struct {
|
||||
PeerId string `json:"peer_id"`
|
||||
Enr string `json:"enr"`
|
||||
Address string `json:"last_seen_p2p_address"`
|
||||
State string `json:"state" enum:"true"`
|
||||
Direction string `json:"direction" enum:"true"`
|
||||
}
|
||||
|
||||
type VersionJson struct {
|
||||
Version string `json:"version" enum:"true"`
|
||||
}
|
||||
|
||||
type WithdrawalJson struct {
|
||||
WithdrawalIndex string `json:"index"`
|
||||
ValidatorIndex string `json:"validator_index"`
|
||||
@@ -766,155 +616,6 @@ type WithdrawalJson struct {
|
||||
Amount string `json:"amount"`
|
||||
}
|
||||
|
||||
type BeaconStateJson struct {
|
||||
GenesisTime string `json:"genesis_time"`
|
||||
GenesisValidatorsRoot string `json:"genesis_validators_root" hex:"true"`
|
||||
Slot string `json:"slot"`
|
||||
Fork *ForkJson `json:"fork"`
|
||||
LatestBlockHeader *BeaconBlockHeaderJson `json:"latest_block_header"`
|
||||
BlockRoots []string `json:"block_roots" hex:"true"`
|
||||
StateRoots []string `json:"state_roots" hex:"true"`
|
||||
HistoricalRoots []string `json:"historical_roots" hex:"true"`
|
||||
Eth1Data *Eth1DataJson `json:"eth1_data"`
|
||||
Eth1DataVotes []*Eth1DataJson `json:"eth1_data_votes"`
|
||||
Eth1DepositIndex string `json:"eth1_deposit_index"`
|
||||
Validators []*ValidatorJson `json:"validators"`
|
||||
Balances []string `json:"balances"`
|
||||
RandaoMixes []string `json:"randao_mixes" hex:"true"`
|
||||
Slashings []string `json:"slashings"`
|
||||
PreviousEpochAttestations []*PendingAttestationJson `json:"previous_epoch_attestations"`
|
||||
CurrentEpochAttestations []*PendingAttestationJson `json:"current_epoch_attestations"`
|
||||
JustificationBits string `json:"justification_bits" hex:"true"`
|
||||
PreviousJustifiedCheckpoint *CheckpointJson `json:"previous_justified_checkpoint"`
|
||||
CurrentJustifiedCheckpoint *CheckpointJson `json:"current_justified_checkpoint"`
|
||||
FinalizedCheckpoint *CheckpointJson `json:"finalized_checkpoint"`
|
||||
}
|
||||
|
||||
type BeaconStateAltairJson struct {
|
||||
GenesisTime string `json:"genesis_time"`
|
||||
GenesisValidatorsRoot string `json:"genesis_validators_root" hex:"true"`
|
||||
Slot string `json:"slot"`
|
||||
Fork *ForkJson `json:"fork"`
|
||||
LatestBlockHeader *BeaconBlockHeaderJson `json:"latest_block_header"`
|
||||
BlockRoots []string `json:"block_roots" hex:"true"`
|
||||
StateRoots []string `json:"state_roots" hex:"true"`
|
||||
HistoricalRoots []string `json:"historical_roots" hex:"true"`
|
||||
Eth1Data *Eth1DataJson `json:"eth1_data"`
|
||||
Eth1DataVotes []*Eth1DataJson `json:"eth1_data_votes"`
|
||||
Eth1DepositIndex string `json:"eth1_deposit_index"`
|
||||
Validators []*ValidatorJson `json:"validators"`
|
||||
Balances []string `json:"balances"`
|
||||
RandaoMixes []string `json:"randao_mixes" hex:"true"`
|
||||
Slashings []string `json:"slashings"`
|
||||
PreviousEpochParticipation EpochParticipation `json:"previous_epoch_participation"`
|
||||
CurrentEpochParticipation EpochParticipation `json:"current_epoch_participation"`
|
||||
JustificationBits string `json:"justification_bits" hex:"true"`
|
||||
PreviousJustifiedCheckpoint *CheckpointJson `json:"previous_justified_checkpoint"`
|
||||
CurrentJustifiedCheckpoint *CheckpointJson `json:"current_justified_checkpoint"`
|
||||
FinalizedCheckpoint *CheckpointJson `json:"finalized_checkpoint"`
|
||||
InactivityScores []string `json:"inactivity_scores"`
|
||||
CurrentSyncCommittee *SyncCommitteeJson `json:"current_sync_committee"`
|
||||
NextSyncCommittee *SyncCommitteeJson `json:"next_sync_committee"`
|
||||
}
|
||||
|
||||
type BeaconStateBellatrixJson struct {
|
||||
GenesisTime string `json:"genesis_time"`
|
||||
GenesisValidatorsRoot string `json:"genesis_validators_root" hex:"true"`
|
||||
Slot string `json:"slot"`
|
||||
Fork *ForkJson `json:"fork"`
|
||||
LatestBlockHeader *BeaconBlockHeaderJson `json:"latest_block_header"`
|
||||
BlockRoots []string `json:"block_roots" hex:"true"`
|
||||
StateRoots []string `json:"state_roots" hex:"true"`
|
||||
HistoricalRoots []string `json:"historical_roots" hex:"true"`
|
||||
Eth1Data *Eth1DataJson `json:"eth1_data"`
|
||||
Eth1DataVotes []*Eth1DataJson `json:"eth1_data_votes"`
|
||||
Eth1DepositIndex string `json:"eth1_deposit_index"`
|
||||
Validators []*ValidatorJson `json:"validators"`
|
||||
Balances []string `json:"balances"`
|
||||
RandaoMixes []string `json:"randao_mixes" hex:"true"`
|
||||
Slashings []string `json:"slashings"`
|
||||
PreviousEpochParticipation EpochParticipation `json:"previous_epoch_participation"`
|
||||
CurrentEpochParticipation EpochParticipation `json:"current_epoch_participation"`
|
||||
JustificationBits string `json:"justification_bits" hex:"true"`
|
||||
PreviousJustifiedCheckpoint *CheckpointJson `json:"previous_justified_checkpoint"`
|
||||
CurrentJustifiedCheckpoint *CheckpointJson `json:"current_justified_checkpoint"`
|
||||
FinalizedCheckpoint *CheckpointJson `json:"finalized_checkpoint"`
|
||||
InactivityScores []string `json:"inactivity_scores"`
|
||||
CurrentSyncCommittee *SyncCommitteeJson `json:"current_sync_committee"`
|
||||
NextSyncCommittee *SyncCommitteeJson `json:"next_sync_committee"`
|
||||
LatestExecutionPayloadHeader *ExecutionPayloadHeaderJson `json:"latest_execution_payload_header"`
|
||||
}
|
||||
|
||||
type BeaconStateCapellaJson struct {
|
||||
GenesisTime string `json:"genesis_time"`
|
||||
GenesisValidatorsRoot string `json:"genesis_validators_root" hex:"true"`
|
||||
Slot string `json:"slot"`
|
||||
Fork *ForkJson `json:"fork"`
|
||||
LatestBlockHeader *BeaconBlockHeaderJson `json:"latest_block_header"`
|
||||
BlockRoots []string `json:"block_roots" hex:"true"`
|
||||
StateRoots []string `json:"state_roots" hex:"true"`
|
||||
HistoricalRoots []string `json:"historical_roots" hex:"true"`
|
||||
Eth1Data *Eth1DataJson `json:"eth1_data"`
|
||||
Eth1DataVotes []*Eth1DataJson `json:"eth1_data_votes"`
|
||||
Eth1DepositIndex string `json:"eth1_deposit_index"`
|
||||
Validators []*ValidatorJson `json:"validators"`
|
||||
Balances []string `json:"balances"`
|
||||
RandaoMixes []string `json:"randao_mixes" hex:"true"`
|
||||
Slashings []string `json:"slashings"`
|
||||
PreviousEpochParticipation EpochParticipation `json:"previous_epoch_participation"`
|
||||
CurrentEpochParticipation EpochParticipation `json:"current_epoch_participation"`
|
||||
JustificationBits string `json:"justification_bits" hex:"true"`
|
||||
PreviousJustifiedCheckpoint *CheckpointJson `json:"previous_justified_checkpoint"`
|
||||
CurrentJustifiedCheckpoint *CheckpointJson `json:"current_justified_checkpoint"`
|
||||
FinalizedCheckpoint *CheckpointJson `json:"finalized_checkpoint"`
|
||||
InactivityScores []string `json:"inactivity_scores"`
|
||||
CurrentSyncCommittee *SyncCommitteeJson `json:"current_sync_committee"`
|
||||
NextSyncCommittee *SyncCommitteeJson `json:"next_sync_committee"`
|
||||
LatestExecutionPayloadHeader *ExecutionPayloadHeaderCapellaJson `json:"latest_execution_payload_header"`
|
||||
NextWithdrawalIndex string `json:"next_withdrawal_index"`
|
||||
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
|
||||
HistoricalSummaries []*HistoricalSummaryJson `json:"historical_summaries"`
|
||||
}
|
||||
|
||||
type BeaconStateDenebJson struct {
|
||||
GenesisTime string `json:"genesis_time"`
|
||||
GenesisValidatorsRoot string `json:"genesis_validators_root" hex:"true"`
|
||||
Slot string `json:"slot"`
|
||||
Fork *ForkJson `json:"fork"`
|
||||
LatestBlockHeader *BeaconBlockHeaderJson `json:"latest_block_header"`
|
||||
BlockRoots []string `json:"block_roots" hex:"true"`
|
||||
StateRoots []string `json:"state_roots" hex:"true"`
|
||||
HistoricalRoots []string `json:"historical_roots" hex:"true"`
|
||||
Eth1Data *Eth1DataJson `json:"eth1_data"`
|
||||
Eth1DataVotes []*Eth1DataJson `json:"eth1_data_votes"`
|
||||
Eth1DepositIndex string `json:"eth1_deposit_index"`
|
||||
Validators []*ValidatorJson `json:"validators"`
|
||||
Balances []string `json:"balances"`
|
||||
RandaoMixes []string `json:"randao_mixes" hex:"true"`
|
||||
Slashings []string `json:"slashings"`
|
||||
PreviousEpochParticipation EpochParticipation `json:"previous_epoch_participation"`
|
||||
CurrentEpochParticipation EpochParticipation `json:"current_epoch_participation"`
|
||||
JustificationBits string `json:"justification_bits" hex:"true"`
|
||||
PreviousJustifiedCheckpoint *CheckpointJson `json:"previous_justified_checkpoint"`
|
||||
CurrentJustifiedCheckpoint *CheckpointJson `json:"current_justified_checkpoint"`
|
||||
FinalizedCheckpoint *CheckpointJson `json:"finalized_checkpoint"`
|
||||
InactivityScores []string `json:"inactivity_scores"`
|
||||
CurrentSyncCommittee *SyncCommitteeJson `json:"current_sync_committee"`
|
||||
NextSyncCommittee *SyncCommitteeJson `json:"next_sync_committee"`
|
||||
LatestExecutionPayloadHeader *ExecutionPayloadHeaderDenebJson `json:"latest_execution_payload_header"` // new in deneb
|
||||
NextWithdrawalIndex string `json:"next_withdrawal_index"`
|
||||
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
|
||||
HistoricalSummaries []*HistoricalSummaryJson `json:"historical_summaries"`
|
||||
}
|
||||
|
||||
type BeaconStateContainerV2Json struct {
|
||||
Phase0State *BeaconStateJson `json:"phase0_state"`
|
||||
AltairState *BeaconStateAltairJson `json:"altair_state"`
|
||||
BellatrixState *BeaconStateBellatrixJson `json:"bellatrix_state"`
|
||||
CapellaState *BeaconStateCapellaJson `json:"capella_state"`
|
||||
DenebState *BeaconStateDenebJson `json:"deneb_state"`
|
||||
}
|
||||
|
||||
type ForkJson struct {
|
||||
PreviousVersion string `json:"previous_version" hex:"true"`
|
||||
CurrentVersion string `json:"current_version" hex:"true"`
|
||||
@@ -937,11 +638,6 @@ type SyncCommitteeJson struct {
|
||||
AggregatePubkey string `json:"aggregate_pubkey" hex:"true"`
|
||||
}
|
||||
|
||||
type SyncCommitteeValidatorsJson struct {
|
||||
Validators []string `json:"validators"`
|
||||
ValidatorAggregates [][]string `json:"validator_aggregates"`
|
||||
}
|
||||
|
||||
type PendingAttestationJson struct {
|
||||
AggregationBits string `json:"aggregation_bits" hex:"true"`
|
||||
Data *AttestationDataJson `json:"data"`
|
||||
@@ -1176,6 +872,14 @@ type PayloadAttributesV2Json struct {
|
||||
Withdrawals []*WithdrawalJson `json:"withdrawals"`
|
||||
}
|
||||
|
||||
type EventBlobSidecarJson struct {
|
||||
BlockRoot string `json:"block_root" hex:"true"`
|
||||
Index string `json:"index"`
|
||||
Slot string `json:"slot"`
|
||||
KzgCommitment string `json:"kzg_commitment" hex:"true"`
|
||||
VersionedHash string `json:"versioned_hash" hex:"true"`
|
||||
}
|
||||
|
||||
// ---------------
|
||||
// Error handling.
|
||||
// ---------------
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
@@ -22,6 +22,7 @@ go_library(
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/operations/synccommittee:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//beacon-chain/sync:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
@@ -41,3 +42,17 @@ go_library(
|
||||
"@org_golang_x_sync//errgroup:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["validator_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/validator:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
beaconState "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
@@ -311,7 +312,7 @@ func (s *Service) AggregatedSigAndAggregationBits(
|
||||
|
||||
// AssignValidatorToSubnet checks the status and pubkey of a particular validator
|
||||
// to discern whether persistent subnets need to be registered for them.
|
||||
func AssignValidatorToSubnet(pubkey []byte, status validator.ValidatorStatus) {
|
||||
func AssignValidatorToSubnet(pubkey []byte, status validator.Status) {
|
||||
if status != validator.Active {
|
||||
return
|
||||
}
|
||||
@@ -486,3 +487,122 @@ func (s *Service) SubmitSyncMessage(ctx context.Context, msg *ethpb.SyncCommitte
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RegisterSyncSubnetCurrentPeriod registers a persistent subnet for the current sync committee period.
|
||||
func RegisterSyncSubnetCurrentPeriod(s beaconState.BeaconState, epoch primitives.Epoch, pubKey []byte, status validator.Status) error {
|
||||
committee, err := s.CurrentSyncCommittee()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
syncCommPeriod := slots.SyncCommitteePeriod(epoch)
|
||||
registerSyncSubnet(epoch, syncCommPeriod, pubKey, committee, status)
|
||||
return nil
|
||||
}
|
||||
|
||||
// RegisterSyncSubnetCurrentPeriodProto registers a persistent subnet for the current sync committee period.
|
||||
func RegisterSyncSubnetCurrentPeriodProto(s beaconState.BeaconState, epoch primitives.Epoch, pubKey []byte, status ethpb.ValidatorStatus) error {
|
||||
committee, err := s.CurrentSyncCommittee()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
syncCommPeriod := slots.SyncCommitteePeriod(epoch)
|
||||
registerSyncSubnetProto(epoch, syncCommPeriod, pubKey, committee, status)
|
||||
return nil
|
||||
}
|
||||
|
||||
// RegisterSyncSubnetNextPeriod registers a persistent subnet for the next sync committee period.
|
||||
func RegisterSyncSubnetNextPeriod(s beaconState.BeaconState, epoch primitives.Epoch, pubKey []byte, status validator.Status) error {
|
||||
committee, err := s.NextSyncCommittee()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
syncCommPeriod := slots.SyncCommitteePeriod(epoch)
|
||||
registerSyncSubnet(epoch, syncCommPeriod+1, pubKey, committee, status)
|
||||
return nil
|
||||
}
|
||||
|
||||
// RegisterSyncSubnetNextPeriodProto registers a persistent subnet for the next sync committee period.
|
||||
func RegisterSyncSubnetNextPeriodProto(s beaconState.BeaconState, epoch primitives.Epoch, pubKey []byte, status ethpb.ValidatorStatus) error {
|
||||
committee, err := s.NextSyncCommittee()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
syncCommPeriod := slots.SyncCommitteePeriod(epoch)
|
||||
registerSyncSubnetProto(epoch, syncCommPeriod+1, pubKey, committee, status)
|
||||
return nil
|
||||
}
|
||||
|
||||
// registerSyncSubnet checks the status and pubkey of a particular validator
|
||||
// to discern whether persistent subnets need to be registered for them.
|
||||
func registerSyncSubnet(
|
||||
currEpoch primitives.Epoch,
|
||||
syncPeriod uint64,
|
||||
pubkey []byte,
|
||||
syncCommittee *ethpb.SyncCommittee,
|
||||
status validator.Status,
|
||||
) {
|
||||
if status != validator.Active && status != validator.ActiveExiting {
|
||||
return
|
||||
}
|
||||
registerSyncSubnetInternal(currEpoch, syncPeriod, pubkey, syncCommittee)
|
||||
}
|
||||
|
||||
func registerSyncSubnetProto(
|
||||
currEpoch primitives.Epoch,
|
||||
syncPeriod uint64,
|
||||
pubkey []byte,
|
||||
syncCommittee *ethpb.SyncCommittee,
|
||||
status ethpb.ValidatorStatus,
|
||||
) {
|
||||
if status != ethpb.ValidatorStatus_ACTIVE && status != ethpb.ValidatorStatus_EXITING {
|
||||
return
|
||||
}
|
||||
registerSyncSubnetInternal(currEpoch, syncPeriod, pubkey, syncCommittee)
|
||||
}
|
||||
|
||||
func registerSyncSubnetInternal(
|
||||
currEpoch primitives.Epoch,
|
||||
syncPeriod uint64,
|
||||
pubkey []byte,
|
||||
syncCommittee *ethpb.SyncCommittee,
|
||||
) {
|
||||
startEpoch := primitives.Epoch(syncPeriod * uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod))
|
||||
currPeriod := slots.SyncCommitteePeriod(currEpoch)
|
||||
endEpoch := startEpoch + params.BeaconConfig().EpochsPerSyncCommitteePeriod
|
||||
_, _, ok, expTime := cache.SyncSubnetIDs.GetSyncCommitteeSubnets(pubkey, startEpoch)
|
||||
if ok && expTime.After(prysmTime.Now()) {
|
||||
return
|
||||
}
|
||||
firstValidEpoch, err := startEpoch.SafeSub(params.BeaconConfig().SyncCommitteeSubnetCount)
|
||||
if err != nil {
|
||||
firstValidEpoch = 0
|
||||
}
|
||||
// If we are processing for a future period, we only
|
||||
// add to the relevant subscription once we are at the valid
|
||||
// bound.
|
||||
if syncPeriod != currPeriod && currEpoch < firstValidEpoch {
|
||||
return
|
||||
}
|
||||
subs := subnetsFromCommittee(pubkey, syncCommittee)
|
||||
// Handle underflow in the event the current epoch is
|
||||
// greater than the end epoch. This is an impossible
|
||||
// condition, so it is a defensive check.
|
||||
epochsToWatch, err := endEpoch.SafeSub(uint64(currEpoch))
|
||||
if err != nil {
|
||||
epochsToWatch = 0
|
||||
}
|
||||
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
|
||||
totalDuration := epochDuration * time.Duration(epochsToWatch) * time.Second
|
||||
cache.SyncSubnetIDs.AddSyncCommitteeSubnets(pubkey, startEpoch, subs, totalDuration)
|
||||
}
|
||||
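Editor's sketch (not part of the diff): the cache entry added above expires after epochsToWatch epochs. The duration arithmetic, shown standalone and assuming mainnet values of 32 slots per epoch, 12 seconds per slot and a 256-epoch sync committee period:

package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		slotsPerEpoch  = 32  // mainnet SLOTS_PER_EPOCH (assumed)
		secondsPerSlot = 12  // mainnet SECONDS_PER_SLOT (assumed)
		epochsToWatch  = 256 // e.g. a full EPOCHS_PER_SYNC_COMMITTEE_PERIOD
	)
	// Mirrors totalDuration in registerSyncSubnetInternal: epochDuration holds
	// the raw second count per epoch, and the trailing time.Second converts
	// the product into wall-clock time.
	epochDuration := time.Duration(slotsPerEpoch * secondsPerSlot)
	totalDuration := epochDuration * time.Duration(epochsToWatch) * time.Second
	fmt.Println(totalDuration) // 27h18m24s for the values above
}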
|
||||
// subnetsFromCommittee retrieves the relevant subnets for the chosen validator.
|
||||
func subnetsFromCommittee(pubkey []byte, comm *ethpb.SyncCommittee) []uint64 {
|
||||
positions := make([]uint64, 0)
|
||||
for i, pkey := range comm.Pubkeys {
|
||||
if bytes.Equal(pubkey, pkey) {
|
||||
positions = append(positions, uint64(i)/(params.BeaconConfig().SyncCommitteeSize/params.BeaconConfig().SyncCommitteeSubnetCount))
|
||||
}
|
||||
}
|
||||
return positions
|
||||
}
|
||||
|
||||
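Editor's sketch (not part of the diff): the position-to-subnet arithmetic used by subnetsFromCommittee, assuming mainnet values SYNC_COMMITTEE_SIZE = 512 and SYNC_COMMITTEE_SUBNET_COUNT = 4, so each run of 128 consecutive committee positions maps to one subnet:

package main

import "fmt"

func main() {
	const (
		syncCommitteeSize        = uint64(512) // mainnet SYNC_COMMITTEE_SIZE (assumed)
		syncCommitteeSubnetCount = uint64(4)   // mainnet SYNC_COMMITTEE_SUBNET_COUNT (assumed)
	)
	for _, pos := range []uint64{0, 127, 128, 300, 511} {
		// Same computation as subnetsFromCommittee: integer-divide the
		// committee position by the per-subnet slice of the committee.
		subnet := pos / (syncCommitteeSize / syncCommitteeSubnetCount)
		fmt.Printf("committee position %d -> subnet %d\n", pos, subnet)
	}
}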
65
beacon-chain/rpc/core/validator_test.go
Normal file
@@ -0,0 +1,65 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/validator"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
)
|
||||
|
||||
func TestRegisterSyncSubnetProto(t *testing.T) {
|
||||
k := pubKey(3)
|
||||
committee := make([][]byte, 0)
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
committee = append(committee, pubKey(uint64(i)))
|
||||
}
|
||||
sCommittee := ðpb.SyncCommittee{
|
||||
Pubkeys: committee,
|
||||
}
|
||||
registerSyncSubnetProto(0, 0, k, sCommittee, ethpb.ValidatorStatus_ACTIVE)
|
||||
coms, _, ok, exp := cache.SyncSubnetIDs.GetSyncCommitteeSubnets(k, 0)
|
||||
require.Equal(t, true, ok, "No cache entry found for validator")
|
||||
assert.Equal(t, uint64(1), uint64(len(coms)))
|
||||
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
|
||||
totalTime := time.Duration(params.BeaconConfig().EpochsPerSyncCommitteePeriod) * epochDuration * time.Second
|
||||
receivedTime := time.Until(exp.Round(time.Second)).Round(time.Second)
|
||||
if receivedTime < totalTime {
|
||||
t.Fatalf("Expiration time of %f was less than expected duration of %f ", receivedTime.Seconds(), totalTime.Seconds())
|
||||
}
|
||||
}
|
||||
|
||||
func TestRegisterSyncSubnet(t *testing.T) {
|
||||
k := pubKey(3)
|
||||
committee := make([][]byte, 0)
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
committee = append(committee, pubKey(uint64(i)))
|
||||
}
|
||||
sCommittee := ðpb.SyncCommittee{
|
||||
Pubkeys: committee,
|
||||
}
|
||||
registerSyncSubnet(0, 0, k, sCommittee, validator.Active)
|
||||
coms, _, ok, exp := cache.SyncSubnetIDs.GetSyncCommitteeSubnets(k, 0)
|
||||
require.Equal(t, true, ok, "No cache entry found for validator")
|
||||
assert.Equal(t, uint64(1), uint64(len(coms)))
|
||||
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
|
||||
totalTime := time.Duration(params.BeaconConfig().EpochsPerSyncCommitteePeriod) * epochDuration * time.Second
|
||||
receivedTime := time.Until(exp.Round(time.Second)).Round(time.Second)
|
||||
if receivedTime < totalTime {
|
||||
t.Fatalf("Expiration time of %f was less than expected duration of %f ", receivedTime.Seconds(), totalTime.Seconds())
|
||||
}
|
||||
}
|
||||
|
||||
// pubKey is a helper to generate a well-formed public key.
|
||||
func pubKey(i uint64) []byte {
|
||||
pubKey := make([]byte, params.BeaconConfig().BLSPubkeyLength)
|
||||
binary.LittleEndian.PutUint64(pubKey, i)
|
||||
return pubKey
|
||||
}
|
||||
@@ -3,24 +3,21 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"blinded_blocks.go",
|
||||
"blocks.go",
|
||||
"config.go",
|
||||
"handlers.go",
|
||||
"handlers_pool.go",
|
||||
"handlers_state.go",
|
||||
"handlers_validator.go",
|
||||
"log.go",
|
||||
"pool.go",
|
||||
"server.go",
|
||||
"state.go",
|
||||
"structs.go",
|
||||
"sync_committee.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/beacon",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api:go_default_library",
|
||||
"//api/grpc:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
@@ -58,20 +55,17 @@ go_library(
|
||||
"//network/forks:go_default_library",
|
||||
"//network/http:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/eth/v2:go_default_library",
|
||||
"//proto/migration:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_golang_protobuf//ptypes/empty",
|
||||
"@com_github_gorilla_mux//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
"@org_golang_google_grpc//:go_default_library",
|
||||
"@org_golang_google_grpc//codes:go_default_library",
|
||||
"@org_golang_google_grpc//metadata:go_default_library",
|
||||
"@org_golang_google_grpc//status:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
|
||||
],
|
||||
@@ -84,13 +78,12 @@ go_test(
|
||||
"blocks_test.go",
|
||||
"config_test.go",
|
||||
"handlers_pool_test.go",
|
||||
"handlers_state_test.go",
|
||||
"handlers_test.go",
|
||||
"handlers_validators_test.go",
|
||||
"init_test.go",
|
||||
"pool_test.go",
|
||||
"server_test.go",
|
||||
"state_test.go",
|
||||
"sync_committee_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
@@ -132,7 +125,6 @@ go_test(
|
||||
"//network/http:go_default_library",
|
||||
"//proto/eth/service:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/eth/v2:go_default_library",
|
||||
"//proto/migration:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
@@ -145,13 +137,9 @@ go_test(
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_golang_mock//gomock:go_default_library",
|
||||
"@com_github_gorilla_mux//:go_default_library",
|
||||
"@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_stretchr_testify//mock:go_default_library",
|
||||
"@org_golang_google_grpc//:go_default_library",
|
||||
"@org_golang_google_grpc//codes:go_default_library",
|
||||
"@org_golang_google_grpc//status:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -1,629 +0,0 @@
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/api"
|
||||
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/migration"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
"go.opencensus.io/trace"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// GetBlindedBlock retrieves the blinded block for the given block ID.
|
||||
func (bs *Server) GetBlindedBlock(ctx context.Context, req *ethpbv1.BlockRequest) (*ethpbv2.BlindedBlockResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.GetBlindedBlock")
|
||||
defer span.End()
|
||||
|
||||
blk, err := bs.Blocker.Block(ctx, req.BlockId)
|
||||
err = handleGetBlockError(blk, err)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blkRoot, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
if err := grpc.SetHeader(ctx, metadata.Pairs(api.VersionHeader, version.String(blk.Version()))); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not set "+api.VersionHeader+" header: %v", err)
|
||||
}
|
||||
result, err := getBlindedBlockPhase0(blk)
|
||||
if result != nil {
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
|
||||
}
|
||||
result, err = getBlindedBlockAltair(blk)
|
||||
if result != nil {
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
|
||||
}
|
||||
result, err = bs.getBlindedBlockBellatrix(ctx, blk)
|
||||
if result != nil {
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
|
||||
}
|
||||
result, err = bs.getBlindedBlockCapella(ctx, blk)
|
||||
if result != nil {
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
|
||||
}
|
||||
result, err = bs.getBlindedBlockDeneb(ctx, blk)
|
||||
if result != nil {
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
|
||||
}
|
||||
|
||||
return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
|
||||
}
|
||||
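Editor's sketch (not part of the diff): GetBlindedBlock above tries one getter per fork, oldest first, and treats consensus_types.ErrUnsupportedField as "wrong fork, try the next one" while any other error aborts. The same control flow factored into a loop, reusing the imports of this file; the type and function names here are hypothetical.

// blindedBlockGetter is a hypothetical helper type; the real handler inlines
// one call per fork instead of looping.
type blindedBlockGetter func(interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlindedBlockResponse, error)

// firstSupportedBlindedBlock walks the getters in fork order and stops at the
// first one that recognizes the block type.
func firstSupportedBlindedBlock(blk interfaces.ReadOnlySignedBeaconBlock, getters []blindedBlockGetter) (*ethpbv2.BlindedBlockResponse, error) {
	for _, get := range getters {
		result, err := get(blk)
		if result != nil {
			return result, nil
		}
		// Only ErrUnsupportedField means the block belongs to another fork;
		// any other error is a real failure.
		if !errors.Is(err, consensus_types.ErrUnsupportedField) {
			return nil, err
		}
	}
	return nil, errors.Errorf("unknown block type %T", blk)
}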
|
||||
// GetBlindedBlockSSZ returns the SSZ-serialized version of the blinded beacon block for the given block ID.
|
||||
func (bs *Server) GetBlindedBlockSSZ(ctx context.Context, req *ethpbv1.BlockRequest) (*ethpbv2.SSZContainer, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.GetBlindedBlockSSZ")
|
||||
defer span.End()
|
||||
|
||||
blk, err := bs.Blocker.Block(ctx, req.BlockId)
|
||||
err = handleGetBlockError(blk, err)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blkRoot, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
|
||||
result, err := getSSZBlockPhase0(blk)
|
||||
if result != nil {
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
result, err = getSSZBlockAltair(blk)
|
||||
if result != nil {
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
result, err = bs.getBlindedSSZBlockBellatrix(ctx, blk)
|
||||
if result != nil {
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
result, err = bs.getBlindedSSZBlockCapella(ctx, blk)
|
||||
if result != nil {
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
result, err = bs.getBlindedSSZBlockDeneb(ctx, blk)
|
||||
if result != nil {
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
|
||||
return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
|
||||
}
|
||||
|
||||
func getBlindedBlockPhase0(blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlindedBlockResponse, error) {
|
||||
phase0Blk, err := blk.PbPhase0Block()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if phase0Blk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
v1Blk, err := migration.SignedBeaconBlock(blk)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get signed beacon block")
|
||||
}
|
||||
return ðpbv2.BlindedBlockResponse{
|
||||
Version: ethpbv2.Version_PHASE0,
|
||||
Data: ðpbv2.SignedBlindedBeaconBlockContainer{
|
||||
Message: ðpbv2.SignedBlindedBeaconBlockContainer_Phase0Block{Phase0Block: v1Blk.Block},
|
||||
Signature: v1Blk.Signature,
|
||||
},
|
||||
ExecutionOptimistic: false,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func getBlindedBlockAltair(blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlindedBlockResponse, error) {
|
||||
altairBlk, err := blk.PbAltairBlock()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if altairBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlk.Block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get signed beacon block")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
return ðpbv2.BlindedBlockResponse{
|
||||
Version: ethpbv2.Version_ALTAIR,
|
||||
Data: ðpbv2.SignedBlindedBeaconBlockContainer{
|
||||
Message: ðpbv2.SignedBlindedBeaconBlockContainer_AltairBlock{AltairBlock: v2Blk},
|
||||
Signature: sig[:],
|
||||
},
|
||||
ExecutionOptimistic: false,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (bs *Server) getBlindedBlockBellatrix(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlindedBlockResponse, error) {
|
||||
bellatrixBlk, err := blk.PbBellatrixBlock()
|
||||
if err != nil {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
if blindedBellatrixBlk, err := blk.PbBlindedBellatrixBlock(); err == nil {
|
||||
if blindedBellatrixBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockBlindedBellatrixToV2Blinded(blindedBellatrixBlk.Block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not convert beacon block")
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
return ðpbv2.BlindedBlockResponse{
|
||||
Version: ethpbv2.Version_BELLATRIX,
|
||||
Data: ðpbv2.SignedBlindedBeaconBlockContainer{
|
||||
Message: ðpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: v2Blk},
|
||||
Signature: sig[:],
|
||||
},
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
}, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if bellatrixBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
blindedBlkInterface, err := blk.ToBlinded()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not convert block to blinded block")
|
||||
}
|
||||
blindedBellatrixBlock, err := blindedBlkInterface.PbBlindedBellatrixBlock()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get signed beacon block")
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockBlindedBellatrixToV2Blinded(blindedBellatrixBlock.Block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not convert beacon block")
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
return ðpbv2.BlindedBlockResponse{
|
||||
Version: ethpbv2.Version_BELLATRIX,
|
||||
Data: ðpbv2.SignedBlindedBeaconBlockContainer{
|
||||
Message: ðpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: v2Blk},
|
||||
Signature: sig[:],
|
||||
},
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (bs *Server) getBlindedBlockCapella(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlindedBlockResponse, error) {
|
||||
capellaBlk, err := blk.PbCapellaBlock()
|
||||
if err != nil {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
if blindedCapellaBlk, err := blk.PbBlindedCapellaBlock(); err == nil {
|
||||
if blindedCapellaBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockBlindedCapellaToV2Blinded(blindedCapellaBlk.Block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Could not convert beacon block")
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
return ðpbv2.BlindedBlockResponse{
|
||||
Version: ethpbv2.Version_CAPELLA,
|
||||
Data: ðpbv2.SignedBlindedBeaconBlockContainer{
|
||||
Message: ðpbv2.SignedBlindedBeaconBlockContainer_CapellaBlock{CapellaBlock: v2Blk},
|
||||
Signature: sig[:],
|
||||
},
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
}, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if capellaBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
blindedBlkInterface, err := blk.ToBlinded()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not convert block to blinded block")
|
||||
}
|
||||
blindedCapellaBlock, err := blindedBlkInterface.PbBlindedCapellaBlock()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get signed beacon block")
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockBlindedCapellaToV2Blinded(blindedCapellaBlock.Block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not convert beacon block")
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
return ðpbv2.BlindedBlockResponse{
|
||||
Version: ethpbv2.Version_CAPELLA,
|
||||
Data: ðpbv2.SignedBlindedBeaconBlockContainer{
|
||||
Message: ðpbv2.SignedBlindedBeaconBlockContainer_CapellaBlock{CapellaBlock: v2Blk},
|
||||
Signature: sig[:],
|
||||
},
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (bs *Server) getBlindedBlockDeneb(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlindedBlockResponse, error) {
|
||||
denebBlk, err := blk.PbDenebBlock()
|
||||
if err != nil {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
if blindedDenebBlk, err := blk.PbBlindedDenebBlock(); err == nil {
|
||||
if blindedDenebBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockBlindedDenebToV2Blinded(blindedDenebBlk.Message)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Could not convert beacon block")
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
return ðpbv2.BlindedBlockResponse{
|
||||
Version: ethpbv2.Version_DENEB,
|
||||
Data: ðpbv2.SignedBlindedBeaconBlockContainer{
|
||||
Message: ðpbv2.SignedBlindedBeaconBlockContainer_DenebBlock{DenebBlock: v2Blk},
|
||||
Signature: sig[:],
|
||||
},
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
}, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if denebBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
blindedBlkInterface, err := blk.ToBlinded()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not convert block to blinded block")
|
||||
}
|
||||
blindedDenebBlock, err := blindedBlkInterface.PbBlindedDenebBlock()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get signed beacon block")
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockBlindedDenebToV2Blinded(blindedDenebBlock.Message)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not convert beacon block")
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
return ðpbv2.BlindedBlockResponse{
|
||||
Version: ethpbv2.Version_DENEB,
|
||||
Data: ðpbv2.SignedBlindedBeaconBlockContainer{
|
||||
Message: ðpbv2.SignedBlindedBeaconBlockContainer_DenebBlock{DenebBlock: v2Blk},
|
||||
Signature: sig[:],
|
||||
},
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (bs *Server) getBlindedSSZBlockBellatrix(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
|
||||
bellatrixBlk, err := blk.PbBellatrixBlock()
|
||||
if err != nil {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
if blindedBellatrixBlk, err := blk.PbBlindedBellatrixBlock(); err == nil {
|
||||
if blindedBellatrixBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockBlindedBellatrixToV2Blinded(blindedBellatrixBlk.Block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get signed beacon block")
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
data := ðpbv2.SignedBlindedBeaconBlockBellatrix{
|
||||
Message: v2Blk,
|
||||
Signature: sig[:],
|
||||
}
|
||||
sszData, err := data.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not marshal block into SSZ")
|
||||
}
|
||||
return ðpbv2.SSZContainer{
|
||||
Version: ethpbv2.Version_BELLATRIX,
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
Data: sszData,
|
||||
}, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if bellatrixBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
blindedBlkInterface, err := blk.ToBlinded()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not convert block to blinded block")
|
||||
}
|
||||
blindedBellatrixBlock, err := blindedBlkInterface.PbBlindedBellatrixBlock()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get signed beacon block")
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockBlindedBellatrixToV2Blinded(blindedBellatrixBlock.Block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get signed beacon block")
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
data := ðpbv2.SignedBlindedBeaconBlockBellatrix{
|
||||
Message: v2Blk,
|
||||
Signature: sig[:],
|
||||
}
|
||||
sszData, err := data.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not marshal block into SSZ")
|
||||
}
|
||||
return ðpbv2.SSZContainer{Version: ethpbv2.Version_BELLATRIX, ExecutionOptimistic: isOptimistic, Data: sszData}, nil
|
||||
}
|
||||
|
||||
func (bs *Server) getBlindedSSZBlockCapella(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
|
||||
capellaBlk, err := blk.PbCapellaBlock()
|
||||
if err != nil {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
if blindedCapellaBlk, err := blk.PbBlindedCapellaBlock(); err == nil {
|
||||
if blindedCapellaBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockBlindedCapellaToV2Blinded(blindedCapellaBlk.Block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get signed beacon block")
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
data := ðpbv2.SignedBlindedBeaconBlockCapella{
|
||||
Message: v2Blk,
|
||||
Signature: sig[:],
|
||||
}
|
||||
sszData, err := data.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not marshal block into SSZ")
|
||||
}
|
||||
return ðpbv2.SSZContainer{
|
||||
Version: ethpbv2.Version_CAPELLA,
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
Data: sszData,
|
||||
}, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if capellaBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
blindedBlkInterface, err := blk.ToBlinded()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not convert block to blinded block")
|
||||
}
|
||||
blindedCapellaBlock, err := blindedBlkInterface.PbBlindedCapellaBlock()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get signed beacon block")
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockBlindedCapellaToV2Blinded(blindedCapellaBlock.Block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get signed beacon block")
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
data := ðpbv2.SignedBlindedBeaconBlockCapella{
|
||||
Message: v2Blk,
|
||||
Signature: sig[:],
|
||||
}
|
||||
sszData, err := data.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not marshal block into SSZ")
|
||||
}
|
||||
return ðpbv2.SSZContainer{Version: ethpbv2.Version_CAPELLA, ExecutionOptimistic: isOptimistic, Data: sszData}, nil
|
||||
}
|
||||
|
||||
func (bs *Server) getBlindedSSZBlockDeneb(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
|
||||
denebBlk, err := blk.PbDenebBlock()
|
||||
if err != nil {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
if blindedDenebBlk, err := blk.PbBlindedDenebBlock(); err == nil {
|
||||
if blindedDenebBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockBlindedDenebToV2Blinded(blindedDenebBlk.Message)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get signed beacon block")
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
data := ðpbv2.SignedBlindedBeaconBlockDeneb{
|
||||
Message: v2Blk,
|
||||
Signature: sig[:],
|
||||
}
|
||||
sszData, err := data.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not marshal block into SSZ")
|
||||
}
|
||||
return ðpbv2.SSZContainer{
|
||||
Version: ethpbv2.Version_DENEB,
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
Data: sszData,
|
||||
}, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if denebBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
blindedBlkInterface, err := blk.ToBlinded()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not convert block to blinded block")
|
||||
}
|
||||
blindedDenebBlock, err := blindedBlkInterface.PbBlindedDenebBlock()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get signed beacon block")
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockBlindedDenebToV2Blinded(blindedDenebBlock.Message)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get signed beacon block")
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
data := ðpbv2.SignedBlindedBeaconBlockDeneb{
|
||||
Message: v2Blk,
|
||||
Signature: sig[:],
|
||||
}
|
||||
sszData, err := data.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not marshal block into SSZ")
|
||||
}
|
||||
return ðpbv2.SSZContainer{Version: ethpbv2.Version_DENEB, ExecutionOptimistic: isOptimistic, Data: sszData}, nil
|
||||
}
|
||||
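Editor's sketch (not part of the diff): each getBlindedSSZBlock* helper above ends with the same steps of hashing the block root, checking optimistic status and SSZ-marshalling the signed container. A hypothetical shared tail, reusing the Server fields and imports already used in this file:

// sszMarshaler covers the signed blinded block containers marshalled above.
type sszMarshaler interface {
	MarshalSSZ() ([]byte, error)
}

// blindedSSZContainer performs the shared tail: hash the block root, look up
// the optimistic status, marshal to SSZ and wrap the result in an SSZContainer.
func (bs *Server) blindedSSZContainer(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock, signed sszMarshaler, v ethpbv2.Version) (*ethpbv2.SSZContainer, error) {
	root, err := blk.Block().HashTreeRoot()
	if err != nil {
		return nil, errors.Wrap(err, "could not get block root")
	}
	isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
	if err != nil {
		return nil, errors.Wrap(err, "could not check if block is optimistic")
	}
	sszData, err := signed.MarshalSSZ()
	if err != nil {
		return nil, errors.Wrap(err, "could not marshal block into SSZ")
	}
	return &ethpbv2.SSZContainer{Version: v, ExecutionOptimistic: isOptimistic, Data: sszData}, nil
}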
@@ -1,342 +1 @@
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/migration"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
func TestServer_GetBlindedBlock(t *testing.T) {
|
||||
stream := &runtime.ServerTransportStream{}
|
||||
ctx := grpc.NewContextWithServerTransportStream(context.Background(), stream)
|
||||
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
b := util.NewBeaconBlock()
|
||||
blk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
bs := &Server{
|
||||
FinalizationFetcher: &mock.ChainService{},
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
|
||||
}
|
||||
|
||||
expected, err := migration.V1Alpha1ToV1SignedBlock(b)
|
||||
require.NoError(t, err)
|
||||
resp, err := bs.GetBlindedBlock(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
phase0Block, ok := resp.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_Phase0Block)
|
||||
require.Equal(t, true, ok)
|
||||
assert.DeepEqual(t, expected.Block, phase0Block.Phase0Block)
|
||||
assert.Equal(t, ethpbv2.Version_PHASE0, resp.Version)
|
||||
})
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
b := util.NewBeaconBlockAltair()
|
||||
blk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
bs := &Server{
|
||||
FinalizationFetcher: &mock.ChainService{},
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
|
||||
}
|
||||
|
||||
expected, err := migration.V1Alpha1BeaconBlockAltairToV2(b.Block)
|
||||
require.NoError(t, err)
|
||||
resp, err := bs.GetBlindedBlock(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
altairBlock, ok := resp.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_AltairBlock)
|
||||
require.Equal(t, true, ok)
|
||||
assert.DeepEqual(t, expected, altairBlock.AltairBlock)
|
||||
assert.Equal(t, ethpbv2.Version_ALTAIR, resp.Version)
|
||||
})
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
b := util.NewBlindedBeaconBlockBellatrix()
|
||||
blk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockChainService := &mock.ChainService{}
|
||||
bs := &Server{
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
}
|
||||
|
||||
expected, err := migration.V1Alpha1BeaconBlockBlindedBellatrixToV2Blinded(b.Block)
|
||||
require.NoError(t, err)
|
||||
resp, err := bs.GetBlindedBlock(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
bellatrixBlock, ok := resp.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock)
|
||||
require.Equal(t, true, ok)
|
||||
assert.DeepEqual(t, expected, bellatrixBlock.BellatrixBlock)
|
||||
assert.Equal(t, ethpbv2.Version_BELLATRIX, resp.Version)
|
||||
})
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
b := util.NewBlindedBeaconBlockCapella()
|
||||
blk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockChainService := &mock.ChainService{}
|
||||
bs := &Server{
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
}
|
||||
|
||||
expected, err := migration.V1Alpha1BeaconBlockBlindedCapellaToV2Blinded(b.Block)
|
||||
require.NoError(t, err)
|
||||
resp, err := bs.GetBlindedBlock(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
capellaBlock, ok := resp.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_CapellaBlock)
|
||||
require.Equal(t, true, ok)
|
||||
assert.DeepEqual(t, expected, capellaBlock.CapellaBlock)
|
||||
assert.Equal(t, ethpbv2.Version_CAPELLA, resp.Version)
|
||||
})
|
||||
t.Run("Deneb", func(t *testing.T) {
|
||||
b := util.NewBlindedBeaconBlockDeneb()
|
||||
blk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockChainService := &mock.ChainService{}
|
||||
bs := &Server{
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
}
|
||||
|
||||
expected, err := migration.V1Alpha1BeaconBlockBlindedDenebToV2Blinded(b.Message)
|
||||
require.NoError(t, err)
|
||||
resp, err := bs.GetBlindedBlock(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
denebBlock, ok := resp.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_DenebBlock)
|
||||
require.Equal(t, true, ok)
|
||||
assert.DeepEqual(t, expected, denebBlock.DenebBlock)
|
||||
assert.Equal(t, ethpbv2.Version_DENEB, resp.Version)
|
||||
})
|
||||
t.Run("execution optimistic", func(t *testing.T) {
|
||||
b := util.NewBlindedBeaconBlockBellatrix()
|
||||
blk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
r, err := blk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
mockChainService := &mock.ChainService{
|
||||
OptimisticRoots: map[[32]byte]bool{r: true},
|
||||
}
|
||||
bs := &Server{
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
}
|
||||
|
||||
resp, err := bs.GetBlindedBlock(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, resp.ExecutionOptimistic)
|
||||
})
|
||||
t.Run("finalized", func(t *testing.T) {
|
||||
b := util.NewBeaconBlock()
|
||||
blk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
mockChainService := &mock.ChainService{
|
||||
FinalizedRoots: map[[32]byte]bool{root: true},
|
||||
}
|
||||
bs := &Server{
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
|
||||
}
|
||||
|
||||
resp, err := bs.GetBlindedBlock(ctx, ðpbv1.BlockRequest{BlockId: root[:]})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, resp.Finalized)
|
||||
})
|
||||
t.Run("not finalized", func(t *testing.T) {
|
||||
b := util.NewBeaconBlock()
|
||||
blk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
mockChainService := &mock.ChainService{
|
||||
FinalizedRoots: map[[32]byte]bool{root: false},
|
||||
}
|
||||
bs := &Server{
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
|
||||
}
|
||||
|
||||
resp, err := bs.GetBlindedBlock(ctx, ðpbv1.BlockRequest{BlockId: root[:]})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
}
|
||||
|
||||
func TestServer_GetBlindedBlockSSZ(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
b := util.NewBeaconBlock()
|
||||
blk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
bs := &Server{
|
||||
FinalizationFetcher: &mock.ChainService{},
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
|
||||
}
|
||||
|
||||
expected, err := blk.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
resp, err := bs.GetBlindedBlockSSZ(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, resp)
|
||||
assert.DeepEqual(t, expected, resp.Data)
|
||||
assert.Equal(t, ethpbv2.Version_PHASE0, resp.Version)
|
||||
})
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
b := util.NewBeaconBlockAltair()
|
||||
blk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
bs := &Server{
|
||||
FinalizationFetcher: &mock.ChainService{},
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
|
||||
}
|
||||
|
||||
expected, err := blk.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
resp, err := bs.GetBlindedBlockSSZ(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, resp)
|
||||
assert.DeepEqual(t, expected, resp.Data)
|
||||
assert.Equal(t, ethpbv2.Version_ALTAIR, resp.Version)
|
||||
})
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
b := util.NewBlindedBeaconBlockBellatrix()
|
||||
blk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockChainService := &mock.ChainService{}
|
||||
bs := &Server{
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
}
|
||||
|
||||
expected, err := blk.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
resp, err := bs.GetBlindedBlockSSZ(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, resp)
|
||||
assert.DeepEqual(t, expected, resp.Data)
|
||||
assert.Equal(t, ethpbv2.Version_BELLATRIX, resp.Version)
|
||||
})
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
b := util.NewBlindedBeaconBlockCapella()
|
||||
blk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockChainService := &mock.ChainService{}
|
||||
bs := &Server{
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
}
|
||||
|
||||
expected, err := blk.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
resp, err := bs.GetBlindedBlockSSZ(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, resp)
|
||||
assert.DeepEqual(t, expected, resp.Data)
|
||||
assert.Equal(t, ethpbv2.Version_CAPELLA, resp.Version)
|
||||
})
|
||||
t.Run("Deneb", func(t *testing.T) {
|
||||
b := util.NewBlindedBeaconBlockDeneb()
|
||||
blk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockChainService := &mock.ChainService{}
|
||||
bs := &Server{
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
}
|
||||
|
||||
expected, err := blk.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
resp, err := bs.GetBlindedBlockSSZ(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, resp)
|
||||
assert.DeepEqual(t, expected, resp.Data)
|
||||
assert.Equal(t, ethpbv2.Version_DENEB, resp.Version)
|
||||
})
|
||||
t.Run("execution optimistic", func(t *testing.T) {
|
||||
b := util.NewBlindedBeaconBlockBellatrix()
|
||||
blk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
r, err := blk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
mockChainService := &mock.ChainService{
|
||||
OptimisticRoots: map[[32]byte]bool{r: true},
|
||||
}
|
||||
bs := &Server{
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
}
|
||||
|
||||
resp, err := bs.GetBlindedBlockSSZ(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, resp.ExecutionOptimistic)
|
||||
})
|
||||
t.Run("finalized", func(t *testing.T) {
|
||||
b := util.NewBeaconBlock()
|
||||
blk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
mockChainService := &mock.ChainService{
|
||||
FinalizedRoots: map[[32]byte]bool{root: true},
|
||||
}
|
||||
bs := &Server{
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
|
||||
}
|
||||
|
||||
resp, err := bs.GetBlindedBlockSSZ(ctx, ðpbv1.BlockRequest{BlockId: root[:]})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, resp.Finalized)
|
||||
})
|
||||
t.Run("not finalized", func(t *testing.T) {
|
||||
b := util.NewBeaconBlock()
|
||||
blk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
mockChainService := &mock.ChainService{
|
||||
FinalizedRoots: map[[32]byte]bool{root: false},
|
||||
}
|
||||
bs := &Server{
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: blk},
|
||||
}
|
||||
|
||||
resp, err := bs.GetBlindedBlockSSZ(ctx, ðpbv1.BlockRequest{BlockId: root[:]})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -6,23 +6,12 @@ import (
|
||||
|
||||
"github.com/golang/protobuf/ptypes/empty"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/api"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
|
||||
rpchelpers "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/migration"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
@@ -71,754 +60,3 @@ func (bs *Server) GetWeakSubjectivity(ctx context.Context, _ *empty.Empty) (*eth
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetBlock retrieves block details for given block ID.
|
||||
// DEPRECATED: please use GetBlockV2 instead
|
||||
func (bs *Server) GetBlock(ctx context.Context, req *ethpbv1.BlockRequest) (*ethpbv1.BlockResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.GetBlock")
|
||||
defer span.End()
|
||||
|
||||
blk, err := bs.Blocker.Block(ctx, req.BlockId)
|
||||
err = handleGetBlockError(blk, err)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
signedBeaconBlock, err := migration.SignedBeaconBlock(blk)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
|
||||
return ðpbv1.BlockResponse{
|
||||
Data: ðpbv1.BeaconBlockContainer{
|
||||
Message: signedBeaconBlock.Block,
|
||||
Signature: signedBeaconBlock.Signature,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetBlockSSZ returns the SSZ-serialized version of the beacon block for the given block ID.
|
||||
// DEPRECATED: please use GetBlockV2SSZ instead
|
||||
func (bs *Server) GetBlockSSZ(ctx context.Context, req *ethpbv1.BlockRequest) (*ethpbv1.BlockSSZResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.GetBlockSSZ")
|
||||
defer span.End()
|
||||
|
||||
blk, err := bs.Blocker.Block(ctx, req.BlockId)
|
||||
err = handleGetBlockError(blk, err)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
signedBeaconBlock, err := migration.SignedBeaconBlock(blk)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
sszBlock, err := signedBeaconBlock.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not marshal block into SSZ: %v", err)
|
||||
}
|
||||
|
||||
return ðpbv1.BlockSSZResponse{Data: sszBlock}, nil
|
||||
}
|
||||
|
||||
// GetBlockV2 retrieves block details for given block ID.
|
||||
func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (*ethpbv2.BlockResponseV2, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.GetBlockV2")
|
||||
defer span.End()
|
||||
|
||||
blk, err := bs.Blocker.Block(ctx, req.BlockId)
|
||||
err = handleGetBlockError(blk, err)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blkRoot, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
|
||||
result, err := getBlockPhase0(blk)
|
||||
if result != nil {
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
if err := grpc.SetHeader(ctx, metadata.Pairs(api.VersionHeader, version.String(blk.Version()))); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not set "+api.VersionHeader+" header: %v", err)
|
||||
}
|
||||
result, err = getBlockAltair(blk)
|
||||
if result != nil {
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
result, err = bs.getBlockBellatrix(ctx, blk)
|
||||
if result != nil {
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
result, err = bs.getBlockCapella(ctx, blk)
|
||||
if result != nil {
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
|
||||
result, err = bs.getBlockDeneb(ctx, blk)
|
||||
if result != nil {
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
|
||||
}
|
||||
|
||||
// GetBlockSSZV2 returns the SSZ-serialized version of the beacon block for given block ID.
|
||||
func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (*ethpbv2.SSZContainer, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.GetBlockSSZV2")
|
||||
defer span.End()
|
||||
|
||||
blk, err := bs.Blocker.Block(ctx, req.BlockId)
|
||||
err = handleGetBlockError(blk, err)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blkRoot, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
|
||||
result, err := getSSZBlockPhase0(blk)
|
||||
if result != nil {
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
result, err = getSSZBlockAltair(blk)
|
||||
if result != nil {
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
result, err = bs.getSSZBlockBellatrix(ctx, blk)
|
||||
if result != nil {
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
result, err = bs.getSSZBlockCapella(ctx, blk)
|
||||
if result != nil {
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
|
||||
result, err = bs.getSSZBlockDeneb(ctx, blk)
|
||||
if result != nil {
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
|
||||
return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
|
||||
}
|
||||
|
||||
// ListBlockAttestations retrieves the attestations included in the requested block.
|
||||
func (bs *Server) ListBlockAttestations(ctx context.Context, req *ethpbv1.BlockRequest) (*ethpbv1.BlockAttestationsResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.ListBlockAttestations")
|
||||
defer span.End()
|
||||
|
||||
blk, err := bs.Blocker.Block(ctx, req.BlockId)
|
||||
err = handleGetBlockError(blk, err)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
v1Alpha1Attestations := blk.Block().Body().Attestations()
|
||||
v1Attestations := make([]*ethpbv1.Attestation, 0, len(v1Alpha1Attestations))
|
||||
for _, att := range v1Alpha1Attestations {
|
||||
migratedAtt := migration.V1Alpha1AttestationToV1(att)
|
||||
v1Attestations = append(v1Attestations, migratedAtt)
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get block root: %v", err)
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
|
||||
}
|
||||
return ðpbv1.BlockAttestationsResponse{
|
||||
Data: v1Attestations,
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
Finalized: bs.FinalizationFetcher.IsFinalized(ctx, root),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func handleGetBlockError(blk interfaces.ReadOnlySignedBeaconBlock, err error) error {
|
||||
if invalidBlockIdErr, ok := err.(*lookup.BlockIdParseError); ok {
|
||||
return status.Errorf(codes.InvalidArgument, "Invalid block ID: %v", invalidBlockIdErr)
|
||||
}
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "Could not get block from block ID: %v", err)
|
||||
}
|
||||
if err := blocks.BeaconBlockIsNil(blk); err != nil {
|
||||
return status.Errorf(codes.NotFound, "Could not find requested block: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
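
// Illustrative example, not part of this change: handleGetBlockError maps block lookup failures
// onto gRPC status codes. A *lookup.BlockIdParseError becomes InvalidArgument, any other lookup
// error becomes Internal, and a nil block becomes NotFound. Assuming the fmt package in addition
// to the imports already used here:
//
//	err := handleGetBlockError(nil, errors.New("db unavailable"))
//	fmt.Println(status.Code(err)) // prints: Internal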
|
||||
|
||||
func getBlockPhase0(blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlockResponseV2, error) {
|
||||
phase0Blk, err := blk.PbPhase0Block()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if phase0Blk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
v1Blk, err := migration.SignedBeaconBlock(blk)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get signed beacon block")
|
||||
}
|
||||
return ðpbv2.BlockResponseV2{
|
||||
Version: ethpbv2.Version_PHASE0,
|
||||
Data: ðpbv2.SignedBeaconBlockContainer{
|
||||
Message: ðpbv2.SignedBeaconBlockContainer_Phase0Block{Phase0Block: v1Blk.Block},
|
||||
Signature: v1Blk.Signature,
|
||||
},
|
||||
ExecutionOptimistic: false,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func getBlockAltair(blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlockResponseV2, error) {
|
||||
altairBlk, err := blk.PbAltairBlock()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if altairBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlk.Block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get signed beacon block")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
return ðpbv2.BlockResponseV2{
|
||||
Version: ethpbv2.Version_ALTAIR,
|
||||
Data: ðpbv2.SignedBeaconBlockContainer{
|
||||
Message: ðpbv2.SignedBeaconBlockContainer_AltairBlock{AltairBlock: v2Blk},
|
||||
Signature: sig[:],
|
||||
},
|
||||
ExecutionOptimistic: false,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (bs *Server) getBlockBellatrix(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlockResponseV2, error) {
|
||||
bellatrixBlk, err := blk.PbBellatrixBlock()
|
||||
if err != nil {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
if blindedBellatrixBlk, err := blk.PbBlindedBellatrixBlock(); err == nil {
|
||||
if blindedBellatrixBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
signedFullBlock, err := bs.ExecutionPayloadReconstructor.ReconstructFullBlock(ctx, blk)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not reconstruct full execution payload to create signed beacon block")
|
||||
}
|
||||
bellatrixBlk, err = signedFullBlock.PbBellatrixBlock()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get signed beacon block")
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockBellatrixToV2(bellatrixBlk.Block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not convert beacon block")
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
return ðpbv2.BlockResponseV2{
|
||||
Version: ethpbv2.Version_BELLATRIX,
|
||||
Data: ðpbv2.SignedBeaconBlockContainer{
|
||||
Message: ðpbv2.SignedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: v2Blk},
|
||||
Signature: sig[:],
|
||||
},
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
}, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if bellatrixBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockBellatrixToV2(bellatrixBlk.Block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not convert beacon block")
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
return ðpbv2.BlockResponseV2{
|
||||
Version: ethpbv2.Version_BELLATRIX,
|
||||
Data: ðpbv2.SignedBeaconBlockContainer{
|
||||
Message: ðpbv2.SignedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: v2Blk},
|
||||
Signature: sig[:],
|
||||
},
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (bs *Server) getBlockCapella(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlockResponseV2, error) {
|
||||
capellaBlk, err := blk.PbCapellaBlock()
|
||||
if err != nil {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
if blindedCapellaBlk, err := blk.PbBlindedCapellaBlock(); err == nil {
|
||||
if blindedCapellaBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
signedFullBlock, err := bs.ExecutionPayloadReconstructor.ReconstructFullBlock(ctx, blk)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not reconstruct full execution payload to create signed beacon block")
|
||||
}
|
||||
capellaBlk, err = signedFullBlock.PbCapellaBlock()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get signed beacon block")
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockCapellaToV2(capellaBlk.Block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not convert beacon block")
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
return ðpbv2.BlockResponseV2{
|
||||
Version: ethpbv2.Version_CAPELLA,
|
||||
Data: ðpbv2.SignedBeaconBlockContainer{
|
||||
Message: ðpbv2.SignedBeaconBlockContainer_CapellaBlock{CapellaBlock: v2Blk},
|
||||
Signature: sig[:],
|
||||
},
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
}, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if capellaBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockCapellaToV2(capellaBlk.Block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not convert beacon block")
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
return ðpbv2.BlockResponseV2{
|
||||
Version: ethpbv2.Version_CAPELLA,
|
||||
Data: ðpbv2.SignedBeaconBlockContainer{
|
||||
Message: ðpbv2.SignedBeaconBlockContainer_CapellaBlock{CapellaBlock: v2Blk},
|
||||
Signature: sig[:],
|
||||
},
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (bs *Server) getBlockDeneb(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlockResponseV2, error) {
|
||||
denebBlk, err := blk.PbDenebBlock()
|
||||
if err != nil {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
if blindedDenebBlk, err := blk.PbBlindedDenebBlock(); err == nil {
|
||||
if blindedDenebBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
signedFullBlock, err := bs.ExecutionPayloadReconstructor.ReconstructFullBlock(ctx, blk)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not reconstruct full execution payload to create signed beacon block")
|
||||
}
|
||||
denebBlk, err = signedFullBlock.PbDenebBlock()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get signed beacon block")
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockDenebToV2(denebBlk.Block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not convert beacon block")
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
return ðpbv2.BlockResponseV2{
|
||||
Version: ethpbv2.Version_DENEB,
|
||||
Data: ðpbv2.SignedBeaconBlockContainer{
|
||||
Message: ðpbv2.SignedBeaconBlockContainer_DenebBlock{DenebBlock: v2Blk},
|
||||
Signature: sig[:],
|
||||
},
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
}, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if denebBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockDenebToV2(denebBlk.Block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not convert beacon block")
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
return ðpbv2.BlockResponseV2{
|
||||
Version: ethpbv2.Version_DENEB,
|
||||
Data: ðpbv2.SignedBeaconBlockContainer{
|
||||
Message: ðpbv2.SignedBeaconBlockContainer_DenebBlock{DenebBlock: v2Blk},
|
||||
Signature: sig[:],
|
||||
},
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func getSSZBlockPhase0(blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
|
||||
phase0Blk, err := blk.PbPhase0Block()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if phase0Blk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
signedBeaconBlock, err := migration.SignedBeaconBlock(blk)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get signed beacon block")
|
||||
}
|
||||
sszBlock, err := signedBeaconBlock.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not marshal block into SSZ")
|
||||
}
|
||||
return ðpbv2.SSZContainer{Version: ethpbv2.Version_PHASE0, ExecutionOptimistic: false, Data: sszBlock}, nil
|
||||
}
|
||||
|
||||
func getSSZBlockAltair(blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
|
||||
altairBlk, err := blk.PbAltairBlock()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if altairBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlk.Block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get signed beacon block")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
data := ðpbv2.SignedBeaconBlockAltair{
|
||||
Message: v2Blk,
|
||||
Signature: sig[:],
|
||||
}
|
||||
sszData, err := data.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not marshal block into SSZ")
|
||||
}
|
||||
return ðpbv2.SSZContainer{Version: ethpbv2.Version_ALTAIR, ExecutionOptimistic: false, Data: sszData}, nil
|
||||
}
|
||||
|
||||
func (bs *Server) getSSZBlockBellatrix(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
|
||||
bellatrixBlk, err := blk.PbBellatrixBlock()
|
||||
if err != nil {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
if blindedBellatrixBlk, err := blk.PbBlindedBellatrixBlock(); err == nil {
|
||||
if blindedBellatrixBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
signedFullBlock, err := bs.ExecutionPayloadReconstructor.ReconstructFullBlock(ctx, blk)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not reconstruct full execution payload to create signed beacon block")
|
||||
}
|
||||
bellatrixBlk, err = signedFullBlock.PbBellatrixBlock()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get signed beacon block")
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockBellatrixToV2(bellatrixBlk.Block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not convert signed beacon block")
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
data := ðpbv2.SignedBeaconBlockBellatrix{
|
||||
Message: v2Blk,
|
||||
Signature: sig[:],
|
||||
}
|
||||
sszData, err := data.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not marshal block into SSZ")
|
||||
}
|
||||
return ðpbv2.SSZContainer{
|
||||
Version: ethpbv2.Version_BELLATRIX,
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
Data: sszData,
|
||||
}, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if bellatrixBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockBellatrixToV2(bellatrixBlk.Block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not convert signed beacon block")
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
data := ðpbv2.SignedBeaconBlockBellatrix{
|
||||
Message: v2Blk,
|
||||
Signature: sig[:],
|
||||
}
|
||||
sszData, err := data.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not marshal block into SSZ")
|
||||
}
|
||||
return ðpbv2.SSZContainer{Version: ethpbv2.Version_BELLATRIX, ExecutionOptimistic: isOptimistic, Data: sszData}, nil
|
||||
}
|
||||
|
||||
func (bs *Server) getSSZBlockCapella(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
|
||||
capellaBlk, err := blk.PbCapellaBlock()
|
||||
if err != nil {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
if blindedCapellaBlk, err := blk.PbBlindedCapellaBlock(); err == nil {
|
||||
if blindedCapellaBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
signedFullBlock, err := bs.ExecutionPayloadReconstructor.ReconstructFullBlock(ctx, blk)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not reconstruct full execution payload to create signed beacon block")
|
||||
}
|
||||
capellaBlk, err = signedFullBlock.PbCapellaBlock()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get signed beacon block")
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockCapellaToV2(capellaBlk.Block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not convert signed beacon block")
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
data := ðpbv2.SignedBeaconBlockCapella{
|
||||
Message: v2Blk,
|
||||
Signature: sig[:],
|
||||
}
|
||||
sszData, err := data.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not marshal block into SSZ")
|
||||
}
|
||||
return ðpbv2.SSZContainer{
|
||||
Version: ethpbv2.Version_CAPELLA,
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
Data: sszData,
|
||||
}, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if capellaBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockCapellaToV2(capellaBlk.Block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not convert signed beacon block")
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
data := ðpbv2.SignedBeaconBlockCapella{
|
||||
Message: v2Blk,
|
||||
Signature: sig[:],
|
||||
}
|
||||
sszData, err := data.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not marshal block into SSZ")
|
||||
}
|
||||
return ðpbv2.SSZContainer{Version: ethpbv2.Version_CAPELLA, ExecutionOptimistic: isOptimistic, Data: sszData}, nil
|
||||
}
|
||||
|
||||
func (bs *Server) getSSZBlockDeneb(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
|
||||
denebBlk, err := blk.PbDenebBlock()
|
||||
if err != nil {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
if blindedDenebBlk, err := blk.PbBlindedDenebBlock(); err == nil {
|
||||
if blindedDenebBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
signedFullBlock, err := bs.ExecutionPayloadReconstructor.ReconstructFullBlock(ctx, blk)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not reconstruct full execution payload to create signed beacon block")
|
||||
}
|
||||
denebBlk, err = signedFullBlock.PbDenebBlock()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get signed beacon block")
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockDenebToV2(denebBlk.Block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not convert signed beacon block")
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
data := ðpbv2.SignedBeaconBlockDeneb{
|
||||
Message: v2Blk,
|
||||
Signature: sig[:],
|
||||
}
|
||||
sszData, err := data.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not marshal block into SSZ")
|
||||
}
|
||||
return ðpbv2.SSZContainer{
|
||||
Version: ethpbv2.Version_DENEB,
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
Data: sszData,
|
||||
}, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if denebBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockDenebToV2(denebBlk.Block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not convert signed beacon block")
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
data := ðpbv2.SignedBeaconBlockDeneb{
|
||||
Message: v2Blk,
|
||||
Signature: sig[:],
|
||||
}
|
||||
sszData, err := data.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not marshal block into SSZ")
|
||||
}
|
||||
return ðpbv2.SSZContainer{Version: ethpbv2.Version_DENEB, ExecutionOptimistic: isOptimistic, Data: sszData}, nil
|
||||
}
|
||||
|
||||
@@ -1,688 +1 @@
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
|
||||
"github.com/prysmaticlabs/prysm/v4/proto/migration"
|
||||
ethpbalpha "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
func fillDBTestBlocks(ctx context.Context, t *testing.T, beaconDB db.Database) (*ethpbalpha.SignedBeaconBlock, []*ethpbalpha.BeaconBlockContainer) {
|
||||
parentRoot := [32]byte{1, 2, 3}
|
||||
genBlk := util.NewBeaconBlock()
|
||||
genBlk.Block.ParentRoot = parentRoot[:]
|
||||
root, err := genBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, ctx, beaconDB, genBlk)
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, root))
|
||||
|
||||
count := primitives.Slot(100)
|
||||
blks := make([]interfaces.ReadOnlySignedBeaconBlock, count)
|
||||
blkContainers := make([]*ethpbalpha.BeaconBlockContainer, count)
|
||||
for i := primitives.Slot(0); i < count; i++ {
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = i
|
||||
b.Block.ParentRoot = bytesutil.PadTo([]byte{uint8(i)}, 32)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
blks[i], err = blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
blkContainers[i] = ðpbalpha.BeaconBlockContainer{
|
||||
Block: ðpbalpha.BeaconBlockContainer_Phase0Block{Phase0Block: b},
|
||||
BlockRoot: root[:],
|
||||
}
|
||||
}
|
||||
require.NoError(t, beaconDB.SaveBlocks(ctx, blks))
|
||||
headRoot := bytesutil.ToBytes32(blkContainers[len(blks)-1].BlockRoot)
|
||||
summary := ðpbalpha.StateSummary{
|
||||
Root: headRoot[:],
|
||||
Slot: blkContainers[len(blks)-1].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block.Block.Slot,
|
||||
}
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, summary))
|
||||
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, headRoot))
|
||||
return genBlk, blkContainers
|
||||
}
|
||||
|
||||
func TestServer_GetBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 123
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
bs := &Server{
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: sb},
|
||||
}
|
||||
|
||||
blk, err := bs.GetBlock(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
v1Block, err := migration.V1Alpha1ToV1SignedBlock(b)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, v1Block.Block, blk.Data.Message)
|
||||
}
|
||||
|
||||
func TestServer_GetBlockV2(t *testing.T) {
|
||||
stream := &runtime.ServerTransportStream{}
|
||||
ctx := grpc.NewContextWithServerTransportStream(context.Background(), stream)
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 123
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
mockBlockFetcher := &testutil.MockBlocker{BlockToReturn: sb}
|
||||
mockChainService := &mock.ChainService{
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
bs := &Server{
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: mockBlockFetcher,
|
||||
}
|
||||
|
||||
blk, err := bs.GetBlockV2(ctx, ðpbv2.BlockRequestV2{})
|
||||
require.NoError(t, err)
|
||||
|
||||
v1Block, err := migration.V1Alpha1ToV1SignedBlock(b)
|
||||
require.NoError(t, err)
|
||||
phase0Block, ok := blk.Data.Message.(*ethpbv2.SignedBeaconBlockContainer_Phase0Block)
|
||||
require.Equal(t, true, ok)
|
||||
assert.DeepEqual(t, v1Block.Block, phase0Block.Phase0Block)
|
||||
assert.Equal(t, ethpbv2.Version_PHASE0, blk.Version)
|
||||
})
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
b := util.NewBeaconBlockAltair()
|
||||
b.Block.Slot = 123
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
mockBlockFetcher := &testutil.MockBlocker{BlockToReturn: sb}
|
||||
mockChainService := &mock.ChainService{
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
bs := &Server{
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: mockBlockFetcher,
|
||||
}
|
||||
|
||||
blk, err := bs.GetBlockV2(ctx, ðpbv2.BlockRequestV2{})
|
||||
require.NoError(t, err)
|
||||
|
||||
v1Block, err := migration.V1Alpha1BeaconBlockAltairToV2(b.Block)
|
||||
require.NoError(t, err)
|
||||
altairBlock, ok := blk.Data.Message.(*ethpbv2.SignedBeaconBlockContainer_AltairBlock)
|
||||
require.Equal(t, true, ok)
|
||||
assert.DeepEqual(t, v1Block, altairBlock.AltairBlock)
|
||||
assert.Equal(t, ethpbv2.Version_ALTAIR, blk.Version)
|
||||
})
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
b := util.NewBeaconBlockBellatrix()
|
||||
b.Block.Slot = 123
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
mockBlockFetcher := &testutil.MockBlocker{BlockToReturn: sb}
|
||||
mockChainService := &mock.ChainService{
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
bs := &Server{
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: mockBlockFetcher,
|
||||
}
|
||||
|
||||
blk, err := bs.GetBlockV2(ctx, ðpbv2.BlockRequestV2{})
|
||||
require.NoError(t, err)
|
||||
|
||||
v1Block, err := migration.V1Alpha1BeaconBlockBellatrixToV2(b.Block)
|
||||
require.NoError(t, err)
|
||||
bellatrixBlock, ok := blk.Data.Message.(*ethpbv2.SignedBeaconBlockContainer_BellatrixBlock)
|
||||
require.Equal(t, true, ok)
|
||||
assert.DeepEqual(t, v1Block, bellatrixBlock.BellatrixBlock)
|
||||
assert.Equal(t, ethpbv2.Version_BELLATRIX, blk.Version)
|
||||
})
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
b := util.NewBeaconBlockCapella()
|
||||
b.Block.Slot = 123
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
mockBlockFetcher := &testutil.MockBlocker{BlockToReturn: sb}
|
||||
mockChainService := &mock.ChainService{
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
bs := &Server{
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: mockBlockFetcher,
|
||||
}
|
||||
|
||||
blk, err := bs.GetBlockV2(ctx, ðpbv2.BlockRequestV2{})
|
||||
require.NoError(t, err)
|
||||
|
||||
v1Block, err := migration.V1Alpha1BeaconBlockCapellaToV2(b.Block)
|
||||
require.NoError(t, err)
|
||||
capellaBlock, ok := blk.Data.Message.(*ethpbv2.SignedBeaconBlockContainer_CapellaBlock)
|
||||
require.Equal(t, true, ok)
|
||||
assert.DeepEqual(t, v1Block, capellaBlock.CapellaBlock)
|
||||
assert.Equal(t, ethpbv2.Version_CAPELLA, blk.Version)
|
||||
})
|
||||
t.Run("execution optimistic", func(t *testing.T) {
|
||||
b := util.NewBeaconBlockBellatrix()
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
r, err := sb.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
mockBlockFetcher := &testutil.MockBlocker{BlockToReturn: sb}
|
||||
mockChainService := &mock.ChainService{
|
||||
OptimisticRoots: map[[32]byte]bool{r: true},
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
bs := &Server{
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: mockBlockFetcher,
|
||||
}
|
||||
|
||||
blk, err := bs.GetBlockV2(ctx, ðpbv2.BlockRequestV2{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, blk.ExecutionOptimistic)
|
||||
})
|
||||
t.Run("finalized", func(t *testing.T) {
|
||||
b := util.NewBeaconBlock()
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
r, err := sb.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
mockBlockFetcher := &testutil.MockBlocker{BlockToReturn: sb}
|
||||
|
||||
t.Run("true", func(t *testing.T) {
|
||||
mockChainService := &mock.ChainService{FinalizedRoots: map[[32]byte]bool{r: true}}
|
||||
bs := &Server{
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: mockBlockFetcher,
|
||||
}
|
||||
|
||||
header, err := bs.GetBlockV2(ctx, ðpbv2.BlockRequestV2{BlockId: r[:]})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, header.Finalized)
|
||||
})
|
||||
t.Run("false", func(t *testing.T) {
|
||||
mockChainService := &mock.ChainService{FinalizedRoots: map[[32]byte]bool{r: false}}
|
||||
bs := &Server{
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: mockBlockFetcher,
|
||||
}
|
||||
|
||||
resp, err := bs.GetBlockV2(ctx, ðpbv2.BlockRequestV2{BlockId: r[:]})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestServer_GetBlockSSZ(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 123
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
bs := &Server{
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: sb},
|
||||
}
|
||||
|
||||
resp, err := bs.GetBlockSSZ(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, resp)
|
||||
sszBlock, err := b.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, sszBlock, resp.Data)
|
||||
}
|
||||
|
||||
func TestServer_GetBlockSSZV2(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 123
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockChainService := &mock.ChainService{
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
bs := &Server{
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: sb},
|
||||
}
|
||||
|
||||
resp, err := bs.GetBlockSSZV2(ctx, ðpbv2.BlockRequestV2{})
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, resp)
|
||||
sszBlock, err := b.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, sszBlock, resp.Data)
|
||||
assert.Equal(t, ethpbv2.Version_PHASE0, resp.Version)
|
||||
})
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
b := util.NewBeaconBlockAltair()
|
||||
b.Block.Slot = 123
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockChainService := &mock.ChainService{
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
bs := &Server{
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: sb},
|
||||
}
|
||||
|
||||
resp, err := bs.GetBlockSSZV2(ctx, ðpbv2.BlockRequestV2{})
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, resp)
|
||||
sszBlock, err := b.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, sszBlock, resp.Data)
|
||||
assert.Equal(t, ethpbv2.Version_ALTAIR, resp.Version)
|
||||
})
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
b := util.NewBeaconBlockBellatrix()
|
||||
b.Block.Slot = 123
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockChainService := &mock.ChainService{
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
bs := &Server{
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: sb},
|
||||
}
|
||||
|
||||
resp, err := bs.GetBlockSSZV2(ctx, ðpbv2.BlockRequestV2{})
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, resp)
|
||||
sszBlock, err := b.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, sszBlock, resp.Data)
|
||||
assert.Equal(t, ethpbv2.Version_BELLATRIX, resp.Version)
|
||||
})
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
b := util.NewBeaconBlockCapella()
|
||||
b.Block.Slot = 123
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockChainService := &mock.ChainService{
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
bs := &Server{
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: sb},
|
||||
}
|
||||
|
||||
resp, err := bs.GetBlockSSZV2(ctx, ðpbv2.BlockRequestV2{})
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, resp)
|
||||
sszBlock, err := b.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, sszBlock, resp.Data)
|
||||
assert.Equal(t, ethpbv2.Version_CAPELLA, resp.Version)
|
||||
})
|
||||
t.Run("execution optimistic", func(t *testing.T) {
|
||||
b := util.NewBeaconBlockBellatrix()
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
r, err := sb.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
mockChainService := &mock.ChainService{
|
||||
OptimisticRoots: map[[32]byte]bool{r: true},
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
bs := &Server{
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: sb},
|
||||
}
|
||||
|
||||
resp, err := bs.GetBlockSSZV2(ctx, ðpbv2.BlockRequestV2{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, resp.ExecutionOptimistic)
|
||||
})
|
||||
t.Run("finalized", func(t *testing.T) {
|
||||
b := util.NewBeaconBlock()
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
r, err := sb.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
mockBlockFetcher := &testutil.MockBlocker{BlockToReturn: sb}
|
||||
|
||||
t.Run("true", func(t *testing.T) {
|
||||
mockChainService := &mock.ChainService{FinalizedRoots: map[[32]byte]bool{r: true}}
|
||||
bs := &Server{
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: mockBlockFetcher,
|
||||
}
|
||||
|
||||
header, err := bs.GetBlockSSZV2(ctx, ðpbv2.BlockRequestV2{BlockId: r[:]})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, header.Finalized)
|
||||
})
|
||||
t.Run("false", func(t *testing.T) {
|
||||
mockChainService := &mock.ChainService{FinalizedRoots: map[[32]byte]bool{r: false}}
|
||||
bs := &Server{
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: mockBlockFetcher,
|
||||
}
|
||||
|
||||
resp, err := bs.GetBlockSSZV2(ctx, ðpbv2.BlockRequestV2{BlockId: r[:]})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestServer_ListBlockAttestations(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Body.Attestations = []*ethpbalpha.Attestation{
|
||||
{
|
||||
AggregationBits: bitfield.Bitlist{0x00},
|
||||
Data: ðpbalpha.AttestationData{
|
||||
Slot: 123,
|
||||
CommitteeIndex: 123,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("root1"), 32),
|
||||
Source: ðpbalpha.Checkpoint{
|
||||
Epoch: 123,
|
||||
Root: bytesutil.PadTo([]byte("root1"), 32),
|
||||
},
|
||||
Target: ðpbalpha.Checkpoint{
|
||||
Epoch: 123,
|
||||
Root: bytesutil.PadTo([]byte("root1"), 32),
|
||||
},
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("sig1"), 96),
|
||||
},
|
||||
{
|
||||
AggregationBits: bitfield.Bitlist{0x01},
|
||||
Data: ðpbalpha.AttestationData{
|
||||
Slot: 456,
|
||||
CommitteeIndex: 456,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("root2"), 32),
|
||||
Source: ðpbalpha.Checkpoint{
|
||||
Epoch: 456,
|
||||
Root: bytesutil.PadTo([]byte("root2"), 32),
|
||||
},
|
||||
Target: ðpbalpha.Checkpoint{
|
||||
Epoch: 456,
|
||||
Root: bytesutil.PadTo([]byte("root2"), 32),
|
||||
},
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("sig2"), 96),
|
||||
},
|
||||
}
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
mockBlockFetcher := &testutil.MockBlocker{BlockToReturn: sb}
|
||||
mockChainService := &mock.ChainService{
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
bs := &Server{
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: mockBlockFetcher,
|
||||
}
|
||||
|
||||
resp, err := bs.ListBlockAttestations(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
|
||||
v1Block, err := migration.V1Alpha1ToV1SignedBlock(b)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, v1Block.Block.Body.Attestations, resp.Data)
|
||||
})
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
b := util.NewBeaconBlockAltair()
|
||||
b.Block.Body.Attestations = []*ethpbalpha.Attestation{
|
||||
{
|
||||
AggregationBits: bitfield.Bitlist{0x00},
|
||||
Data: ðpbalpha.AttestationData{
|
||||
Slot: 123,
|
||||
CommitteeIndex: 123,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("root1"), 32),
|
||||
Source: ðpbalpha.Checkpoint{
|
||||
Epoch: 123,
|
||||
Root: bytesutil.PadTo([]byte("root1"), 32),
|
||||
},
|
||||
Target: ðpbalpha.Checkpoint{
|
||||
Epoch: 123,
|
||||
Root: bytesutil.PadTo([]byte("root1"), 32),
|
||||
},
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("sig1"), 96),
|
||||
},
|
||||
{
|
||||
AggregationBits: bitfield.Bitlist{0x01},
|
||||
Data: ðpbalpha.AttestationData{
|
||||
Slot: 456,
|
||||
CommitteeIndex: 456,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("root2"), 32),
|
||||
Source: ðpbalpha.Checkpoint{
|
||||
Epoch: 456,
|
||||
Root: bytesutil.PadTo([]byte("root2"), 32),
|
||||
},
|
||||
Target: ðpbalpha.Checkpoint{
|
||||
Epoch: 456,
|
||||
Root: bytesutil.PadTo([]byte("root2"), 32),
|
||||
},
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("sig2"), 96),
|
||||
},
|
||||
}
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
mockBlockFetcher := &testutil.MockBlocker{BlockToReturn: sb}
|
||||
mockChainService := &mock.ChainService{
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
bs := &Server{
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: mockBlockFetcher,
|
||||
}
|
||||
|
||||
resp, err := bs.ListBlockAttestations(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
|
||||
v1Block, err := migration.V1Alpha1BeaconBlockAltairToV2(b.Block)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, v1Block.Body.Attestations, resp.Data)
|
||||
})
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
b := util.NewBeaconBlockBellatrix()
|
||||
b.Block.Body.Attestations = []*ethpbalpha.Attestation{
|
||||
{
|
||||
AggregationBits: bitfield.Bitlist{0x00},
|
||||
Data: ðpbalpha.AttestationData{
|
||||
Slot: 123,
|
||||
CommitteeIndex: 123,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("root1"), 32),
|
||||
Source: ðpbalpha.Checkpoint{
|
||||
Epoch: 123,
|
||||
Root: bytesutil.PadTo([]byte("root1"), 32),
|
||||
},
|
||||
Target: ðpbalpha.Checkpoint{
|
||||
Epoch: 123,
|
||||
Root: bytesutil.PadTo([]byte("root1"), 32),
|
||||
},
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("sig1"), 96),
|
||||
},
|
||||
{
|
||||
AggregationBits: bitfield.Bitlist{0x01},
|
||||
Data: ðpbalpha.AttestationData{
|
||||
Slot: 456,
|
||||
CommitteeIndex: 456,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("root2"), 32),
|
||||
Source: ðpbalpha.Checkpoint{
|
||||
Epoch: 456,
|
||||
Root: bytesutil.PadTo([]byte("root2"), 32),
|
||||
},
|
||||
Target: ðpbalpha.Checkpoint{
|
||||
Epoch: 456,
|
||||
Root: bytesutil.PadTo([]byte("root2"), 32),
|
||||
},
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("sig2"), 96),
|
||||
},
|
||||
}
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
mockBlockFetcher := &testutil.MockBlocker{BlockToReturn: sb}
|
||||
mockChainService := &mock.ChainService{
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
bs := &Server{
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: mockBlockFetcher,
|
||||
}
|
||||
|
||||
resp, err := bs.ListBlockAttestations(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
|
||||
v1Block, err := migration.V1Alpha1BeaconBlockBellatrixToV2(b.Block)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, v1Block.Body.Attestations, resp.Data)
|
||||
})
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
b := util.NewBeaconBlockCapella()
|
||||
b.Block.Body.Attestations = []*ethpbalpha.Attestation{
|
||||
{
|
||||
AggregationBits: bitfield.Bitlist{0x00},
|
||||
Data: ðpbalpha.AttestationData{
|
||||
Slot: 123,
|
||||
CommitteeIndex: 123,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("root1"), 32),
|
||||
Source: ðpbalpha.Checkpoint{
|
||||
Epoch: 123,
|
||||
Root: bytesutil.PadTo([]byte("root1"), 32),
|
||||
},
|
||||
Target: ðpbalpha.Checkpoint{
|
||||
Epoch: 123,
|
||||
Root: bytesutil.PadTo([]byte("root1"), 32),
|
||||
},
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("sig1"), 96),
|
||||
},
|
||||
{
|
||||
AggregationBits: bitfield.Bitlist{0x01},
|
||||
Data: ðpbalpha.AttestationData{
|
||||
Slot: 456,
|
||||
CommitteeIndex: 456,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("root2"), 32),
|
||||
Source: ðpbalpha.Checkpoint{
|
||||
Epoch: 456,
|
||||
Root: bytesutil.PadTo([]byte("root2"), 32),
|
||||
},
|
||||
Target: ðpbalpha.Checkpoint{
|
||||
Epoch: 456,
|
||||
Root: bytesutil.PadTo([]byte("root2"), 32),
|
||||
},
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("sig2"), 96),
|
||||
},
|
||||
}
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
mockBlockFetcher := &testutil.MockBlocker{BlockToReturn: sb}
|
||||
mockChainService := &mock.ChainService{
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
bs := &Server{
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: mockBlockFetcher,
|
||||
}
|
||||
|
||||
resp, err := bs.ListBlockAttestations(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
|
||||
v1Block, err := migration.V1Alpha1BeaconBlockCapellaToV2(b.Block)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, v1Block.Body.Attestations, resp.Data)
|
||||
})
|
||||
t.Run("execution optimistic", func(t *testing.T) {
|
||||
b := util.NewBeaconBlockBellatrix()
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
r, err := sb.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
mockBlockFetcher := &testutil.MockBlocker{BlockToReturn: sb}
|
||||
mockChainService := &mock.ChainService{
|
||||
OptimisticRoots: map[[32]byte]bool{r: true},
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
bs := &Server{
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: mockBlockFetcher,
|
||||
}
|
||||
|
||||
resp, err := bs.ListBlockAttestations(ctx, ðpbv1.BlockRequest{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, resp.ExecutionOptimistic)
|
||||
})
|
||||
t.Run("finalized", func(t *testing.T) {
|
||||
b := util.NewBeaconBlock()
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
r, err := sb.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
mockBlockFetcher := &testutil.MockBlocker{BlockToReturn: sb}
|
||||
|
||||
t.Run("true", func(t *testing.T) {
|
||||
mockChainService := &mock.ChainService{FinalizedRoots: map[[32]byte]bool{r: true}}
|
||||
bs := &Server{
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: mockBlockFetcher,
|
||||
}
|
||||
|
||||
resp, err := bs.ListBlockAttestations(ctx, ðpbv1.BlockRequest{BlockId: r[:]})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, resp.Finalized)
|
||||
})
|
||||
t.Run("false", func(t *testing.T) {
|
||||
mockChainService := &mock.ChainService{FinalizedRoots: map[[32]byte]bool{r: false}}
|
||||
bs := &Server{
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: mockBlockFetcher,
|
||||
}
|
||||
|
||||
resp, err := bs.ListBlockAttestations(ctx, ðpbv1.BlockRequest{BlockId: r[:]})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
@@ -38,6 +39,908 @@ const (
|
||||
broadcastValidationConsensusAndEquivocation = "consensus_and_equivocation"
|
||||
)
|
||||
|
||||
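// handled reports whether a per-fork getter has already produced a response (or written an
// error), letting the dispatching handler stop instead of trying the remaining fork versions.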
type handled bool
|
||||
|
||||
// GetBlock retrieves block details for the given block ID.
|
||||
//
|
||||
// DEPRECATED: please use GetBlockV2 instead
|
||||
func (s *Server) GetBlock(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.GetBlock")
|
||||
defer span.End()
|
||||
|
||||
blockId := mux.Vars(r)["block_id"]
|
||||
if blockId == "" {
|
||||
http2.HandleError(w, "block_id is required in URL params", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
blk, err := s.Blocker.Block(ctx, []byte(blockId))
|
||||
if !shared.WriteBlockFetchError(w, blk, err) {
|
||||
return
|
||||
}
|
||||
|
||||
if http2.SszRequested(r) {
|
||||
s.getBlockSSZ(ctx, w, blk)
|
||||
} else {
|
||||
s.getBlock(ctx, w, blk)
|
||||
}
|
||||
}
|
||||
|
||||
// getBlock returns the JSON-serialized version of the beacon block for the given block ID.
|
||||
func (s *Server) getBlock(ctx context.Context, w http.ResponseWriter, blk interfaces.ReadOnlySignedBeaconBlock) {
|
||||
v2Resp, err := s.getBlockPhase0(ctx, blk)
|
||||
if err != nil {
|
||||
http2.HandleError(w, "Could not get block: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
resp := &GetBlockResponse{Data: v2Resp.Data}
|
||||
http2.WriteJson(w, resp)
|
||||
}
|
||||
|
||||
// getBlockSSZ returns the SSZ-serialized version of the beacon block for the given block ID.
|
||||
func (s *Server) getBlockSSZ(ctx context.Context, w http.ResponseWriter, blk interfaces.ReadOnlySignedBeaconBlock) {
|
||||
resp, err := s.getBlockPhase0SSZ(ctx, blk)
|
||||
if err != nil {
|
||||
http2.HandleError(w, "Could not get block: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
http2.WriteSsz(w, resp, "beacon_block.ssz")
|
||||
}
|
||||
|
||||
// GetBlockV2 retrieves block details for the given block ID.
|
||||
func (s *Server) GetBlockV2(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.GetBlockV2")
|
||||
defer span.End()
|
||||
|
||||
blockId := mux.Vars(r)["block_id"]
|
||||
if blockId == "" {
|
||||
http2.HandleError(w, "block_id is required in URL params", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
blk, err := s.Blocker.Block(ctx, []byte(blockId))
|
||||
if !shared.WriteBlockFetchError(w, blk, err) {
|
||||
return
|
||||
}
|
||||
|
||||
if http2.SszRequested(r) {
|
||||
s.getBlockSSZV2(ctx, w, blk)
|
||||
} else {
|
||||
s.getBlockV2(ctx, w, blk)
|
||||
}
|
||||
}
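
// Illustrative sketch only, not part of this change: consuming this handler from a Go client.
// The /eth/v2/beacon/blocks/{block_id} route, the localhost:3500 gateway address, the JSON field
// names, and the use of the Accept header to request SSZ are assumptions based on the standard
// Beacon API rather than anything defined in this file (imports: net/http, encoding/json, log).
//
//	resp, err := http.Get("http://localhost:3500/eth/v2/beacon/blocks/head")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer resp.Body.Close()
//	var block struct {
//		Version             string          `json:"version"`
//		ExecutionOptimistic bool            `json:"execution_optimistic"`
//		Finalized           bool            `json:"finalized"`
//		Data                json.RawMessage `json:"data"`
//	}
//	if err := json.NewDecoder(resp.Body).Decode(&block); err != nil {
//		log.Fatal(err)
//	}
//	// Setting "Accept: application/octet-stream" on the request returns raw SSZ bytes instead,
//	// with the fork name echoed in the response version header.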
|
||||
|
||||
// getBlockV2 returns the JSON-serialized version of the beacon block for the given block ID.
|
||||
func (s *Server) getBlockV2(ctx context.Context, w http.ResponseWriter, blk interfaces.ReadOnlySignedBeaconBlock) {
|
||||
blkRoot, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
http2.HandleError(w, "Could not get block root "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
finalized := s.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
|
||||
getBlockHandler := func(get func(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (*GetBlockV2Response, error)) handled {
|
||||
result, err := get(ctx, blk)
|
||||
if result != nil {
|
||||
result.Finalized = finalized
|
||||
w.Header().Set(api.VersionHeader, result.Version)
|
||||
http2.WriteJson(w, result)
|
||||
return true
|
||||
}
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
http2.HandleError(w, "Could not get signed beacon block: "+err.Error(), http.StatusInternalServerError)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if getBlockHandler(s.getBlockDeneb) {
|
||||
return
|
||||
}
|
||||
if getBlockHandler(s.getBlockCapella) {
|
||||
return
|
||||
}
|
||||
if getBlockHandler(s.getBlockBellatrix) {
|
||||
return
|
||||
}
|
||||
if getBlockHandler(s.getBlockAltair) {
|
||||
return
|
||||
}
|
||||
if getBlockHandler(s.getBlockPhase0) {
|
||||
return
|
||||
}
|
||||
http2.HandleError(w, fmt.Sprintf("Unknown block type %T", blk), http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
// getBlockSSZV2 returns the SSZ-serialized version of the beacon block for the given block ID.
|
||||
func (s *Server) getBlockSSZV2(ctx context.Context, w http.ResponseWriter, blk interfaces.ReadOnlySignedBeaconBlock) {
|
||||
getBlockHandler := func(get func(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) ([]byte, error), ver string) handled {
|
||||
result, err := get(ctx, blk)
|
||||
if result != nil {
|
||||
w.Header().Set(api.VersionHeader, ver)
|
||||
http2.WriteSsz(w, result, "beacon_block.ssz")
|
||||
return true
|
||||
}
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
http2.HandleError(w, "Could not get signed beacon block: "+err.Error(), http.StatusInternalServerError)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if getBlockHandler(s.getBlockDenebSSZ, version.String(version.Deneb)) {
|
||||
return
|
||||
}
|
||||
if getBlockHandler(s.getBlockCapellaSSZ, version.String(version.Capella)) {
|
||||
return
|
||||
}
|
||||
if getBlockHandler(s.getBlockBellatrixSSZ, version.String(version.Bellatrix)) {
|
||||
return
|
||||
}
|
||||
if getBlockHandler(s.getBlockAltairSSZ, version.String(version.Altair)) {
|
||||
return
|
||||
}
|
||||
if getBlockHandler(s.getBlockPhase0SSZ, version.String(version.Phase0)) {
|
||||
return
|
||||
}
|
||||
http2.HandleError(w, fmt.Sprintf("Unknown block type %T", blk), http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
// GetBlindedBlock retrieves the blinded block for the given block ID.
|
||||
func (s *Server) GetBlindedBlock(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.GetBlindedBlock")
|
||||
defer span.End()
|
||||
|
||||
blockId := mux.Vars(r)["block_id"]
|
||||
if blockId == "" {
|
||||
http2.HandleError(w, "block_id is required in URL params", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
blk, err := s.Blocker.Block(ctx, []byte(blockId))
|
||||
if !shared.WriteBlockFetchError(w, blk, err) {
|
||||
return
|
||||
}
|
||||
|
||||
if http2.SszRequested(r) {
|
||||
s.getBlindedBlockSSZ(ctx, w, blk)
|
||||
} else {
|
||||
s.getBlindedBlock(ctx, w, blk)
|
||||
}
|
||||
}
|
||||
|
||||
// getBlindedBlock returns the JSON-serialized version of the blinded beacon block for the given block ID.
|
||||
func (s *Server) getBlindedBlock(ctx context.Context, w http.ResponseWriter, blk interfaces.ReadOnlySignedBeaconBlock) {
|
||||
blkRoot, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
http2.HandleError(w, "Could not get block root "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
finalized := s.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
|
||||
getBlockHandler := func(get func(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (*GetBlockV2Response, error)) handled {
|
||||
result, err := get(ctx, blk)
|
||||
if result != nil {
|
||||
result.Finalized = finalized
|
||||
w.Header().Set(api.VersionHeader, result.Version)
|
||||
http2.WriteJson(w, result)
|
||||
return true
|
||||
}
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
http2.HandleError(w, "Could not get signed beacon block: "+err.Error(), http.StatusInternalServerError)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if getBlockHandler(s.getBlockPhase0) {
|
||||
return
|
||||
}
|
||||
if getBlockHandler(s.getBlockAltair) {
|
||||
return
|
||||
}
|
||||
if getBlockHandler(s.getBlindedBlockBellatrix) {
|
||||
return
|
||||
}
|
||||
if getBlockHandler(s.getBlindedBlockCapella) {
|
||||
return
|
||||
}
|
||||
if getBlockHandler(s.getBlindedBlockDeneb) {
|
||||
return
|
||||
}
|
||||
http2.HandleError(w, fmt.Sprintf("Unknown block type %T", blk), http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
// getBlindedBlockSSZ returns the SSZ-serialized version of the blinded beacon block for the given block ID.
|
||||
func (s *Server) getBlindedBlockSSZ(ctx context.Context, w http.ResponseWriter, blk interfaces.ReadOnlySignedBeaconBlock) {
|
||||
getBlockHandler := func(get func(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) ([]byte, error), ver string) handled {
|
||||
result, err := get(ctx, blk)
|
||||
if result != nil {
|
||||
w.Header().Set(api.VersionHeader, ver)
|
||||
http2.WriteSsz(w, result, "beacon_block.ssz")
|
||||
return true
|
||||
}
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
http2.HandleError(w, "Could not get signed beacon block: "+err.Error(), http.StatusInternalServerError)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if getBlockHandler(s.getBlockPhase0SSZ, version.String(version.Phase0)) {
|
||||
return
|
||||
}
|
||||
if getBlockHandler(s.getBlockAltairSSZ, version.String(version.Altair)) {
|
||||
return
|
||||
}
|
||||
if getBlockHandler(s.getBlindedBlockBellatrixSSZ, version.String(version.Bellatrix)) {
|
||||
return
|
||||
}
|
||||
if getBlockHandler(s.getBlindedBlockCapellaSSZ, version.String(version.Capella)) {
|
||||
return
|
||||
}
|
||||
if getBlockHandler(s.getBlindedBlockDenebSSZ, version.String(version.Deneb)) {
|
||||
return
|
||||
}
|
||||
http2.HandleError(w, fmt.Sprintf("Unknown block type %T", blk), http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
func (*Server) getBlockPhase0(_ context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*GetBlockV2Response, error) {
	consensusBlk, err := blk.PbPhase0Block()
	if err != nil {
		return nil, err
	}
	if consensusBlk == nil {
		return nil, errNilBlock
	}
	respBlk, err := shared.SignedBeaconBlockFromConsensus(consensusBlk)
	if err != nil {
		return nil, err
	}
	jsonBytes, err := json.Marshal(respBlk.Message)
	if err != nil {
		return nil, err
	}
	return &GetBlockV2Response{
		Version:             version.String(version.Phase0),
		ExecutionOptimistic: false,
		Data: &SignedBlock{
			Message:   jsonBytes,
			Signature: respBlk.Signature,
		},
	}, nil
}

func (*Server) getBlockAltair(_ context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*GetBlockV2Response, error) {
	consensusBlk, err := blk.PbAltairBlock()
	if err != nil {
		return nil, err
	}
	if consensusBlk == nil {
		return nil, errNilBlock
	}
	respBlk, err := shared.SignedBeaconBlockAltairFromConsensus(consensusBlk)
	if err != nil {
		return nil, err
	}
	jsonBytes, err := json.Marshal(respBlk.Message)
	if err != nil {
		return nil, err
	}
	return &GetBlockV2Response{
		Version:             version.String(version.Altair),
		ExecutionOptimistic: false,
		Data: &SignedBlock{
			Message:   jsonBytes,
			Signature: respBlk.Signature,
		},
	}, nil
}

func (s *Server) getBlockBellatrix(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*GetBlockV2Response, error) {
	consensusBlk, err := blk.PbBellatrixBlock()
	if err != nil {
		// ErrUnsupportedField means that we have another block type
		if errors.Is(err, consensus_types.ErrUnsupportedField) {
			blindedConsensusBlk, err := blk.PbBlindedBellatrixBlock()
			if err != nil {
				return nil, err
			}
			if blindedConsensusBlk == nil {
				return nil, errNilBlock
			}
			fullBlk, err := s.ExecutionPayloadReconstructor.ReconstructFullBlock(ctx, blk)
			if err != nil {
				return nil, errors.Wrapf(err, "could not reconstruct full execution payload to create signed beacon block")
			}
			consensusBlk, err = fullBlk.PbBellatrixBlock()
			if err != nil {
				return nil, errors.Wrapf(err, "could not get signed beacon block")
			}
		} else {
			return nil, err
		}
	}

	if consensusBlk == nil {
		return nil, errNilBlock
	}
	root, err := blk.Block().HashTreeRoot()
	if err != nil {
		return nil, errors.Wrapf(err, "could not get block root")
	}
	isOptimistic, err := s.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
	if err != nil {
		return nil, errors.Wrapf(err, "could not check if block is optimistic")
	}
	respBlk, err := shared.SignedBeaconBlockBellatrixFromConsensus(consensusBlk)
	if err != nil {
		return nil, err
	}
	jsonBytes, err := json.Marshal(respBlk.Message)
	if err != nil {
		return nil, err
	}
	return &GetBlockV2Response{
		Version:             version.String(version.Bellatrix),
		ExecutionOptimistic: isOptimistic,
		Data: &SignedBlock{
			Message:   jsonBytes,
			Signature: respBlk.Signature,
		},
	}, nil
}

func (s *Server) getBlockCapella(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*GetBlockV2Response, error) {
	consensusBlk, err := blk.PbCapellaBlock()
	if err != nil {
		// ErrUnsupportedField means that we have another block type
		if errors.Is(err, consensus_types.ErrUnsupportedField) {
			blindedConsensusBlk, err := blk.PbBlindedCapellaBlock()
			if err != nil {
				return nil, err
			}
			if blindedConsensusBlk == nil {
				return nil, errNilBlock
			}
			fullBlk, err := s.ExecutionPayloadReconstructor.ReconstructFullBlock(ctx, blk)
			if err != nil {
				return nil, errors.Wrapf(err, "could not reconstruct full execution payload to create signed beacon block")
			}
			consensusBlk, err = fullBlk.PbCapellaBlock()
			if err != nil {
				return nil, errors.Wrapf(err, "could not get signed beacon block")
			}
		} else {
			return nil, err
		}
	}

	if consensusBlk == nil {
		return nil, errNilBlock
	}
	root, err := blk.Block().HashTreeRoot()
	if err != nil {
		return nil, errors.Wrapf(err, "could not get block root")
	}
	isOptimistic, err := s.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
	if err != nil {
		return nil, errors.Wrapf(err, "could not check if block is optimistic")
	}
	respBlk, err := shared.SignedBeaconBlockCapellaFromConsensus(consensusBlk)
	if err != nil {
		return nil, err
	}
	jsonBytes, err := json.Marshal(respBlk.Message)
	if err != nil {
		return nil, err
	}
	return &GetBlockV2Response{
		Version:             version.String(version.Capella),
		ExecutionOptimistic: isOptimistic,
		Data: &SignedBlock{
			Message:   jsonBytes,
			Signature: respBlk.Signature,
		},
	}, nil
}

func (s *Server) getBlockDeneb(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*GetBlockV2Response, error) {
	consensusBlk, err := blk.PbDenebBlock()
	if err != nil {
		// ErrUnsupportedField means that we have another block type
		if errors.Is(err, consensus_types.ErrUnsupportedField) {
			blindedConsensusBlk, err := blk.PbBlindedDenebBlock()
			if err != nil {
				return nil, err
			}
			if blindedConsensusBlk == nil {
				return nil, errNilBlock
			}
			fullBlk, err := s.ExecutionPayloadReconstructor.ReconstructFullBlock(ctx, blk)
			if err != nil {
				return nil, errors.Wrapf(err, "could not reconstruct full execution payload to create signed beacon block")
			}
			consensusBlk, err = fullBlk.PbDenebBlock()
			if err != nil {
				return nil, errors.Wrapf(err, "could not get signed beacon block")
			}
		} else {
			return nil, err
		}
	}

	if consensusBlk == nil {
		return nil, errNilBlock
	}
	root, err := blk.Block().HashTreeRoot()
	if err != nil {
		return nil, errors.Wrapf(err, "could not get block root")
	}
	isOptimistic, err := s.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
	if err != nil {
		return nil, errors.Wrapf(err, "could not check if block is optimistic")
	}
	respBlk, err := shared.SignedBeaconBlockDenebFromConsensus(consensusBlk)
	if err != nil {
		return nil, err
	}
	jsonBytes, err := json.Marshal(respBlk.Message)
	if err != nil {
		return nil, err
	}
	return &GetBlockV2Response{
		Version:             version.String(version.Deneb),
		ExecutionOptimistic: isOptimistic,
		Data: &SignedBlock{
			Message:   jsonBytes,
			Signature: respBlk.Signature,
		},
	}, nil
}

func (*Server) getBlockPhase0SSZ(_ context.Context, blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
	consensusBlk, err := blk.PbPhase0Block()
	if err != nil {
		return nil, err
	}
	if consensusBlk == nil {
		return nil, errNilBlock
	}
	sszData, err := consensusBlk.MarshalSSZ()
	if err != nil {
		return nil, errors.Wrapf(err, "could not marshal block into SSZ")
	}
	return sszData, nil
}

func (*Server) getBlockAltairSSZ(_ context.Context, blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
	consensusBlk, err := blk.PbAltairBlock()
	if err != nil {
		return nil, err
	}
	if consensusBlk == nil {
		return nil, errNilBlock
	}
	sszData, err := consensusBlk.MarshalSSZ()
	if err != nil {
		return nil, errors.Wrapf(err, "could not marshal block into SSZ")
	}
	return sszData, nil
}

func (s *Server) getBlockBellatrixSSZ(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
	consensusBlk, err := blk.PbBellatrixBlock()
	if err != nil {
		// ErrUnsupportedField means that we have another block type
		if errors.Is(err, consensus_types.ErrUnsupportedField) {
			blindedConsensusBlk, err := blk.PbBlindedBellatrixBlock()
			if err != nil {
				return nil, err
			}
			if blindedConsensusBlk == nil {
				return nil, errNilBlock
			}
			fullBlk, err := s.ExecutionPayloadReconstructor.ReconstructFullBlock(ctx, blk)
			if err != nil {
				return nil, errors.Wrapf(err, "could not reconstruct full execution payload to create signed beacon block")
			}
			consensusBlk, err = fullBlk.PbBellatrixBlock()
			if err != nil {
				return nil, errors.Wrapf(err, "could not get signed beacon block")
			}
		} else {
			return nil, err
		}
	}

	if consensusBlk == nil {
		return nil, errNilBlock
	}
	sszData, err := consensusBlk.MarshalSSZ()
	if err != nil {
		return nil, errors.Wrapf(err, "could not marshal block into SSZ")
	}
	return sszData, nil
}

func (s *Server) getBlockCapellaSSZ(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
	consensusBlk, err := blk.PbCapellaBlock()
	if err != nil {
		// ErrUnsupportedField means that we have another block type
		if errors.Is(err, consensus_types.ErrUnsupportedField) {
			blindedConsensusBlk, err := blk.PbBlindedCapellaBlock()
			if err != nil {
				return nil, err
			}
			if blindedConsensusBlk == nil {
				return nil, errNilBlock
			}
			fullBlk, err := s.ExecutionPayloadReconstructor.ReconstructFullBlock(ctx, blk)
			if err != nil {
				return nil, errors.Wrapf(err, "could not reconstruct full execution payload to create signed beacon block")
			}
			consensusBlk, err = fullBlk.PbCapellaBlock()
			if err != nil {
				return nil, errors.Wrapf(err, "could not get signed beacon block")
			}
		} else {
			return nil, err
		}
	}

	if consensusBlk == nil {
		return nil, errNilBlock
	}
	sszData, err := consensusBlk.MarshalSSZ()
	if err != nil {
		return nil, errors.Wrapf(err, "could not marshal block into SSZ")
	}
	return sszData, nil
}

func (s *Server) getBlockDenebSSZ(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
	consensusBlk, err := blk.PbDenebBlock()
	if err != nil {
		// ErrUnsupportedField means that we have another block type
		if errors.Is(err, consensus_types.ErrUnsupportedField) {
			blindedConsensusBlk, err := blk.PbBlindedDenebBlock()
			if err != nil {
				return nil, err
			}
			if blindedConsensusBlk == nil {
				return nil, errNilBlock
			}
			fullBlk, err := s.ExecutionPayloadReconstructor.ReconstructFullBlock(ctx, blk)
			if err != nil {
				return nil, errors.Wrapf(err, "could not reconstruct full execution payload to create signed beacon block")
			}
			consensusBlk, err = fullBlk.PbDenebBlock()
			if err != nil {
				return nil, errors.Wrapf(err, "could not get signed beacon block")
			}
		} else {
			return nil, err
		}
	}

	if consensusBlk == nil {
		return nil, errNilBlock
	}
	sszData, err := consensusBlk.MarshalSSZ()
	if err != nil {
		return nil, errors.Wrapf(err, "could not marshal block into SSZ")
	}
	return sszData, nil
}

func (s *Server) getBlindedBlockBellatrix(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*GetBlockV2Response, error) {
	blindedConsensusBlk, err := blk.PbBlindedBellatrixBlock()
	if err != nil {
		// ErrUnsupportedField means that we have another block type
		if errors.Is(err, consensus_types.ErrUnsupportedField) {
			consensusBlk, err := blk.PbBellatrixBlock()
			if err != nil {
				return nil, err
			}
			if consensusBlk == nil {
				return nil, errNilBlock
			}
			blkInterface, err := blk.ToBlinded()
			if err != nil {
				return nil, errors.Wrapf(err, "could not convert block to blinded block")
			}
			blindedConsensusBlk, err = blkInterface.PbBlindedBellatrixBlock()
			if err != nil {
				return nil, errors.Wrapf(err, "could not get signed beacon block")
			}
		} else {
			return nil, err
		}
	}

	if blindedConsensusBlk == nil {
		return nil, errNilBlock
	}
	root, err := blk.Block().HashTreeRoot()
	if err != nil {
		return nil, errors.Wrapf(err, "could not get block root")
	}
	isOptimistic, err := s.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
	if err != nil {
		return nil, errors.Wrapf(err, "could not check if block is optimistic")
	}
	respBlk, err := shared.SignedBlindedBeaconBlockBellatrixFromConsensus(blindedConsensusBlk)
	if err != nil {
		return nil, err
	}
	jsonBytes, err := json.Marshal(respBlk.Message)
	if err != nil {
		return nil, err
	}
	return &GetBlockV2Response{
		Version:             version.String(version.Bellatrix),
		ExecutionOptimistic: isOptimistic,
		Data: &SignedBlock{
			Message:   jsonBytes,
			Signature: respBlk.Signature,
		},
	}, nil
}

func (s *Server) getBlindedBlockCapella(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*GetBlockV2Response, error) {
	blindedConsensusBlk, err := blk.PbBlindedCapellaBlock()
	if err != nil {
		// ErrUnsupportedField means that we have another block type
		if errors.Is(err, consensus_types.ErrUnsupportedField) {
			consensusBlk, err := blk.PbCapellaBlock()
			if err != nil {
				return nil, err
			}
			if consensusBlk == nil {
				return nil, errNilBlock
			}
			blkInterface, err := blk.ToBlinded()
			if err != nil {
				return nil, errors.Wrapf(err, "could not convert block to blinded block")
			}
			blindedConsensusBlk, err = blkInterface.PbBlindedCapellaBlock()
			if err != nil {
				return nil, errors.Wrapf(err, "could not get signed beacon block")
			}
		} else {
			return nil, err
		}
	}

	if blindedConsensusBlk == nil {
		return nil, errNilBlock
	}
	root, err := blk.Block().HashTreeRoot()
	if err != nil {
		return nil, errors.Wrapf(err, "could not get block root")
	}
	isOptimistic, err := s.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
	if err != nil {
		return nil, errors.Wrapf(err, "could not check if block is optimistic")
	}
	respBlk, err := shared.SignedBlindedBeaconBlockCapellaFromConsensus(blindedConsensusBlk)
	if err != nil {
		return nil, err
	}
	jsonBytes, err := json.Marshal(respBlk.Message)
	if err != nil {
		return nil, err
	}
	return &GetBlockV2Response{
		Version:             version.String(version.Capella),
		ExecutionOptimistic: isOptimistic,
		Data: &SignedBlock{
			Message:   jsonBytes,
			Signature: respBlk.Signature,
		},
	}, nil
}

func (s *Server) getBlindedBlockDeneb(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*GetBlockV2Response, error) {
	blindedConsensusBlk, err := blk.PbBlindedDenebBlock()
	if err != nil {
		// ErrUnsupportedField means that we have another block type
		if errors.Is(err, consensus_types.ErrUnsupportedField) {
			consensusBlk, err := blk.PbDenebBlock()
			if err != nil {
				return nil, err
			}
			if consensusBlk == nil {
				return nil, errNilBlock
			}
			blkInterface, err := blk.ToBlinded()
			if err != nil {
				return nil, errors.Wrapf(err, "could not convert block to blinded block")
			}
			blindedConsensusBlk, err = blkInterface.PbBlindedDenebBlock()
			if err != nil {
				return nil, errors.Wrapf(err, "could not get signed beacon block")
			}
		} else {
			return nil, err
		}
	}

	if blindedConsensusBlk == nil {
		return nil, errNilBlock
	}
	root, err := blk.Block().HashTreeRoot()
	if err != nil {
		return nil, errors.Wrapf(err, "could not get block root")
	}
	isOptimistic, err := s.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
	if err != nil {
		return nil, errors.Wrapf(err, "could not check if block is optimistic")
	}
	respBlk, err := shared.SignedBlindedBeaconBlockDenebFromConsensus(blindedConsensusBlk)
	if err != nil {
		return nil, err
	}
	jsonBytes, err := json.Marshal(respBlk.Message)
	if err != nil {
		return nil, err
	}
	return &GetBlockV2Response{
		Version:             version.String(version.Deneb),
		ExecutionOptimistic: isOptimistic,
		Data: &SignedBlock{
			Message:   jsonBytes,
			Signature: respBlk.Signature,
		},
	}, nil
}

func (*Server) getBlindedBlockBellatrixSSZ(_ context.Context, blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
	blindedConsensusBlk, err := blk.PbBlindedBellatrixBlock()
	if err != nil {
		// ErrUnsupportedField means that we have another block type
		if errors.Is(err, consensus_types.ErrUnsupportedField) {
			consensusBlk, err := blk.PbBellatrixBlock()
			if err != nil {
				return nil, err
			}
			if consensusBlk == nil {
				return nil, errNilBlock
			}
			blkInterface, err := blk.ToBlinded()
			if err != nil {
				return nil, errors.Wrapf(err, "could not convert block to blinded block")
			}
			blindedConsensusBlk, err = blkInterface.PbBlindedBellatrixBlock()
			if err != nil {
				return nil, errors.Wrapf(err, "could not get signed beacon block")
			}
		} else {
			return nil, err
		}
	}

	if blindedConsensusBlk == nil {
		return nil, errNilBlock
	}
	sszData, err := blindedConsensusBlk.MarshalSSZ()
	if err != nil {
		return nil, errors.Wrapf(err, "could not marshal block into SSZ")
	}
	return sszData, nil
}

func (*Server) getBlindedBlockCapellaSSZ(_ context.Context, blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
	blindedConsensusBlk, err := blk.PbBlindedCapellaBlock()
	if err != nil {
		// ErrUnsupportedField means that we have another block type
		if errors.Is(err, consensus_types.ErrUnsupportedField) {
			consensusBlk, err := blk.PbCapellaBlock()
			if err != nil {
				return nil, err
			}
			if consensusBlk == nil {
				return nil, errNilBlock
			}
			blkInterface, err := blk.ToBlinded()
			if err != nil {
				return nil, errors.Wrapf(err, "could not convert block to blinded block")
			}
			blindedConsensusBlk, err = blkInterface.PbBlindedCapellaBlock()
			if err != nil {
				return nil, errors.Wrapf(err, "could not get signed beacon block")
			}
		} else {
			return nil, err
		}
	}

	if blindedConsensusBlk == nil {
		return nil, errNilBlock
	}
	sszData, err := blindedConsensusBlk.MarshalSSZ()
	if err != nil {
		return nil, errors.Wrapf(err, "could not marshal block into SSZ")
	}
	return sszData, nil
}

func (*Server) getBlindedBlockDenebSSZ(_ context.Context, blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
	blindedConsensusBlk, err := blk.PbBlindedDenebBlock()
	if err != nil {
		// ErrUnsupportedField means that we have another block type
		if errors.Is(err, consensus_types.ErrUnsupportedField) {
			consensusBlk, err := blk.PbDenebBlock()
			if err != nil {
				return nil, err
			}
			if consensusBlk == nil {
				return nil, errNilBlock
			}
			blkInterface, err := blk.ToBlinded()
			if err != nil {
				return nil, errors.Wrapf(err, "could not convert block to blinded block")
			}
			blindedConsensusBlk, err = blkInterface.PbBlindedDenebBlock()
			if err != nil {
				return nil, errors.Wrapf(err, "could not get signed beacon block")
			}
		} else {
			return nil, err
		}
	}

	if blindedConsensusBlk == nil {
		return nil, errNilBlock
	}
	sszData, err := blindedConsensusBlk.MarshalSSZ()
	if err != nil {
		return nil, errors.Wrapf(err, "could not marshal block into SSZ")
	}
	return sszData, nil
}

// GetBlockAttestations retrieves attestations included in the requested block.
func (s *Server) GetBlockAttestations(w http.ResponseWriter, r *http.Request) {
	ctx, span := trace.StartSpan(r.Context(), "beacon.GetBlockAttestations")
	defer span.End()

	blockId := mux.Vars(r)["block_id"]
	if blockId == "" {
		http2.HandleError(w, "block_id is required in URL params", http.StatusBadRequest)
		return
	}
	blk, err := s.Blocker.Block(ctx, []byte(blockId))
	if !shared.WriteBlockFetchError(w, blk, err) {
		return
	}

	consensusAtts := blk.Block().Body().Attestations()
	atts := make([]*shared.Attestation, len(consensusAtts))
	for i, att := range consensusAtts {
		atts[i] = shared.AttestationFromConsensus(att)
	}
	root, err := blk.Block().HashTreeRoot()
	if err != nil {
		http2.HandleError(w, "Could not get block root: "+err.Error(), http.StatusInternalServerError)
		return
	}
	isOptimistic, err := s.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
	if err != nil {
		http2.HandleError(w, "Could not check if block is optimistic: "+err.Error(), http.StatusInternalServerError)
		return
	}

	resp := &GetBlockAttestationsResponse{
		Data:                atts,
		ExecutionOptimistic: isOptimistic,
		Finalized:           s.FinalizationFetcher.IsFinalized(ctx, root),
	}
	http2.WriteJson(w, resp)
}

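// Example (illustrative sketch, not part of the diff above): decoding the attestations response
// from a client's point of view. The route /eth/v1/beacon/blocks/{block_id}/attestations and the
// snake_case JSON field names follow the standard beacon API and are assumptions here, as are the
// hypothetical type and function names. Raw JSON is kept for the attestation objects to avoid
// guessing their field layout. Requires encoding/json and net/http.
type exampleBlockAttestationsResponse struct {
	ExecutionOptimistic bool              `json:"execution_optimistic"`
	Finalized           bool              `json:"finalized"`
	Data                []json.RawMessage `json:"data"`
}

func exampleFetchBlockAttestations(baseURL, blockId string) (*exampleBlockAttestationsResponse, error) {
	resp, err := http.Get(baseURL + "/eth/v1/beacon/blocks/" + blockId + "/attestations")
	if err != nil {
		return nil, err
	}
	defer func() {
		_ = resp.Body.Close()
	}()
	out := &exampleBlockAttestationsResponse{}
	if err := json.NewDecoder(resp.Body).Decode(out); err != nil {
		return nil, err
	}
	return out, nil
}
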
// PublishBlindedBlock instructs the beacon node to use the components of the `SignedBlindedBeaconBlock` to construct
// and publish a SignedBeaconBlock by swapping out the transactions_root for the corresponding full list of `transactions`.
// The beacon node should broadcast a newly constructed SignedBeaconBlock to the beacon network, to be included in the
@@ -664,13 +1567,13 @@ func (s *Server) GetStateFork(w http.ResponseWriter, r *http.Request) {
	}
	st, err := s.Stater.State(ctx, []byte(stateId))
	if err != nil {
		http2.HandleError(w, err.Error(), http.StatusInternalServerError)
		shared.WriteStateFetchError(w, err)
		return
	}
	fork := st.Fork()
	isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
	if err != nil {
		http2.HandleError(w, errors.Wrap(err, "Could not check if slot's block is optimistic").Error(), http.StatusInternalServerError)
		http2.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
		return
	}
	blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
@@ -772,7 +1675,7 @@ func (s *Server) GetCommittees(w http.ResponseWriter, r *http.Request) {

	isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
	if err != nil {
		http2.HandleError(w, "Could not check if slot's block is optimistic: "+err.Error(), http.StatusInternalServerError)
		http2.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
		return
	}

@@ -806,10 +1709,6 @@ func (s *Server) GetBlockHeaders(w http.ResponseWriter, r *http.Request) {
	ctx, span := trace.StartSpan(r.Context(), "beacon.GetBlockHeaders")
	defer span.End()

	query := r.URL.Query()
	helpers.NormalizeQueryValues(query)
	r.URL.RawQuery = query.Encode()

	rawSlot := r.URL.Query().Get("slot")
	rawParentRoot := r.URL.Query().Get("parent_root")

@@ -818,7 +1717,7 @@ func (s *Server) GetBlockHeaders(w http.ResponseWriter, r *http.Request) {
	var blkRoots [][32]byte

	if rawParentRoot != "" {
		parentRoot, valid := shared.ValidateHex(w, "Parent Root", rawParentRoot, 32)
		parentRoot, valid := shared.ValidateHex(w, "Parent Root", rawParentRoot, fieldparams.RootLength)
		if !valid {
			return
		}
@@ -971,7 +1870,7 @@ func (s *Server) GetFinalityCheckpoints(w http.ResponseWriter, r *http.Request)
	}
	isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
	if err != nil {
		http2.HandleError(w, "Could not check if slot's block is optimistic: "+err.Error(), http.StatusInternalServerError)
		http2.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
		return
	}
	blockRoot, err := st.LatestBlockHeader().HashTreeRoot()