Compare commits


35 Commits

Author SHA1 Message Date
nisdas
43427d435c remove them 2023-06-09 08:47:58 +08:00
nisdas
c1f2bb42cc remove other bundles 2023-06-09 08:42:32 +08:00
nisdas
b6d26a33e0 add patch 2023-06-09 08:41:29 +08:00
nisdas
ae3bf3124f Merge branch 'develop' of https://github.com/prysmaticlabs/geth-sharding into rcmgrMetrics 2023-06-08 12:04:45 +08:00
Nishant Das
c1391f0de3 Always Favour Yamux for Multiplexing (#12502) 2023-06-08 04:02:46 +00:00
james-prysm
6672d1499a prysmctl: output proposer settings (#12181)
* wip proposer settings

* WIP validator client APIs

* adding proposer settings output

* adding unit tests

* fixing linting

* fixing deepsource issues

* fixing e2e

* fixing deep source issue

* updating naming to not stutter

* updating bazel

* fixing linting error

* reverting comment

* adding builder settings

* gaz

* Update validator/client/validator.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* adding comments

* adding some tests

* gaz

* Update cmd/prysmctl/validator/proposer_settings.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update cmd/prysmctl/validator/proposer_settings.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update cmd/prysmctl/validator/proposer_settings.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update cmd/prysmctl/validator/proposer_settings.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/client/options.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/client/options.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/client/errors.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/client/options.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/client/options.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/client/validator/client.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update cmd/prysmctl/validator/cmd.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/client/validator/client.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/client/validator/client.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update cmd/prysmctl/validator/proposer_settings.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/client/errors.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* fixing feedback

* fixing unit test

* addressing comments

---------

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-06-06 17:03:30 +00:00
Nishant Das
33cf52831c Update Libp2p to v0.27.5 (#12486)
* add deps

* update to v0.27.5 and handle panic
2023-06-06 08:41:15 +08:00
terencechain
d543e9be00 Update spec tests to v1.4.0-alpha.1 (#12489) 2023-06-03 11:17:13 +00:00
Nishant Das
0669050ffa Add Appropriate Size for the Attestation Queue (#12485)
* add tag

* fix off by 1
2023-06-02 11:33:28 +00:00
zghh
ceff0c2024 Fix the bug that returns 500 in /eth/v1/node/peers interface (#12483)
* Fix the bug that returns 500 in /eth/v1/node/peers interface

* Update node.go

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-06-02 03:27:17 +00:00
Radosław Kapka
c32b581e8e Add broadcast_validation to block publishing (#12432)
* day 1

* day 2

* day 2+

* day 3

* day 4

* making bazel happy

* PublishBlindedBlockV2

* remove file

* use lock in insertSeenProposerIndex

* remove EquivocationChecker interface

* update deps.bzl

* remove middleware json tags

* go mod tidy

* remove redundant return statements

* validate in handler

* improvements

* extract common code

* remove import

* sync test fix

* Update beacon-chain/rpc/eth/beacon/handlers.go

Co-authored-by: terencechain <terence@prysmaticlabs.com>

---------

Co-authored-by: terencechain <terence@prysmaticlabs.com>
2023-06-01 11:22:49 +00:00
nisdas
711d4b1c89 add tag 2023-06-01 17:13:16 +08:00
terencechain
e516a2004f Update next slot cache correctly under late task (#12462) 2023-05-31 08:50:37 -07:00
terencechain
cb65d8af96 Proposer RPC: make setExecutionData better (#12466) 2023-05-31 06:06:32 -07:00
nisdas
639e5dc906 add prom registration 2023-05-31 15:17:58 +08:00
Nishant Das
70152bf476 Copy All Field Tries For Late Blocks (#12461)
* add new thing

* only have it for late blocks

* comments

* change to lock

* add test

* Update beacon-chain/state/state-native/state_test.go

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-05-30 09:57:20 +00:00
Radosław Kapka
8aa688729d Cleanup of ProposerPayloadIDsCache (#12474)
* Cleanup of `ProposerPayloadIDsCache`

* one more comment

* Update beacon-chain/cache/payload_id.go

Co-authored-by: terencechain <terence@prysmaticlabs.com>

* Update beacon-chain/cache/payload_id.go

Co-authored-by: terencechain <terence@prysmaticlabs.com>

---------

Co-authored-by: terencechain <terence@prysmaticlabs.com>
2023-05-29 16:10:28 +00:00
Preston Van Loon
1ffc92999f p2p: Check peer threshold is met before giving up on ctx deadline (#12446)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-05-28 13:24:59 +00:00
terencechain
2dcef85f97 Add spec test for v1.4.0-alpha.0 (#12460)
* Fix spec test

* Fix sha

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-05-25 14:05:43 +00:00
Nishant Das
52da7b3de6 Release Lock Before Panicking (#12464) 2023-05-25 06:42:21 -07:00
terencechain
be16b64535 Remove SubmitBlindBlock context timeout (#12453) 2023-05-24 14:19:23 +00:00
terencechain
f4d3939b62 Add logs for build block times (#12452)
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-05-24 13:37:26 +00:00
Nishant Das
245d8a29e0 Optimize Zerohash Comparisons In Forkchoice (#12458) 2023-05-24 09:58:02 +00:00
james-prysm
666188dfea Improve validator import logs (#12429)
* adding small ux improvement

* gaz

* rolling back dir test changes

* Update validator/accounts/accounts_import.go

* adding review suggestion

* missed else part of statement

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-05-23 15:41:41 -05:00
Preston Van Loon
cfa64ae013 Restore disable-peer-scorer flag (#12386)
* Revert "Make Peer Scorer Permanent Default (#12138)"

This reverts commit 4d28d69fd9.

* make peer scoring flag warning scary
2023-05-23 13:53:02 +00:00
Potuz
cd0f814f2e fixed erroneous panic (#12450) 2023-05-23 11:12:31 +00:00
Radosław Kapka
abc81e6dde Merge all block unblinding code into a single unblinder struct (#12240)
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2023-05-23 11:38:52 +02:00
terencechain
6b26183e73 Add missing config yamls for domains (#12442)
* Add missing config yamls for domains

* Fix GetSpec test

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-05-22 18:02:41 +00:00
Preston Van Loon
7fe935e94d Fix metric name from PR #12430 (#12445)
* Fix metric name from PR #12430

* @potuz can't spell 'unknown'

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-05-22 17:43:21 +00:00
Potuz
e0e7c71eb5 Fix sandwich attack on honest reorgs (#12418)
* Fix sandwich attack on honest reorgs

* fix test

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-05-22 17:23:20 +00:00
Potuz
c80019bd0b remove trivial helper (#12443) 2023-05-22 15:59:16 +00:00
james-prysm
8dfb92c605 reverting expiration logic on validator while using --enable-registration-cache (#12436)
* reverting expiration logic

* gaz
2023-05-22 14:54:09 +00:00
Potuz
9d192a3608 Remove unused function (#12439)
* Remove unused function

* gazelle
2023-05-22 11:09:08 -03:00
Nishant Das
51bde7a845 disable it (#12438) 2023-05-22 19:18:13 +08:00
kasey
385a317902 Revert initsync revert (#12431)
* Revert "Revert "BeaconBlocksByRange and BlobSidecarsByRange consistency (#123… (#12426)"

This reverts commit ddc1e48e05.

* fix metrics bug, add batch.next tests

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2023-05-19 16:59:13 +00:00
123 changed files with 6527 additions and 1980 deletions


@@ -205,7 +205,7 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.3.0"
consensus_spec_version = "v1.4.0-alpha.1"
bls_test_version = "v0.1.1"
@@ -221,7 +221,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "1c806e04ac5e3779032c06a6009350b3836b6809bb23812993d6ececd7047cf5",
sha256 = "1118a663be4a00ba00f0635eb20287157f2b2f993aed64335bfbcd04af424c2b",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -237,7 +237,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "2b42796dc5ccd9f1246032d0c17663e20f70334ff7e00325f0fc3af28cb24186",
sha256 = "acde6e10940d14f22277eda5b55b65a24623ac88e4c7a2e34134a6069f5eea82",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -253,7 +253,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "231e3371e81ce9acde65d2910ec4580587e74dbbcfcbd9c675e473e022deec8a",
sha256 = "49c022f3a3478cea849ba8f877a9f7e4c1ded549edddc09993550bbc5bb192e1",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -268,7 +268,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "219b74d95664ea7e8dfbf31162dfa206b9c0cf45919ea86db5fa0f8902977e3c",
sha256 = "c3e246ff01f6b7b9e9e41939954a6ff89dfca7297415f88781809165fa83267c",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

api/client/BUILD.bazel (new file)

@@ -0,0 +1,20 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"client.go",
"errors.go",
"options.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/client",
visibility = ["//visibility:public"],
deps = ["@com_github_pkg_errors//:go_default_library"],
)
go_test(
name = "go_default_test",
srcs = ["client_test.go"],
embed = [":go_default_library"],
deps = ["//testing/require:go_default_library"],
)


@@ -6,11 +6,11 @@ go_library(
"checkpoint.go",
"client.go",
"doc.go",
"errors.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/beacon",
visibility = ["//visibility:public"],
deps = [
"//api/client:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/rpc/apimiddleware:go_default_library",
"//beacon-chain/state:go_default_library",
@@ -39,6 +39,7 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//api/client:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",


@@ -6,6 +6,7 @@ import (
"path"
"github.com/pkg/errors"
base "github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
@@ -140,7 +141,7 @@ func ComputeWeakSubjectivityCheckpoint(ctx context.Context, client *Client) (*We
ws, err := client.GetWeakSubjectivity(ctx)
if err != nil {
// a 404/405 is expected if querying an endpoint that doesn't support the weak subjectivity checkpoint api
if !errors.Is(err, ErrNotOK) {
if !errors.Is(err, base.ErrNotOK) {
return nil, errors.Wrap(err, "unexpected API response for prysm-only weak subjectivity checkpoint API")
}
// fall back to vanilla Beacon Node API method


@@ -7,9 +7,9 @@ import (
"fmt"
"io"
"net/http"
"net/url"
"testing"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
blocktest "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks/testing"
@@ -66,11 +66,7 @@ func TestMarshalToEnvelope(t *testing.T) {
}
func TestFallbackVersionCheck(t *testing.T) {
c := &Client{
hc: &http.Client{},
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
c.hc.Transport = &testRT{rt: func(req *http.Request) (*http.Response, error) {
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getNodeVersionPath:
@@ -88,12 +84,13 @@ func TestFallbackVersionCheck(t *testing.T) {
case getWeakSubjectivityPath:
res.StatusCode = http.StatusNotFound
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
ctx := context.Background()
_, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
_, err = ComputeWeakSubjectivityCheckpoint(ctx, c)
require.ErrorIs(t, err, errUnsupportedPrysmCheckpointVersion)
}
@@ -170,44 +167,41 @@ func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) {
Epoch: epoch,
}
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getWeakSubjectivityPath:
res.StatusCode = http.StatusOK
cp := struct {
Epoch string `json:"epoch"`
Root string `json:"root"`
}{
Epoch: fmt.Sprintf("%d", slots.ToEpoch(b.Block().Slot())),
Root: fmt.Sprintf("%#x", bRoot),
}
wsr := struct {
Checkpoint interface{} `json:"ws_checkpoint"`
StateRoot string `json:"state_root"`
}{
Checkpoint: cp,
StateRoot: fmt.Sprintf("%#x", wRoot),
}
rb, err := marshalToEnvelope(wsr)
require.NoError(t, err)
res.Body = io.NopCloser(bytes.NewBuffer(rb))
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getWeakSubjectivityPath:
res.StatusCode = http.StatusOK
cp := struct {
Epoch string `json:"epoch"`
Root string `json:"root"`
}{
Epoch: fmt.Sprintf("%d", slots.ToEpoch(b.Block().Slot())),
Root: fmt.Sprintf("%#x", bRoot),
}
wsr := struct {
Checkpoint interface{} `json:"ws_checkpoint"`
StateRoot string `json:"state_root"`
}{
Checkpoint: cp,
StateRoot: fmt.Sprintf("%#x", wRoot),
}
rb, err := marshalToEnvelope(wsr)
require.NoError(t, err)
res.Body = io.NopCloser(bytes.NewBuffer(rb))
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
}
return res, nil
}},
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
wsd, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
require.NoError(t, err)
@@ -266,42 +260,39 @@ func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
wsSerialized, err := wst.MarshalSSZ()
require.NoError(t, err)
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getNodeVersionPath:
res.StatusCode = http.StatusOK
b := bytes.NewBuffer(nil)
d := struct {
Version string `json:"version"`
}{
Version: "Lighthouse/v0.1.5 (Linux x86_64)",
}
encoded, err := marshalToEnvelope(d)
require.NoError(t, err)
b.Write(encoded)
res.Body = io.NopCloser(b)
case getWeakSubjectivityPath:
res.StatusCode = http.StatusNotFound
case renderGetStatePath(IdHead):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getNodeVersionPath:
res.StatusCode = http.StatusOK
b := bytes.NewBuffer(nil)
d := struct {
Version string `json:"version"`
}{
Version: "Lighthouse/v0.1.5 (Linux x86_64)",
}
encoded, err := marshalToEnvelope(d)
require.NoError(t, err)
b.Write(encoded)
res.Body = io.NopCloser(b)
case getWeakSubjectivityPath:
res.StatusCode = http.StatusNotFound
case renderGetStatePath(IdHead):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
}
return res, nil
}},
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
wsPub, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
require.NoError(t, err)
@@ -315,21 +306,16 @@ func TestGetWeakSubjectivityEpochFromHead(t *testing.T) {
st, expectedEpoch := defaultTestHeadState(t, params.MainnetConfig())
serialized, err := st.MarshalSSZ()
require.NoError(t, err)
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case renderGetStatePath(IdHead):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
}
return res, nil
}},
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
if req.URL.Path == renderGetStatePath(IdHead) {
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
actualEpoch, err := getWeakSubjectivityEpochFromHead(context.Background(), c)
require.NoError(t, err)
require.Equal(t, expectedEpoch, actualEpoch)
@@ -448,29 +434,24 @@ func TestDownloadFinalizedData(t *testing.T) {
ms, err := st.MarshalSSZ()
require.NoError(t, err)
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case renderGetStatePath(IdFinalized):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(ms))
case renderGetBlockPath(IdFromRoot(br)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(mb))
default:
res.StatusCode = http.StatusInternalServerError
res.Body = io.NopCloser(bytes.NewBufferString(""))
}
return res, nil
}},
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case renderGetStatePath(IdFinalized):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(ms))
case renderGetBlockPath(IdFromRoot(br)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(mb))
default:
res.StatusCode = http.StatusInternalServerError
res.Body = io.NopCloser(bytes.NewBufferString(""))
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
// sanity check before we go through checkpoint
// make sure we can download the state and unmarshal it with the VersionedUnmarshaler
sb, err := c.GetState(ctx, IdFinalized)


@@ -5,8 +5,6 @@ import (
"context"
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"net/url"
"path"
@@ -14,8 +12,8 @@ import (
"sort"
"strconv"
"text/template"
"time"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/network/forks"
v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
@@ -54,8 +52,6 @@ const (
IdFinalized StateOrBlockId = "finalized"
)
var ErrMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
// IdFromRoot encodes a block root in the format expected by the API in places where a root can be used to identify
// a BeaconState or SignedBeaconBlock.
func IdFromRoot(r [32]byte) StateOrBlockId {
@@ -85,96 +81,22 @@ func idTemplate(ts string) func(StateOrBlockId) string {
return f
}
// ClientOpt is a functional option for the Client type (http.Client wrapper)
type ClientOpt func(*Client)
// WithTimeout sets the .Timeout attribute of the wrapped http.Client.
func WithTimeout(timeout time.Duration) ClientOpt {
return func(c *Client) {
c.hc.Timeout = timeout
}
func renderGetBlockPath(id StateOrBlockId) string {
return path.Join(getSignedBlockPath, string(id))
}
// Client provides a collection of helper methods for calling the Eth Beacon Node API endpoints.
type Client struct {
hc *http.Client
baseURL *url.URL
*client.Client
}
// NewClient constructs a new client with the provided options (ex WithTimeout).
// `host` is the base host + port used to construct request urls. This value can be
// a URL string, or NewClient will assume an http endpoint if just `host:port` is used.
func NewClient(host string, opts ...ClientOpt) (*Client, error) {
u, err := urlForHost(host)
// NewClient returns a new Client that includes functions for rest calls to Beacon API.
func NewClient(host string, opts ...client.ClientOpt) (*Client, error) {
c, err := client.NewClient(host, opts...)
if err != nil {
return nil, err
}
c := &Client{
hc: &http.Client{},
baseURL: u,
}
for _, o := range opts {
o(c)
}
return c, nil
}
func urlForHost(h string) (*url.URL, error) {
// try to parse as url (being permissive)
u, err := url.Parse(h)
if err == nil && u.Host != "" {
return u, nil
}
// try to parse as host:port
host, port, err := net.SplitHostPort(h)
if err != nil {
return nil, ErrMalformedHostname
}
return &url.URL{Host: fmt.Sprintf("%s:%s", host, port), Scheme: "http"}, nil
}
// NodeURL returns a human-readable string representation of the beacon node base url.
func (c *Client) NodeURL() string {
return c.baseURL.String()
}
type reqOption func(*http.Request)
func withSSZEncoding() reqOption {
return func(req *http.Request) {
req.Header.Set("Accept", "application/octet-stream")
}
}
// get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
func (c *Client) get(ctx context.Context, path string, opts ...reqOption) ([]byte, error) {
u := c.baseURL.ResolveReference(&url.URL{Path: path})
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
}
for _, o := range opts {
o(req)
}
r, err := c.hc.Do(req)
if err != nil {
return nil, err
}
defer func() {
err = r.Body.Close()
}()
if r.StatusCode != http.StatusOK {
return nil, non200Err(r)
}
b, err := io.ReadAll(r.Body)
if err != nil {
return nil, errors.Wrap(err, "error reading http response body from GetBlock")
}
return b, nil
}
func renderGetBlockPath(id StateOrBlockId) string {
return path.Join(getSignedBlockPath, string(id))
return &Client{c}, nil
}
// GetBlock retrieves the SignedBeaconBlock for the given block id.
@@ -184,7 +106,7 @@ func renderGetBlockPath(id StateOrBlockId) string {
// The return value contains the ssz-encoded bytes.
func (c *Client) GetBlock(ctx context.Context, blockId StateOrBlockId) ([]byte, error) {
blockPath := renderGetBlockPath(blockId)
b, err := c.get(ctx, blockPath, withSSZEncoding())
b, err := c.Get(ctx, blockPath, client.WithSSZEncoding())
if err != nil {
return nil, errors.Wrapf(err, "error requesting state by id = %s", blockId)
}
@@ -199,7 +121,7 @@ var getBlockRootTpl = idTemplate(getBlockRootPath)
// for the named identifiers.
func (c *Client) GetBlockRoot(ctx context.Context, blockId StateOrBlockId) ([32]byte, error) {
rootPath := getBlockRootTpl(blockId)
b, err := c.get(ctx, rootPath)
b, err := c.Get(ctx, rootPath)
if err != nil {
return [32]byte{}, errors.Wrapf(err, "error requesting block root by id = %s", blockId)
}
@@ -222,7 +144,7 @@ var getForkTpl = idTemplate(getForkForStatePath)
// <slot>, <hex encoded blockRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
// for the named identifiers.
func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fork, error) {
body, err := c.get(ctx, getForkTpl(stateId))
body, err := c.Get(ctx, getForkTpl(stateId))
if err != nil {
return nil, errors.Wrapf(err, "error requesting fork by state id = %s", stateId)
}
@@ -238,7 +160,7 @@ func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fo
// GetForkSchedule retrieve all forks, past present and future, of which this node is aware.
func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, error) {
body, err := c.get(ctx, getForkSchedulePath)
body, err := c.Get(ctx, getForkSchedulePath)
if err != nil {
return nil, errors.Wrap(err, "error requesting fork schedule")
}
@@ -256,7 +178,7 @@ func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, er
// GetConfigSpec retrieve the current configs of the network used by the beacon node.
func (c *Client) GetConfigSpec(ctx context.Context) (*v1.SpecResponse, error) {
body, err := c.get(ctx, getConfigSpecPath)
body, err := c.Get(ctx, getConfigSpecPath)
if err != nil {
return nil, errors.Wrap(err, "error requesting configSpecPath")
}
@@ -279,7 +201,7 @@ var versionRE = regexp.MustCompile(`^(\w+)/(v\d+\.\d+\.\d+[-a-zA-Z0-9]*)\s*/?(.*
func parseNodeVersion(v string) (*NodeVersion, error) {
groups := versionRE.FindStringSubmatch(v)
if len(groups) != 4 {
return nil, errors.Wrapf(ErrInvalidNodeVersion, "could not be parsed: %s", v)
return nil, errors.Wrapf(client.ErrInvalidNodeVersion, "could not be parsed: %s", v)
}
return &NodeVersion{
implementation: groups[1],
@@ -291,7 +213,7 @@ func parseNodeVersion(v string) (*NodeVersion, error) {
// GetNodeVersion requests that the beacon node identify information about its implementation in a format
// similar to a HTTP User-Agent field. ex: Lighthouse/v0.1.5 (Linux x86_64)
func (c *Client) GetNodeVersion(ctx context.Context) (*NodeVersion, error) {
b, err := c.get(ctx, getNodeVersionPath)
b, err := c.Get(ctx, getNodeVersionPath)
if err != nil {
return nil, errors.Wrap(err, "error requesting node version")
}
@@ -318,7 +240,7 @@ func renderGetStatePath(id StateOrBlockId) string {
// The return value contains the ssz-encoded bytes.
func (c *Client) GetState(ctx context.Context, stateId StateOrBlockId) ([]byte, error) {
statePath := path.Join(getStatePath, string(stateId))
b, err := c.get(ctx, statePath, withSSZEncoding())
b, err := c.Get(ctx, statePath, client.WithSSZEncoding())
if err != nil {
return nil, errors.Wrapf(err, "error requesting state by id = %s", stateId)
}
@@ -331,7 +253,7 @@ func (c *Client) GetState(ctx context.Context, stateId StateOrBlockId) ([]byte,
// - finds the highest non-skipped block preceding the epoch
// - returns the htr of the found block and returns this + the value of state_root from the block
func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData, error) {
body, err := c.get(ctx, getWeakSubjectivityPath)
body, err := c.Get(ctx, getWeakSubjectivityPath)
if err != nil {
return nil, err
}
@@ -362,7 +284,7 @@ func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData
// SubmitChangeBLStoExecution calls a beacon API endpoint to set the withdrawal addresses based on the given signed messages.
// If the API responds with something other than OK there will be failure messages associated to the corresponding request message.
func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apimiddleware.SignedBLSToExecutionChangeJson) error {
u := c.baseURL.ResolveReference(&url.URL{Path: changeBLStoExecutionPath})
u := c.BaseURL().ResolveReference(&url.URL{Path: changeBLStoExecutionPath})
body, err := json.Marshal(request)
if err != nil {
return errors.Wrap(err, "failed to marshal JSON")
@@ -372,7 +294,7 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apim
return errors.Wrap(err, "invalid format, failed to create new POST request object")
}
req.Header.Set("Content-Type", "application/json")
resp, err := c.hc.Do(req)
resp, err := c.Do(req)
if err != nil {
return err
}
@@ -401,7 +323,7 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apim
// GetBLStoExecutionChanges gets all the set withdrawal messages in the node's operation pool.
// Returns a struct representation of json response.
func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*apimiddleware.BLSToExecutionChangesPoolResponseJson, error) {
body, err := c.get(ctx, changeBLStoExecutionPath)
body, err := c.Get(ctx, changeBLStoExecutionPath)
if err != nil {
return nil, err
}
@@ -413,23 +335,6 @@ func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*apimiddleware.B
return poolResponse, nil
}
func non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(response.Body)
var body string
if err != nil {
body = "(Unable to read response body.)"
} else {
body = "response body:\n" + string(bodyBytes)
}
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
switch response.StatusCode {
case 404:
return errors.Wrap(ErrNotFound, msg)
default:
return errors.Wrap(ErrNotOK, msg)
}
}
type forkResponse struct {
PreviousVersion string `json:"previous_version"`
CurrentVersion string `json:"current_version"`


@@ -4,6 +4,7 @@ import (
"net/url"
"testing"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
@@ -17,17 +18,17 @@ func TestParseNodeVersion(t *testing.T) {
{
name: "empty string",
v: "",
err: ErrInvalidNodeVersion,
err: client.ErrInvalidNodeVersion,
},
{
name: "Prysm as the version string",
v: "Prysm",
err: ErrInvalidNodeVersion,
err: client.ErrInvalidNodeVersion,
},
{
name: "semver only",
v: "v2.0.6",
err: ErrInvalidNodeVersion,
err: client.ErrInvalidNodeVersion,
},
{
name: "complete version",
@@ -91,7 +92,7 @@ func TestValidHostname(t *testing.T) {
{
name: "hostname without port",
hostArg: "mydomain.org",
err: ErrMalformedHostname,
err: client.ErrMalformedHostname,
},
{
name: "hostname with port",
@@ -132,7 +133,7 @@ func TestValidHostname(t *testing.T) {
return
}
require.NoError(t, err)
require.Equal(t, c.joined, cl.baseURL.ResolveReference(&url.URL{Path: c.path}).String())
require.Equal(t, c.joined, cl.BaseURL().ResolveReference(&url.URL{Path: c.path}).String())
})
}
}


@@ -1,13 +0,0 @@
package beacon
import "github.com/pkg/errors"
// ErrNotOK is used to indicate when an HTTP request to the Beacon Node API failed with any non-2xx response code.
// More specific errors may be returned, but an error in reaction to a non-2xx response will always wrap ErrNotOK.
var ErrNotOK = errors.New("did not receive 2xx response from API")
// ErrNotFound specifically means that a '404 - NOT FOUND' response was received from the API.
var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")
// ErrInvalidNodeVersion indicates that the /eth/v1/node/version api response format was not recognized.
var ErrInvalidNodeVersion = errors.New("invalid node version response")


@@ -11,7 +11,6 @@ import (
"net/url"
"strings"
"text/template"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -36,7 +35,6 @@ const (
var errMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
var errMalformedRequest = errors.New("required request data are missing")
var errNotBlinded = errors.New("submitted block is not blinded")
var submitBlindedBlockTimeout = 3 * time.Second
// ClientOpt is a functional option for the Client type (http.Client wrapper)
type ClientOpt func(*Client)
@@ -292,8 +290,6 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockBellatrix value body in SubmitBlindedBlock")
}
ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
defer cancel()
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Bellatrix))
}
@@ -325,8 +321,6 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockCapella value body in SubmitBlindedBlockCapella")
}
ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
defer cancel()
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Capella))
}

api/client/client.go (new file)

@@ -0,0 +1,97 @@
package client
import (
"context"
"io"
"net"
"net/http"
"net/url"
"github.com/pkg/errors"
)
// Client is a wrapper object around the HTTP client.
type Client struct {
hc *http.Client
baseURL *url.URL
token string
}
// NewClient constructs a new client with the provided options (ex WithTimeout).
// `host` is the base host + port used to construct request urls. This value can be
// a URL string, or NewClient will assume an http endpoint if just `host:port` is used.
func NewClient(host string, opts ...ClientOpt) (*Client, error) {
u, err := urlForHost(host)
if err != nil {
return nil, err
}
c := &Client{
hc: &http.Client{},
baseURL: u,
}
for _, o := range opts {
o(c)
}
return c, nil
}
// Token returns the bearer token used for jwt authentication
func (c *Client) Token() string {
return c.token
}
// BaseURL returns the base url of the client
func (c *Client) BaseURL() *url.URL {
return c.baseURL
}
// Do execute the request against the http client
func (c *Client) Do(req *http.Request) (*http.Response, error) {
return c.hc.Do(req)
}
func urlForHost(h string) (*url.URL, error) {
// try to parse as url (being permissive)
u, err := url.Parse(h)
if err == nil && u.Host != "" {
return u, nil
}
// try to parse as host:port
host, port, err := net.SplitHostPort(h)
if err != nil {
return nil, ErrMalformedHostname
}
return &url.URL{Host: net.JoinHostPort(host, port), Scheme: "http"}, nil
}
// NodeURL returns a human-readable string representation of the beacon node base url.
func (c *Client) NodeURL() string {
return c.baseURL.String()
}
// Get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
func (c *Client) Get(ctx context.Context, path string, opts ...ReqOption) ([]byte, error) {
u := c.baseURL.ResolveReference(&url.URL{Path: path})
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
}
for _, o := range opts {
o(req)
}
r, err := c.hc.Do(req)
if err != nil {
return nil, err
}
defer func() {
err = r.Body.Close()
}()
if r.StatusCode != http.StatusOK {
return nil, Non200Err(r)
}
b, err := io.ReadAll(r.Body)
if err != nil {
return nil, errors.Wrap(err, "error reading http response body")
}
return b, nil
}
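Not part of the diff — a minimal usage sketch of the new shared client, assuming the import path declared in api/client/BUILD.bazel above and a purely illustrative local endpoint and path:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/prysmaticlabs/prysm/v4/api/client"
)

func main() {
	// NewClient accepts a full URL or a host:port pair (http is assumed for host:port).
	c, err := client.NewClient("localhost:3500")
	if err != nil {
		log.Fatal(err)
	}
	// Get resolves the path against the base URL and returns the response body;
	// non-2xx responses come back wrapped in the sentinel errors from errors.go below.
	body, err := c.Get(context.Background(), "/eth/v1/node/version")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body))
}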

api/client/client_test.go (new file)

@@ -0,0 +1,48 @@
package client
import (
"net/url"
"testing"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func TestValidHostname(t *testing.T) {
cases := []struct {
name string
hostArg string
path string
joined string
err error
}{
{
name: "hostname without port",
hostArg: "mydomain.org",
err: ErrMalformedHostname,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
cl, err := NewClient(c.hostArg)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return
}
require.NoError(t, err)
require.Equal(t, c.joined, cl.BaseURL().ResolveReference(&url.URL{Path: c.path}).String())
})
}
}
func TestWithAuthenticationToken(t *testing.T) {
cl, err := NewClient("https://www.offchainlabs.com:3500", WithAuthenticationToken("my token"))
require.NoError(t, err)
require.Equal(t, cl.Token(), "my token")
}
func TestBaseURL(t *testing.T) {
cl, err := NewClient("https://www.offchainlabs.com:3500")
require.NoError(t, err)
require.Equal(t, "www.offchainlabs.com", cl.BaseURL().Hostname())
require.Equal(t, "3500", cl.BaseURL().Port())
}

api/client/errors.go (new file)

@@ -0,0 +1,40 @@
package client
import (
"fmt"
"io"
"net/http"
"github.com/pkg/errors"
)
// ErrMalformedHostname is used to indicate if a host name's format is incorrect.
var ErrMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
// ErrNotOK is used to indicate when an HTTP request to the API failed with any non-2xx response code.
// More specific errors may be returned, but an error in reaction to a non-2xx response will always wrap ErrNotOK.
var ErrNotOK = errors.New("did not receive 2xx response from API")
// ErrNotFound specifically means that a '404 - NOT FOUND' response was received from the API.
var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")
// ErrInvalidNodeVersion indicates that the /eth/v1/node/version API response format was not recognized.
var ErrInvalidNodeVersion = errors.New("invalid node version response")
// Non200Err is a function that parses an HTTP response to handle responses that are not 200 with a formatted error.
func Non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(response.Body)
var body string
if err != nil {
body = "(Unable to read response body.)"
} else {
body = "response body:\n" + string(bodyBytes)
}
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
switch response.StatusCode {
case 404:
return errors.Wrap(ErrNotFound, msg)
default:
return errors.Wrap(ErrNotOK, msg)
}
}
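Illustration only (not in the change set): because Non200Err wraps the sentinel errors above, callers can branch on them with errors.Is. The path and helper name here are hypothetical.

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/api/client"
)

// classify is a hypothetical helper showing how a caller might react to the
// wrapped sentinel errors returned by Client.Get.
func classify(ctx context.Context, c *client.Client) {
	b, err := c.Get(ctx, "/eth/v1/beacon/states/head/root")
	switch {
	case errors.Is(err, client.ErrNotFound):
		fmt.Println("resource not found (404)")
	case errors.Is(err, client.ErrNotOK):
		fmt.Println("non-2xx response from the API")
	case err != nil:
		fmt.Println("transport error:", err)
	default:
		fmt.Printf("got %d bytes\n", len(b))
	}
}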

api/client/options.go (new file)

@@ -0,0 +1,48 @@
package client
import (
"fmt"
"net/http"
"time"
)
// ReqOption is a request functional option.
type ReqOption func(*http.Request)
// WithSSZEncoding is a request functional option that adds SSZ encoding header.
func WithSSZEncoding() ReqOption {
return func(req *http.Request) {
req.Header.Set("Accept", "application/octet-stream")
}
}
// WithAuthorizationToken is a request functional option that adds header for authorization token.
func WithAuthorizationToken(token string) ReqOption {
return func(req *http.Request) {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
}
}
// ClientOpt is a functional option for the Client type (http.Client wrapper)
type ClientOpt func(*Client)
// WithTimeout sets the .Timeout attribute of the wrapped http.Client.
func WithTimeout(timeout time.Duration) ClientOpt {
return func(c *Client) {
c.hc.Timeout = timeout
}
}
// WithRoundTripper replaces the underlying HTTP's transport with a custom one.
func WithRoundTripper(t http.RoundTripper) ClientOpt {
return func(c *Client) {
c.hc.Transport = t
}
}
// WithAuthenticationToken sets an oauth token to be used.
func WithAuthenticationToken(token string) ClientOpt {
return func(c *Client) {
c.token = token
}
}
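A sketch of how these functional options combine when constructing a client; the endpoint, transport, and token below are placeholders rather than values from the diff:

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/prysmaticlabs/prysm/v4/api/client"
)

func main() {
	// WithTimeout bounds every request made through the wrapped http.Client,
	// WithRoundTripper swaps the transport (as the rewritten tests above do),
	// and WithAuthenticationToken stores a bearer token for later use.
	c, err := client.NewClient(
		"https://example.org:3500",
		client.WithTimeout(30*time.Second),
		client.WithRoundTripper(http.DefaultTransport),
		client.WithAuthenticationToken("my-token"),
	)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(c.NodeURL(), c.Token())
}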


@@ -0,0 +1,13 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["client.go"],
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/validator",
visibility = ["//visibility:public"],
deps = [
"//api/client:go_default_library",
"//validator/rpc/apimiddleware:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)


@@ -0,0 +1,121 @@
package validator
import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/validator/rpc/apimiddleware"
)
const (
localKeysPath = "/eth/v1/keystores"
remoteKeysPath = "/eth/v1/remotekeys"
feeRecipientPath = "/eth/v1/validator/{pubkey}/feerecipient"
)
// Client provides a collection of helper methods for calling the Keymanager API endpoints.
type Client struct {
*client.Client
}
// NewClient returns a new Client that includes functions for REST calls to keymanager APIs.
func NewClient(host string, opts ...client.ClientOpt) (*Client, error) {
c, err := client.NewClient(host, opts...)
if err != nil {
return nil, err
}
return &Client{c}, nil
}
// GetValidatorPubKeys gets the current list of web3signer or the local validator public keys in hex format.
func (c *Client) GetValidatorPubKeys(ctx context.Context) ([]string, error) {
jsonlocal, err := c.GetLocalValidatorKeys(ctx)
if err != nil {
return nil, err
}
jsonremote, err := c.GetRemoteValidatorKeys(ctx)
if err != nil {
return nil, err
}
if len(jsonlocal.Keystores) == 0 && len(jsonremote.Keystores) == 0 {
return nil, errors.New("there are no local keys or remote keys on the validator")
}
hexKeys := make(map[string]bool)
for index := range jsonlocal.Keystores {
hexKeys[jsonlocal.Keystores[index].ValidatingPubkey] = true
}
for index := range jsonremote.Keystores {
hexKeys[jsonremote.Keystores[index].Pubkey] = true
}
keys := make([]string, 0)
for k := range hexKeys {
keys = append(keys, k)
}
return keys, nil
}
// GetLocalValidatorKeys calls the keymanager APIs for local validator keys
func (c *Client) GetLocalValidatorKeys(ctx context.Context) (*apimiddleware.ListKeystoresResponseJson, error) {
localBytes, err := c.Get(ctx, localKeysPath, client.WithAuthorizationToken(c.Token()))
if err != nil {
return nil, err
}
jsonlocal := &apimiddleware.ListKeystoresResponseJson{}
if err := json.Unmarshal(localBytes, jsonlocal); err != nil {
return nil, errors.Wrap(err, "failed to parse local keystore list")
}
return jsonlocal, nil
}
// GetRemoteValidatorKeys calls the keymanager APIs for web3signer validator keys
func (c *Client) GetRemoteValidatorKeys(ctx context.Context) (*apimiddleware.ListRemoteKeysResponseJson, error) {
remoteBytes, err := c.Get(ctx, remoteKeysPath, client.WithAuthorizationToken(c.Token()))
if err != nil {
if !strings.Contains(err.Error(), "Prysm Wallet is not of type Web3Signer") {
return nil, err
}
}
jsonremote := &apimiddleware.ListRemoteKeysResponseJson{}
if len(remoteBytes) != 0 {
if err := json.Unmarshal(remoteBytes, jsonremote); err != nil {
return nil, errors.Wrap(err, "failed to parse remote keystore list")
}
}
return jsonremote, nil
}
// GetFeeRecipientAddresses takes a list of validators in hex format and returns an equal length list of fee recipients in hex format.
func (c *Client) GetFeeRecipientAddresses(ctx context.Context, validators []string) ([]string, error) {
feeRecipients := make([]string, len(validators))
for index, validator := range validators {
feejson, err := c.GetFeeRecipientAddress(ctx, validator)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("keymanager API failed to retrieve fee recipient for validator %s", validators[index]))
}
if feejson.Data == nil {
continue
}
feeRecipients[index] = feejson.Data.Ethaddress
}
return feeRecipients, nil
}
// GetFeeRecipientAddress takes a public key and calls the keymanager API to return its fee recipient.
func (c *Client) GetFeeRecipientAddress(ctx context.Context, pubkey string) (*apimiddleware.GetFeeRecipientByPubkeyResponseJson, error) {
path := strings.Replace(feeRecipientPath, "{pubkey}", pubkey, 1)
b, err := c.Get(ctx, path, client.WithAuthorizationToken(c.Token()))
if err != nil {
return nil, err
}
feejson := &apimiddleware.GetFeeRecipientByPubkeyResponseJson{}
if err := json.Unmarshal(b, feejson); err != nil {
return nil, errors.Wrap(err, "failed to parse fee recipient")
}
return feejson, nil
}
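Illustrative only — wiring the new keymanager client together, assuming the api/client/validator import path from the BUILD file above, a placeholder validator API endpoint, and an auth token obtained elsewhere:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/prysmaticlabs/prysm/v4/api/client"
	"github.com/prysmaticlabs/prysm/v4/api/client/validator"
)

func main() {
	token := "jwt-from-wallet" // placeholder
	vc, err := validator.NewClient("localhost:7500", client.WithAuthenticationToken(token))
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	// Collect local and web3signer keys, then look up the fee recipient for each.
	keys, err := vc.GetValidatorPubKeys(ctx)
	if err != nil {
		log.Fatal(err)
	}
	recipients, err := vc.GetFeeRecipientAddresses(ctx, keys)
	if err != nil {
		log.Fatal(err)
	}
	for i, k := range keys {
		fmt.Println(k, "->", recipients[i])
	}
}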


@@ -144,6 +144,7 @@ func (f *Feed) Send(value interface{}) (nsent int) {
if !f.typecheck(rvalue.Type()) {
f.sendLock <- struct{}{}
f.mu.Unlock()
panic(feedTypeError{op: "Send", got: rvalue.Type(), want: f.etype})
}
f.mu.Unlock()


@@ -32,6 +32,8 @@ func TestFeedPanics(t *testing.T) {
f.Send(2)
want := feedTypeError{op: "Send", got: reflect.TypeOf(uint64(0)), want: reflect.TypeOf(0)}
assert.NoError(t, checkPanic(want, func() { f.Send(uint64(2)) }))
// Validate it doesn't deadlock.
assert.NoError(t, checkPanic(want, func() { f.Send(uint64(2)) }))
}
{
var f Feed


@@ -53,7 +53,7 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
txs, err := p.Transactions()
switch {
case errors.Is(err, consensus_types.ErrUnsupportedGetter):
case errors.Is(err, consensus_types.ErrUnsupportedField):
case err != nil:
return err
default:


@@ -651,26 +651,24 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
// This routine checks if there is a cached proposer payload ID available for the next slot proposer.
// If there is not, it will call forkchoice updated with the correct payload attribute then cache the payload ID.
func (s *Service) spawnLateBlockTasksLoop() {
go func() {
_, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("spawnLateBlockTasksLoop encountered an error waiting for initialization")
func (s *Service) runLateBlockTasks() {
_, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("runLateBlockTasks encountered an error waiting for initialization")
return
}
attThreshold := params.BeaconConfig().SecondsPerSlot / 3
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot)
for {
select {
case <-ticker.C():
s.lateBlockTasks(s.ctx)
case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
return
}
attThreshold := params.BeaconConfig().SecondsPerSlot / 3
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot)
for {
select {
case <-ticker.C():
s.lateBlockTasks(s.ctx)
case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
return
}
}
}()
}
}
// lateBlockTasks is called 4 seconds into the slot and performs tasks
@@ -685,12 +683,26 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
Type: statefeed.MissedSlot,
})
headRoot := s.headRoot()
headState := s.headState(ctx)
lastRoot, lastState := transition.LastCachedState()
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
}
// Copy all the field tries in our cached state in the event of late
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
}
// Head root should be empty when retrieving proposer index for the next slot.
_, id, has := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(s.CurrentSlot()+1, [32]byte{} /* head root */)
// There exists proposer for next slot, but we haven't called fcu w/ payload attribute yet.
if (!has && !features.Get().PrepareAllPayloads) || id != [8]byte{} {
return
}
s.headLock.RLock()
headBlock, err := s.headBlock()
if err != nil {
@@ -698,8 +710,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve head block")
return
}
headRoot := s.headRoot()
headState := s.headState(ctx)
s.headLock.RUnlock()
_, err = s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: headState,
@@ -709,11 +719,4 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
}
lastRoot, lastState := transition.LastCachedState()
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
}
if err = transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
}
}


@@ -130,7 +130,7 @@ func (s *Service) Start() {
}
}
s.spawnProcessAttestationsRoutine()
s.spawnLateBlockTasksLoop()
go s.runLateBlockTasks()
}
// Stop the blockchain service's main event loop and associated goroutines.


@@ -3,6 +3,7 @@ package builder
import (
"context"
"testing"
"time"
buildertesting "github.com/prysmaticlabs/prysm/v4/api/client/builder/testing"
blockchainTesting "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
@@ -38,6 +39,21 @@ func Test_RegisterValidator(t *testing.T) {
assert.Equal(t, true, builder.RegisteredVals[pubkey])
}
func Test_RegisterValidator_WithCache(t *testing.T) {
ctx := context.Background()
headFetcher := &blockchainTesting.ChainService{}
builder := buildertesting.NewClient()
s, err := NewService(ctx, WithRegistrationCache(), WithHeadFetcher(headFetcher), WithBuilderClient(&builder))
require.NoError(t, err)
pubkey := bytesutil.ToBytes48([]byte("pubkey"))
var feeRecipient [20]byte
reg := &eth.ValidatorRegistrationV1{Pubkey: pubkey[:], Timestamp: uint64(time.Now().UTC().Unix()), FeeRecipient: feeRecipient[:]}
require.NoError(t, s.RegisterValidator(ctx, []*eth.SignedValidatorRegistrationV1{{Message: reg}}))
registration, err := s.registrationCache.RegistrationByIndex(0)
require.NoError(t, err)
require.DeepEqual(t, reg, registration)
}
func Test_BuilderMethodsWithouClient(t *testing.T) {
s, err := NewService(context.Background())
require.NoError(t, err)


@@ -34,6 +34,7 @@ go_library(
deps = [
"//beacon-chain/state:go_default_library",
"//cache/lru:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/slice:go_default_library",
@@ -87,7 +88,6 @@ go_test(
"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)


@@ -4,35 +4,41 @@ import (
"bytes"
"sync"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
)
const keyLength = 40
const vIdLength = 8
const pIdLength = 8
const vpIdsLength = vIdLength + pIdLength
// ProposerPayloadIDsCache is a cache of proposer payload IDs.
// The key is the slot. The value is the concatenation of the proposer and payload IDs. 8 bytes each.
// The key is the concatenation of the slot and the block root.
// The value is the concatenation of the proposer and payload IDs, 8 bytes each.
type ProposerPayloadIDsCache struct {
slotToProposerAndPayloadIDs map[[40]byte][vpIdsLength]byte
slotToProposerAndPayloadIDs map[[keyLength]byte][vpIdsLength]byte
sync.RWMutex
}
// NewProposerPayloadIDsCache creates a new proposer payload IDs cache.
func NewProposerPayloadIDsCache() *ProposerPayloadIDsCache {
return &ProposerPayloadIDsCache{
slotToProposerAndPayloadIDs: make(map[[40]byte][vpIdsLength]byte),
slotToProposerAndPayloadIDs: make(map[[keyLength]byte][vpIdsLength]byte),
}
}
// GetProposerPayloadIDs returns the proposer and payload IDs for the given slot.
func (f *ProposerPayloadIDsCache) GetProposerPayloadIDs(slot primitives.Slot, r [32]byte) (primitives.ValidatorIndex, [8]byte, bool) {
// GetProposerPayloadIDs returns the proposer and payload IDs for the given slot and head root to build the block.
func (f *ProposerPayloadIDsCache) GetProposerPayloadIDs(
slot primitives.Slot,
r [fieldparams.RootLength]byte,
) (primitives.ValidatorIndex, [pIdLength]byte, bool) {
f.RLock()
defer f.RUnlock()
ids, ok := f.slotToProposerAndPayloadIDs[idKey(slot, r)]
if !ok {
return 0, [8]byte{}, false
return 0, [pIdLength]byte{}, false
}
vId := ids[:vIdLength]
@@ -43,8 +49,13 @@ func (f *ProposerPayloadIDsCache) GetProposerPayloadIDs(slot primitives.Slot, r
return primitives.ValidatorIndex(bytesutil.BytesToUint64BigEndian(vId)), pId, true
}
// SetProposerAndPayloadIDs sets the proposer and payload IDs for the given slot.
func (f *ProposerPayloadIDsCache) SetProposerAndPayloadIDs(slot primitives.Slot, vId primitives.ValidatorIndex, pId [8]byte, r [32]byte) {
// SetProposerAndPayloadIDs sets the proposer and payload IDs for the given slot and head root to build block.
func (f *ProposerPayloadIDsCache) SetProposerAndPayloadIDs(
slot primitives.Slot,
vId primitives.ValidatorIndex,
pId [pIdLength]byte,
r [fieldparams.RootLength]byte,
) {
f.Lock()
defer f.Unlock()
var vIdBytes [vIdLength]byte
@@ -63,7 +74,7 @@ func (f *ProposerPayloadIDsCache) SetProposerAndPayloadIDs(slot primitives.Slot,
}
}
// PrunePayloadIDs removes the payload id entries that's current than input slot.
// PrunePayloadIDs removes the payload ID entries older than input slot.
func (f *ProposerPayloadIDsCache) PrunePayloadIDs(slot primitives.Slot) {
f.Lock()
defer f.Unlock()
@@ -76,8 +87,8 @@ func (f *ProposerPayloadIDsCache) PrunePayloadIDs(slot primitives.Slot) {
}
}
func idKey(slot primitives.Slot, r [32]byte) [40]byte {
var k [40]byte
func idKey(slot primitives.Slot, r [fieldparams.RootLength]byte) [keyLength]byte {
var k [keyLength]byte
copy(k[:], append(bytesutil.Uint64ToBytesBigEndian(uint64(slot)), r[:]...))
return k
}
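Not part of the change — a sketch of how the cache is keyed after this cleanup: lookups must present the same (slot, head root) pair that was used when storing. The import path and values below are illustrative.

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)

func main() {
	c := cache.NewProposerPayloadIDsCache()
	var headRoot [32]byte   // fieldparams.RootLength bytes
	payloadID := [8]byte{1} // engine API payload ID (pIdLength bytes)
	slot := primitives.Slot(100)
	c.SetProposerAndPayloadIDs(slot, primitives.ValidatorIndex(7), payloadID, headRoot)
	idx, pid, ok := c.GetProposerPayloadIDs(slot, headRoot)
	fmt.Println(idx, pid, ok)

	// A different head root for the same slot misses the cache.
	_, _, ok = c.GetProposerPayloadIDs(slot, [32]byte{0xff})
	fmt.Println(ok) // false
}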


@@ -3,15 +3,11 @@ package cache
import (
"context"
"sync"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/math"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -38,33 +34,10 @@ func (regCache *RegistrationCache) RegistrationByIndex(id primitives.ValidatorIn
regCache.lock.RUnlock()
return nil, errors.Wrapf(ErrNotFoundRegistration, "validator id %d", id)
}
isExpired, err := RegistrationTimeStampExpired(v.Timestamp)
if err != nil {
return nil, errors.Wrapf(err, "failed to check registration expiration")
}
if isExpired {
regCache.lock.RUnlock()
regCache.lock.Lock()
defer regCache.lock.Unlock()
delete(regCache.indexToRegistration, id)
log.Warnf("registration for validator index %d expired at unix time %d", id, v.Timestamp)
return nil, errors.Wrapf(ErrNotFoundRegistration, "validator id %d", id)
}
regCache.lock.RUnlock()
return v, nil
}
func RegistrationTimeStampExpired(ts uint64) (bool, error) {
// safely convert unint64 to int64
i, err := math.Int(ts)
if err != nil {
return false, err
}
expiryDuration := params.BeaconConfig().RegistrationDuration
// registered time + expiration duration < current time = expired
return time.Unix(int64(i), 0).Add(expiryDuration).Before(time.Now()), nil
}
// UpdateIndexToRegisteredMap adds or updates values in the cache based on the argument.
func (regCache *RegistrationCache) UpdateIndexToRegisteredMap(ctx context.Context, m map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1) {
_, span := trace.StartSpan(ctx, "RegistrationCache.UpdateIndexToRegisteredMap")


@@ -6,15 +6,12 @@ import (
"time"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestRegistrationCache(t *testing.T) {
hook := logTest.NewGlobal()
pubkey, err := hexutil.Decode("0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
require.NoError(t, err)
validatorIndex := primitives.ValidatorIndex(1)
@@ -31,29 +28,14 @@ func TestRegistrationCache(t *testing.T) {
reg, err := cache.RegistrationByIndex(validatorIndex)
require.NoError(t, err)
require.Equal(t, string(reg.Pubkey), string(pubkey))
t.Run("Registration expired", func(t *testing.T) {
validatorIndex2 := primitives.ValidatorIndex(2)
overExpirationPadTime := time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot*uint64(params.BeaconConfig().SlotsPerEpoch)*4) // 4 epochs
m[validatorIndex2] = &ethpb.ValidatorRegistrationV1{
FeeRecipient: []byte{},
GasLimit: 100,
Timestamp: uint64(time.Now().Add(-1 * overExpirationPadTime).Unix()),
Pubkey: pubkey,
}
cache.UpdateIndexToRegisteredMap(context.Background(), m)
_, err := cache.RegistrationByIndex(validatorIndex2)
require.ErrorContains(t, "no validator registered", err)
require.LogsContain(t, hook, "expired")
})
t.Run("Registration close to expiration still passes", func(t *testing.T) {
t.Run("successfully updates", func(t *testing.T) {
pubkey, err := hexutil.Decode("0x88247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
require.NoError(t, err)
validatorIndex2 := primitives.ValidatorIndex(2)
overExpirationPadTime := time.Second * time.Duration((params.BeaconConfig().SecondsPerSlot*uint64(params.BeaconConfig().SlotsPerEpoch)*3)-5) // 3 epochs - 5 seconds
m[validatorIndex2] = &ethpb.ValidatorRegistrationV1{
FeeRecipient: []byte{},
GasLimit: 100,
Timestamp: uint64(time.Now().Add(-1 * overExpirationPadTime).Unix()),
Timestamp: uint64(time.Now().Unix()),
Pubkey: pubkey,
}
cache.UpdateIndexToRegisteredMap(context.Background(), m)
@@ -62,21 +44,3 @@ func TestRegistrationCache(t *testing.T) {
require.Equal(t, string(reg.Pubkey), string(pubkey))
})
}
func Test_RegistrationTimeStampExpired(t *testing.T) {
// expiration set at 3 epochs
t.Run("expired registration", func(t *testing.T) {
overExpirationPadTime := time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot*uint64(params.BeaconConfig().SlotsPerEpoch)*4) // 4 epochs
ts := uint64(time.Now().Add(-1 * overExpirationPadTime).Unix())
isExpired, err := RegistrationTimeStampExpired(ts)
require.NoError(t, err)
require.Equal(t, true, isExpired)
})
t.Run("is not expired registration", func(t *testing.T) {
overExpirationPadTime := time.Second * time.Duration((params.BeaconConfig().SecondsPerSlot*uint64(params.BeaconConfig().SlotsPerEpoch)*3)-5) // 3 epochs - 5 seconds
ts := uint64(time.Now().Add(-1 * overExpirationPadTime).Unix())
isExpired, err := RegistrationTimeStampExpired(ts)
require.NoError(t, err)
require.Equal(t, false, isExpired)
})
}

View File

@@ -61,7 +61,7 @@ func IsExecutionBlock(body interfaces.ReadOnlyBeaconBlockBody) (bool, error) {
}
payload, err := body.Execution()
switch {
case errors.Is(err, consensus_types.ErrUnsupportedGetter):
case errors.Is(err, consensus_types.ErrUnsupportedField):
return false, nil
case err != nil:
return false, err
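
The rename above (ErrUnsupportedGetter to ErrUnsupportedField) keeps the same control flow: an unsupported-field error from a version-specific getter means the block type simply does not carry that field, not that something failed. A hedged, self-contained sketch of that errors.Is fall-through, with a stand-in sentinel error rather than the real consensus-types package:

package main

import (
	"errors"
	"fmt"
)

// errUnsupportedField stands in for consensus_types.ErrUnsupportedField.
var errUnsupportedField = errors.New("unsupported field for block version")

// isExecutionBlock sketches the IsExecutionBlock flow above: an
// unsupported-field error means "not an execution block" rather than a failure.
func isExecutionBlock(execution func() (string, error)) (bool, error) {
	payload, err := execution()
	switch {
	case errors.Is(err, errUnsupportedField):
		return false, nil
	case err != nil:
		return false, err
	}
	return payload != "", nil
}

func main() {
	phase0 := func() (string, error) { return "", errUnsupportedField }
	bellatrix := func() (string, error) { return "payload", nil }

	fmt.Println(isExecutionBlock(phase0))    // false <nil>
	fmt.Println(isExecutionBlock(bellatrix)) // true <nil>
}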

View File

@@ -68,7 +68,6 @@ go_test(
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/slice:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

View File

@@ -8,7 +8,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/crypto/hash"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/v4/time"
@@ -66,25 +65,6 @@ func IsAggregator(committeeCount uint64, slotSig []byte) (bool, error) {
return binary.LittleEndian.Uint64(b[:8])%modulo == 0, nil
}
// AggregateSignature returns the aggregated signature of the input attestations.
//
// Spec pseudocode definition:
//
// def get_aggregate_signature(attestations: Sequence[Attestation]) -> BLSSignature:
// signatures = [attestation.signature for attestation in attestations]
// return bls.Aggregate(signatures)
func AggregateSignature(attestations []*ethpb.Attestation) (bls.Signature, error) {
sigs := make([]bls.Signature, len(attestations))
var err error
for i := 0; i < len(sigs); i++ {
sigs[i], err = bls.SignatureFromBytes(attestations[i].Signature)
if err != nil {
return nil, err
}
}
return bls.AggregateSignatures(sigs), nil
}
// IsAggregated returns true if the attestation is an aggregated attestation,
// false otherwise.
func IsAggregated(attestation *ethpb.Attestation) bool {

View File

@@ -10,8 +10,6 @@ import (
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
@@ -45,44 +43,6 @@ func TestAttestation_IsAggregator(t *testing.T) {
})
}
func TestAttestation_AggregateSignature(t *testing.T) {
t.Run("verified", func(t *testing.T) {
pubkeys := make([]bls.PublicKey, 0, 100)
atts := make([]*ethpb.Attestation, 0, 100)
msg := bytesutil.ToBytes32([]byte("hello"))
for i := 0; i < 100; i++ {
priv, err := bls.RandKey()
require.NoError(t, err)
pub := priv.PublicKey()
sig := priv.Sign(msg[:])
pubkeys = append(pubkeys, pub)
att := &ethpb.Attestation{Signature: sig.Marshal()}
atts = append(atts, att)
}
aggSig, err := helpers.AggregateSignature(atts)
require.NoError(t, err)
assert.Equal(t, true, aggSig.FastAggregateVerify(pubkeys, msg), "Signature did not verify")
})
t.Run("not verified", func(t *testing.T) {
pubkeys := make([]bls.PublicKey, 0, 100)
atts := make([]*ethpb.Attestation, 0, 100)
msg := []byte("hello")
for i := 0; i < 100; i++ {
priv, err := bls.RandKey()
require.NoError(t, err)
pub := priv.PublicKey()
sig := priv.Sign(msg)
pubkeys = append(pubkeys, pub)
att := &ethpb.Attestation{Signature: sig.Marshal()}
atts = append(atts, att)
}
aggSig, err := helpers.AggregateSignature(atts[0 : len(atts)-2])
require.NoError(t, err)
assert.Equal(t, false, aggSig.FastAggregateVerify(pubkeys, bytesutil.ToBytes32(msg)), "Signature not supposed to verify")
})
}
func TestAttestation_ComputeSubnetForAttestation(t *testing.T) {
// Create 10 committees
committeeCount := uint64(10)

View File

@@ -265,6 +265,7 @@ func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool
// validators' latest votes.
func (f *ForkChoice) updateBalances() error {
newBalances := f.justifiedBalances
zHash := params.BeaconConfig().ZeroHash
for index, vote := range f.votes {
// Skip if validator has been slashed
@@ -273,7 +274,7 @@ func (f *ForkChoice) updateBalances() error {
}
// Skip if validator has never voted for current root and next root (i.e. if the
// votes are zero hash aka genesis block), there's nothing to compute.
if vote.currentRoot == params.BeaconConfig().ZeroHash && vote.nextRoot == params.BeaconConfig().ZeroHash {
if vote.currentRoot == zHash && vote.nextRoot == zHash {
continue
}
@@ -293,7 +294,7 @@ func (f *ForkChoice) updateBalances() error {
// Ignore the vote if the root is not in fork choice
// store, that means we have not seen the block before.
nextNode, ok := f.store.nodeByRoot[vote.nextRoot]
if ok && vote.nextRoot != params.BeaconConfig().ZeroHash {
if ok && vote.nextRoot != zHash {
// Protection against nil node
if nextNode == nil {
return errors.Wrap(ErrNilNode, "could not update balances")
@@ -302,7 +303,7 @@ func (f *ForkChoice) updateBalances() error {
}
currentNode, ok := f.store.nodeByRoot[vote.currentRoot]
if ok && vote.currentRoot != params.BeaconConfig().ZeroHash {
if ok && vote.currentRoot != zHash {
// Protection against nil node
if currentNode == nil {
return errors.Wrap(ErrNilNode, "could not update balances")

View File

@@ -31,9 +31,7 @@ import (
// store.justified_checkpoint = store.best_justified_checkpoint
func (f *ForkChoice) NewSlot(ctx context.Context, slot primitives.Slot) error {
// Reset proposer boost root
if err := f.resetBoostedProposerRoot(ctx); err != nil {
return errors.Wrap(err, "could not reset boosted proposer root in fork choice")
}
f.store.proposerBoostRoot = [32]byte{}
// Return if it's not a new epoch.
if !slots.IsEpochStart(slot) {

View File

@@ -1,19 +1,12 @@
package doublylinkedtree
import (
"context"
"fmt"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
)
// resetBoostedProposerRoot sets the value of the proposer boosted root to zeros.
func (f *ForkChoice) resetBoostedProposerRoot(_ context.Context) error {
f.store.proposerBoostRoot = [32]byte{}
return nil
}
// applyProposerBoostScore applies the current proposer boost scores to the
// relevant nodes.
func (f *ForkChoice) applyProposerBoostScore() error {

View File

@@ -82,6 +82,11 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
if head.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgWeightThreshold {
return
}
// Only orphan a block if the parent LMD vote is strong
if parent.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
return
}
return true
}
@@ -137,6 +142,11 @@ func (f *ForkChoice) GetProposerHead() [32]byte {
return head.root
}
// Only orphan a block if the parent LMD vote is strong
if parent.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
return head.root
}
// Only reorg if we are proposing early
secs, err := slots.SecondsSinceSlotStart(head.slot+1, f.store.genesisTime, uint64(time.Now().Unix()))
if err != nil {
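
Both new guards above express "the parent's LMD weight must reach some percentage of the committee weight" in integer math: weight*100 is compared against committeeWeight*threshold, where REORG_PARENT_WEIGHT_THRESHOLD is 160 per the spec test further down. A hedged sketch of that arithmetic with made-up numbers:

package main

import "fmt"

// strongEnough mirrors the integer percentage check used in fork choice:
// weight/committeeWeight >= thresholdPercent/100, rearranged to avoid floats.
func strongEnough(weight, committeeWeight, thresholdPercent uint64) bool {
	return weight*100 >= committeeWeight*thresholdPercent
}

func main() {
	committee := uint64(32_000_000) // hypothetical per-slot committee weight (gwei)
	parent := uint64(51_500_000)    // parent's accumulated LMD weight

	// With a 160% parent threshold, the parent must carry at least 1.6x the
	// committee weight before the head block may be orphaned.
	fmt.Println(strongEnough(parent, committee, 160)) // true: 51.5M >= 51.2M
}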

View File

@@ -22,7 +22,11 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
st, root, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, [32]byte{}, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
f.ProcessAttestation(ctx, []uint64{0, 1, 2}, root, 0)
attesters := make([]uint64, f.numActiveValidators-64)
for i := range attesters {
attesters[i] = uint64(i + 64)
}
f.ProcessAttestation(ctx, attesters, root, 0)
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+1)
st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
@@ -80,6 +84,12 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
require.Equal(t, false, f.ShouldOverrideFCU())
f.store.headNode.parent = saved
})
t.Run("parent is weak", func(t *testing.T) {
saved := f.store.headNode.parent.weight
f.store.headNode.parent.weight = 0
require.Equal(t, false, f.ShouldOverrideFCU())
f.store.headNode.parent.weight = saved
})
t.Run("Head is strong", func(t *testing.T) {
f.store.headNode.weight = f.store.committeeWeight
require.Equal(t, false, f.ShouldOverrideFCU())
@@ -101,7 +111,11 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
st, root, err := prepareForkchoiceState(ctx, 1, parentRoot, [32]byte{}, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
f.ProcessAttestation(ctx, []uint64{0, 1, 2}, root, 0)
attesters := make([]uint64, f.numActiveValidators-64)
for i := range attesters {
attesters[i] = uint64(i + 64)
}
f.ProcessAttestation(ctx, attesters, root, 0)
driftGenesisTime(f, 3, 1)
childRoot := [32]byte{'b'}
@@ -161,6 +175,12 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
require.Equal(t, childRoot, f.GetProposerHead())
f.store.headNode.parent = saved
})
t.Run("parent is weak", func(t *testing.T) {
saved := f.store.headNode.parent.weight
f.store.headNode.parent.weight = 0
require.Equal(t, false, f.ShouldOverrideFCU())
f.store.headNode.parent.weight = saved
})
t.Run("Head is strong", func(t *testing.T) {
f.store.headNode.weight = f.store.committeeWeight
require.Equal(t, childRoot, f.GetProposerHead())

View File

@@ -55,6 +55,7 @@ go_library(
"//beacon-chain/p2p/types:go_default_library",
"//beacon-chain/startup:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",
@@ -87,6 +88,8 @@ go_library(
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_libp2p_go_libp2p//core/peerstore:go_default_library",
"@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/host/resource-manager:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/host/resource-manager/obs:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/muxer/mplex:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/security/noise:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/transport/tcp:go_default_library",

View File

@@ -6,12 +6,17 @@ import (
"net"
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
rcmgrObs "github.com/libp2p/go-libp2p/p2p/host/resource-manager/obs"
"github.com/libp2p/go-libp2p/p2p/muxer/mplex"
"github.com/libp2p/go-libp2p/p2p/security/noise"
"github.com/libp2p/go-libp2p/p2p/transport/tcp"
ma "github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prysmaticlabs/prysm/v4/config/features"
ecdsaprysm "github.com/prysmaticlabs/prysm/v4/crypto/ecdsa"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
)
@@ -60,8 +65,8 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Opt
libp2p.UserAgent(version.BuildData()),
libp2p.ConnectionGater(s),
libp2p.Transport(tcp.NewTCPTransport),
libp2p.Muxer("/mplex/6.7.0", mplex.DefaultTransport),
libp2p.DefaultMuxers,
libp2p.Muxer("/mplex/6.7.0", mplex.DefaultTransport),
}
options = append(options, libp2p.Security(noise.ID, noise.New))
@@ -99,6 +104,22 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Opt
}
// Disable Ping Service.
options = append(options, libp2p.Ping(false))
if features.Get().DisableResourceManager {
options = append(options, libp2p.ResourceManager(&network.NullResourceManager{}))
} else {
rcmgrObs.MustRegisterWith(prometheus.DefaultRegisterer)
str, err := rcmgrObs.NewStatsTraceReporter()
if err != nil {
log.WithError(err).Fatal("Could not create stats reporter")
}
rmgr, err := rcmgr.NewResourceManager(rcmgr.NewFixedLimiter(rcmgr.DefaultLimits.AutoScale()), rcmgr.WithTraceReporter(str))
if err != nil {
log.WithError(err).Fatal("Could not create resource manager")
}
options = append(options, libp2p.ResourceManager(rmgr))
}
return options
}
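
After the reordering above, libp2p.DefaultMuxers (yamux) is registered before the explicit mplex entry, and the muxer list order is the preference order the host advertises, which the updated test below asserts. A minimal hedged sketch of building a host with that preference; the import paths match the ones used in this diff (go-libp2p v0.27.x), everything else is illustrative:

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/p2p/muxer/mplex"
)

func main() {
	// Yamux (via DefaultMuxers) first, mplex kept only as a fallback for older peers.
	h, err := libp2p.New(
		libp2p.DefaultMuxers,
		libp2p.Muxer("/mplex/6.7.0", mplex.DefaultTransport),
	)
	if err != nil {
		panic(err)
	}
	defer h.Close()
	fmt.Println("host up:", h.ID())
}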

View File

@@ -123,7 +123,7 @@ func TestDefaultMultiplexers(t *testing.T) {
err = cfg.Apply(append(opts, libp2p.FallbackDefaults)...)
assert.NoError(t, err)
assert.Equal(t, protocol.ID("/mplex/6.7.0"), cfg.Muxers[0].ID)
assert.Equal(t, protocol.ID("/yamux/1.0.0"), cfg.Muxers[1].ID)
assert.Equal(t, protocol.ID("/yamux/1.0.0"), cfg.Muxers[0].ID)
assert.Equal(t, protocol.ID("/mplex/6.7.0"), cfg.Muxers[1].ID)
}

View File

@@ -14,6 +14,7 @@ go_library(
deps = [
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/peers/scorers:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/rand:go_default_library",
@@ -44,6 +45,7 @@ go_test(
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/peers/scorers:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",

View File

@@ -5,6 +5,7 @@ import (
"testing"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/sirupsen/logrus"
)
@@ -12,6 +13,11 @@ func TestMain(m *testing.M) {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetOutput(io.Discard)
resetCfg := features.InitWithReset(&features.Flags{
EnablePeerScorer: true,
})
defer resetCfg()
resetFlags := flags.Get()
flags.Init(&flags.GlobalFlags{
BlockBatchLimit: 64,

View File

@@ -15,6 +15,7 @@ go_library(
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/types:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/rand:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
@@ -39,6 +40,7 @@ go_test(
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/types:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/rand:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

View File

@@ -9,6 +9,7 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/peerdata"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/crypto/rand"
prysmTime "github.com/prysmaticlabs/prysm/v4/time"
)
@@ -290,6 +291,9 @@ func (s *BlockProviderScorer) mapScoresAndPeers(
func (s *BlockProviderScorer) FormatScorePretty(pid peer.ID) string {
s.store.RLock()
defer s.store.RUnlock()
if !features.Get().EnablePeerScorer {
return "disabled"
}
score := s.score(pid)
return fmt.Sprintf("[%0.1f%%, raw: %0.2f, blocks: %d/%d]",
(score/s.MaxScore())*100, score, s.processedBlocks(pid), s.config.ProcessedBlocksCap)

View File

@@ -11,6 +11,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/scorers"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/crypto/rand"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/time"
@@ -459,6 +460,16 @@ func TestScorers_BlockProvider_FormatScorePretty(t *testing.T) {
tt.check(scorer)
})
}
t.Run("peer scorer disabled", func(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnablePeerScorer: false,
})
defer resetCfg()
peerStatuses := peerStatusGen()
scorer := peerStatuses.Scorers().BlockProviderScorer()
assert.Equal(t, "disabled", scorer.FormatScorePretty("peer1"))
})
}
func TestScorers_BlockProvider_BadPeerMarking(t *testing.T) {

View File

@@ -7,6 +7,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/scorers"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/sirupsen/logrus"
)
@@ -14,6 +15,11 @@ func TestMain(m *testing.M) {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetOutput(io.Discard)
resetCfg := features.InitWithReset(&features.Flags{
EnablePeerScorer: true,
})
defer resetCfg()
resetFlags := flags.Get()
flags.Init(&flags.GlobalFlags{
BlockBatchLimit: 64,

View File

@@ -7,6 +7,7 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/peerdata"
"github.com/prysmaticlabs/prysm/v4/config/features"
)
var _ Scorer = (*Service)(nil)
@@ -137,8 +138,10 @@ func (s *Service) IsBadPeerNoLock(pid peer.ID) bool {
if s.scorers.peerStatusScorer.isBadPeer(pid) {
return true
}
if s.scorers.gossipScorer.isBadPeer(pid) {
return true
if features.Get().EnablePeerScorer {
if s.scorers.gossipScorer.isBadPeer(pid) {
return true
}
}
return false
}

View File

@@ -36,6 +36,7 @@ import (
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/peerdata"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/scorers"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/rand"
@@ -543,6 +544,11 @@ func (p *Status) Prune() {
p.store.Lock()
defer p.store.Unlock()
// Default to the old method if the flag isn't enabled.
if !features.Get().EnablePeerScorer {
p.deprecatedPrune()
return
}
// Exit early if there is nothing to prune.
if len(p.store.Peers()) <= p.store.Config().MaxPeers {
return
@@ -587,6 +593,52 @@ func (p *Status) Prune() {
p.tallyIPTracker()
}
// Deprecated: This is the old peer pruning method based on
// bad response counts.
func (p *Status) deprecatedPrune() {
// Exit early if there is nothing to prune.
if len(p.store.Peers()) <= p.store.Config().MaxPeers {
return
}
notBadPeer := func(peerData *peerdata.PeerData) bool {
return peerData.BadResponses < p.scorers.BadResponsesScorer().Params().Threshold
}
type peerResp struct {
pid peer.ID
badResp int
}
peersToPrune := make([]*peerResp, 0)
// Select disconnected peers with a smaller bad response count.
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerDisconnected && notBadPeer(peerData) {
peersToPrune = append(peersToPrune, &peerResp{
pid: pid,
badResp: peerData.BadResponses,
})
}
}
// Sort peers in ascending order so that the peers with the
// fewest bad responses are pruned first. This protects the
// node from malicious/lousy peers by keeping their bad
// response records in memory.
sort.Slice(peersToPrune, func(i, j int) bool {
return peersToPrune[i].badResp < peersToPrune[j].badResp
})
limitDiff := len(p.store.Peers()) - p.store.Config().MaxPeers
if limitDiff > len(peersToPrune) {
limitDiff = len(peersToPrune)
}
peersToPrune = peersToPrune[:limitDiff]
// Delete peers from map.
for _, peerData := range peersToPrune {
p.store.DeletePeerData(peerData.pid)
}
p.tallyIPTracker()
}
// BestFinalized returns the highest finalized epoch equal to or higher than ours that is agreed
// upon by the majority of peers. This method may not return the absolute highest finalized, but
// the finalized epoch in which most peers can serve blocks (plurality voting).
@@ -694,6 +746,9 @@ func (p *Status) BestNonFinalized(minPeers int, ourHeadEpoch primitives.Epoch) (
// bad response count. In the future scoring will be used
// to determine the most suitable peers to take out.
func (p *Status) PeersToPrune() []peer.ID {
if !features.Get().EnablePeerScorer {
return p.deprecatedPeersToPrune()
}
connLimit := p.ConnectedPeerLimit()
inBoundLimit := uint64(p.InboundLimit())
activePeers := p.Active()
@@ -757,6 +812,71 @@ func (p *Status) PeersToPrune() []peer.ID {
return ids
}
// Deprecated: This is the older pruning method, which relies
// on bad response counts.
func (p *Status) deprecatedPeersToPrune() []peer.ID {
connLimit := p.ConnectedPeerLimit()
inBoundLimit := p.InboundLimit()
activePeers := p.Active()
numInboundPeers := len(p.InboundConnected())
// Exit early if we are still below our max
// limit.
if uint64(len(activePeers)) <= connLimit {
return []peer.ID{}
}
p.store.Lock()
defer p.store.Unlock()
type peerResp struct {
pid peer.ID
badResp int
}
peersToPrune := make([]*peerResp, 0)
// Select connected and inbound peers to prune.
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected &&
peerData.Direction == network.DirInbound {
peersToPrune = append(peersToPrune, &peerResp{
pid: pid,
badResp: peerData.BadResponses,
})
}
}
// Sort in descending order to favour pruning peers with a
// higher bad response count.
sort.Slice(peersToPrune, func(i, j int) bool {
return peersToPrune[i].badResp > peersToPrune[j].badResp
})
// Determine amount of peers to prune using our
// max connection limit.
amountToPrune, err := pmath.Sub64(uint64(len(activePeers)), connLimit)
if err != nil {
// This should never happen
log.WithError(err).Error("Failed to determine amount of peers to prune")
return []peer.ID{}
}
// Also check for inbound peers above our limit.
excessInbound := uint64(0)
if numInboundPeers > inBoundLimit {
excessInbound = uint64(numInboundPeers - inBoundLimit)
}
// Prune whichever is larger: the excess over the connection
// limit or the excess inbound peers.
if excessInbound > amountToPrune {
amountToPrune = excessInbound
}
if amountToPrune < uint64(len(peersToPrune)) {
peersToPrune = peersToPrune[:amountToPrune]
}
ids := make([]peer.ID, 0, len(peersToPrune))
for _, pr := range peersToPrune {
ids = append(ids, pr.pid)
}
return ids
}
// HighestEpoch returns the highest epoch reported amongst peers.
func (p *Status) HighestEpoch() primitives.Epoch {
p.store.RLock()
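
The deprecated pruning path above removes whichever excess is larger: active peers over the connection limit, or inbound peers over the inbound limit. A small hedged sketch of that arithmetic with illustrative numbers:

package main

import "fmt"

// amountToPrune mirrors the deprecated calculation: take the larger of the
// overall connection excess and the inbound excess. (The real code also caps
// this at the number of prunable inbound peers.)
func amountToPrune(active, connLimit, inbound, inboundLimit int) int {
	excess := active - connLimit
	if excess < 0 {
		excess = 0
	}
	excessInbound := inbound - inboundLimit
	if excessInbound > excess {
		excess = excessInbound
	}
	return excess
}

func main() {
	// 45 active peers with a limit of 30, 20 inbound with a limit of 15:
	// the overall excess (15) dominates the inbound excess (5).
	fmt.Println(amountToPrune(45, 30, 20, 15)) // 15
}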

View File

@@ -15,6 +15,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/peerdata"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/scorers"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/consensus-types/wrapper"
@@ -548,6 +549,10 @@ func TestPrune(t *testing.T) {
}
func TestPeerIPTracker(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnablePeerScorer: false,
})
defer resetCfg()
maxBadResponses := 2
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
@@ -582,7 +587,7 @@ func TestPeerIPTracker(t *testing.T) {
p.Prune()
for _, pr := range badPeers {
assert.Equal(t, true, p.IsBad(pr), "peer with good ip is regarded as bad")
assert.Equal(t, false, p.IsBad(pr), "peer with good ip is regarded as bad")
}
}
@@ -686,6 +691,10 @@ func TestAtInboundPeerLimit(t *testing.T) {
}
func TestPrunePeers(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnablePeerScorer: false,
})
defer resetCfg()
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
@@ -736,11 +745,13 @@ func TestPrunePeers(t *testing.T) {
}
// Ensure it is in the descending order.
currScore := p.Scorers().Score(peersToPrune[0])
currCount, err := p.Scorers().BadResponsesScorer().Count(peersToPrune[0])
require.NoError(t, err)
for _, pid := range peersToPrune {
score := p.Scorers().BadResponsesScorer().Score(pid)
assert.Equal(t, true, currScore >= score)
currScore = score
count, err := p.Scorers().BadResponsesScorer().Count(pid)
require.NoError(t, err)
assert.Equal(t, true, currCount >= count)
currCount = count
}
}

View File

@@ -62,13 +62,13 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
currNum := len(s.pubsub.ListPeers(topic))
wg := new(sync.WaitGroup)
for {
if currNum >= threshold {
break
}
if err := ctx.Err(); err != nil {
return false, errors.Errorf("unable to find requisite number of peers for topic %s - "+
"only %d out of %d peers were able to be found", topic, currNum, threshold)
}
if currNum >= threshold {
break
}
nodes := enode.ReadNodes(iterator, int(params.BeaconNetworkConfig().MinimumPeersInSubnetSearch))
for _, node := range nodes {
info, _, err := convertToAddrInfo(node)
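
The reordering above checks whether enough peers have already been found before consulting ctx.Err, so a search that reaches its threshold on the final pass no longer reports a spurious context error. A hedged sketch of that loop shape, detached from the discovery types:

package main

import (
	"context"
	"fmt"
	"time"
)

// findEnough keeps searching until the threshold is met, and only reports the
// context error if the target has not yet been reached.
func findEnough(ctx context.Context, threshold int, search func() int) (int, error) {
	found := 0
	for {
		if found >= threshold {
			return found, nil
		}
		if err := ctx.Err(); err != nil {
			return found, fmt.Errorf("only %d out of %d peers found: %w", found, threshold, err)
		}
		found += search()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	n, err := findEnough(ctx, 3, func() int { time.Sleep(5 * time.Millisecond); return 1 })
	fmt.Println(n, err) // 3 <nil> when the threshold is hit before the deadline
}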

View File

@@ -6,10 +6,12 @@ go_library(
"blinded_blocks.go",
"blocks.go",
"config.go",
"handlers.go",
"log.go",
"pool.go",
"server.go",
"state.go",
"structs.go",
"sync_committee.go",
"validator.go",
],
@@ -49,7 +51,9 @@ go_library(
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz/detect:go_default_library",
"//network:go_default_library",
"//network/forks:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
"//proto/migration:go_default_library",
@@ -57,8 +61,10 @@ go_library(
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_go_playground_validator_v10//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_wealdtech_go_bytesutil//:go_default_library",
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
@@ -75,6 +81,7 @@ go_test(
"blinded_blocks_test.go",
"blocks_test.go",
"config_test.go",
"handlers_test.go",
"init_test.go",
"pool_test.go",
"server_test.go",
@@ -129,6 +136,7 @@ go_test(
"@com_github_golang_mock//gomock:go_default_library",
"@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_stretchr_testify//mock:go_default_library",
"@com_github_wealdtech_go_bytesutil//:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",

View File

@@ -44,8 +44,8 @@ func (bs *Server) GetBlindedBlock(ctx context.Context, req *ethpbv1.BlockRequest
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
// ErrUnsupportedGetter means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
// ErrUnsupportedField means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
}
result, err = getBlindedBlockAltair(blk)
@@ -53,8 +53,8 @@ func (bs *Server) GetBlindedBlock(ctx context.Context, req *ethpbv1.BlockRequest
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
// ErrUnsupportedGetter means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
// ErrUnsupportedField means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
}
result, err = bs.getBlindedBlockBellatrix(ctx, blk)
@@ -62,8 +62,8 @@ func (bs *Server) GetBlindedBlock(ctx context.Context, req *ethpbv1.BlockRequest
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
// ErrUnsupportedGetter means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
// ErrUnsupportedField means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
}
result, err = bs.getBlindedBlockCapella(ctx, blk)
@@ -71,8 +71,8 @@ func (bs *Server) GetBlindedBlock(ctx context.Context, req *ethpbv1.BlockRequest
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
// ErrUnsupportedGetter means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
// ErrUnsupportedField means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
}
@@ -99,8 +99,8 @@ func (bs *Server) GetBlindedBlockSSZ(ctx context.Context, req *ethpbv1.BlockRequ
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
// ErrUnsupportedGetter means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
// ErrUnsupportedField means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
result, err = getSSZBlockAltair(blk)
@@ -108,8 +108,8 @@ func (bs *Server) GetBlindedBlockSSZ(ctx context.Context, req *ethpbv1.BlockRequ
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
// ErrUnsupportedGetter means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
// ErrUnsupportedField means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
result, err = bs.getBlindedSSZBlockBellatrix(ctx, blk)
@@ -117,8 +117,8 @@ func (bs *Server) GetBlindedBlockSSZ(ctx context.Context, req *ethpbv1.BlockRequ
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
// ErrUnsupportedGetter means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
// ErrUnsupportedField means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
result, err = bs.getBlindedSSZBlockCapella(ctx, blk)
@@ -126,8 +126,8 @@ func (bs *Server) GetBlindedBlockSSZ(ctx context.Context, req *ethpbv1.BlockRequ
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
// ErrUnsupportedGetter means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
// ErrUnsupportedField means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
@@ -146,7 +146,7 @@ func (bs *Server) SubmitBlindedBlock(ctx context.Context, req *ethpbv2.SignedBli
ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlindedBlock")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher); err != nil {
if err := rpchelpers.ValidateSyncGRPC(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
@@ -189,7 +189,7 @@ func (bs *Server) SubmitBlindedBlockSSZ(ctx context.Context, req *ethpbv2.SSZCon
ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlindedBlockSSZ")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher); err != nil {
if err := rpchelpers.ValidateSyncGRPC(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
@@ -344,8 +344,8 @@ func getBlindedBlockAltair(blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.B
func (bs *Server) getBlindedBlockBellatrix(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlindedBlockResponse, error) {
bellatrixBlk, err := blk.PbBellatrixBlock()
if err != nil {
// ErrUnsupportedGetter means that we have another block type
if errors.Is(err, consensus_types.ErrUnsupportedGetter) {
// ErrUnsupportedField means that we have another block type
if errors.Is(err, consensus_types.ErrUnsupportedField) {
if blindedBellatrixBlk, err := blk.PbBlindedBellatrixBlock(); err == nil {
if blindedBellatrixBlk == nil {
return nil, errNilBlock
@@ -414,8 +414,8 @@ func (bs *Server) getBlindedBlockBellatrix(ctx context.Context, blk interfaces.R
func (bs *Server) getBlindedBlockCapella(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlindedBlockResponse, error) {
capellaBlk, err := blk.PbCapellaBlock()
if err != nil {
// ErrUnsupportedGetter means that we have another block type
if errors.Is(err, consensus_types.ErrUnsupportedGetter) {
// ErrUnsupportedField means that we have another block type
if errors.Is(err, consensus_types.ErrUnsupportedField) {
if blindedCapellaBlk, err := blk.PbBlindedCapellaBlock(); err == nil {
if blindedCapellaBlk == nil {
return nil, errNilBlock
@@ -484,8 +484,8 @@ func (bs *Server) getBlindedBlockCapella(ctx context.Context, blk interfaces.Rea
func (bs *Server) getBlindedSSZBlockBellatrix(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
bellatrixBlk, err := blk.PbBellatrixBlock()
if err != nil {
// ErrUnsupportedGetter means that we have another block type
if errors.Is(err, consensus_types.ErrUnsupportedGetter) {
// ErrUnsupportedField means that we have another block type
if errors.Is(err, consensus_types.ErrUnsupportedField) {
if blindedBellatrixBlk, err := blk.PbBlindedBellatrixBlock(); err == nil {
if blindedBellatrixBlk == nil {
return nil, errNilBlock
@@ -559,8 +559,8 @@ func (bs *Server) getBlindedSSZBlockBellatrix(ctx context.Context, blk interface
func (bs *Server) getBlindedSSZBlockCapella(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
capellaBlk, err := blk.PbCapellaBlock()
if err != nil {
// ErrUnsupportedGetter means that we have another block type
if errors.Is(err, consensus_types.ErrUnsupportedGetter) {
// ErrUnsupportedField means that we have another block type
if errors.Is(err, consensus_types.ErrUnsupportedField) {
if blindedCapellaBlk, err := blk.PbBlindedCapellaBlock(); err == nil {
if blindedCapellaBlk == nil {
return nil, errNilBlock

View File

@@ -43,7 +43,7 @@ var (
// determines the best block root and state root to use for a Checkpoint Sync starting from that point.
// DEPRECATED: GetWeakSubjectivity endpoint will no longer be supported
func (bs *Server) GetWeakSubjectivity(ctx context.Context, _ *empty.Empty) (*ethpbv1.WeakSubjectivityResponse, error) {
if err := rpchelpers.ValidateSync(ctx, bs.SyncChecker, bs.HeadFetcher, bs.GenesisTimeFetcher, bs.OptimisticModeFetcher); err != nil {
if err := rpchelpers.ValidateSyncGRPC(ctx, bs.SyncChecker, bs.HeadFetcher, bs.GenesisTimeFetcher, bs.OptimisticModeFetcher); err != nil {
// This is already a grpc error, so we can't wrap it any further
return nil, err
}
@@ -206,7 +206,7 @@ func (bs *Server) SubmitBlock(ctx context.Context, req *ethpbv2.SignedBeaconBloc
ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlock")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher); err != nil {
if err := rpchelpers.ValidateSyncGRPC(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
@@ -246,7 +246,7 @@ func (bs *Server) SubmitBlockSSZ(ctx context.Context, req *ethpbv2.SSZContainer)
ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlockSSZ")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher); err != nil {
if err := rpchelpers.ValidateSyncGRPC(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
@@ -420,8 +420,8 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
// ErrUnsupportedGetter means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
// ErrUnsupportedField means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
result, err = getBlockAltair(blk)
@@ -429,8 +429,8 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
// ErrUnsupportedGetter means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
// ErrUnsupportedField means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
result, err = bs.getBlockBellatrix(ctx, blk)
@@ -438,8 +438,8 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
// ErrUnsupportedGetter means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
// ErrUnsupportedField means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
result, err = bs.getBlockCapella(ctx, blk)
@@ -447,8 +447,8 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
// ErrUnsupportedGetter means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
// ErrUnsupportedField means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
@@ -474,8 +474,8 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
// ErrUnsupportedGetter means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
// ErrUnsupportedField means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
result, err = getSSZBlockAltair(blk)
@@ -483,8 +483,8 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
// ErrUnsupportedGetter means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
// ErrUnsupportedField means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
result, err = bs.getSSZBlockBellatrix(ctx, blk)
@@ -492,8 +492,8 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
// ErrUnsupportedGetter means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
// ErrUnsupportedField means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
result, err = bs.getSSZBlockCapella(ctx, blk)
@@ -501,8 +501,8 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
// ErrUnsupportedGetter means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
// ErrUnsupportedField means that we have another block type
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
@@ -689,8 +689,8 @@ func getBlockAltair(blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlockRes
func (bs *Server) getBlockBellatrix(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlockResponseV2, error) {
bellatrixBlk, err := blk.PbBellatrixBlock()
if err != nil {
// ErrUnsupportedGetter means that we have another block type
if errors.Is(err, consensus_types.ErrUnsupportedGetter) {
// ErrUnsupportedField means that we have another block type
if errors.Is(err, consensus_types.ErrUnsupportedField) {
if blindedBellatrixBlk, err := blk.PbBlindedBellatrixBlock(); err == nil {
if blindedBellatrixBlk == nil {
return nil, errNilBlock
@@ -759,8 +759,8 @@ func (bs *Server) getBlockBellatrix(ctx context.Context, blk interfaces.ReadOnly
func (bs *Server) getBlockCapella(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlockResponseV2, error) {
capellaBlk, err := blk.PbCapellaBlock()
if err != nil {
// ErrUnsupportedGetter means that we have another block type
if errors.Is(err, consensus_types.ErrUnsupportedGetter) {
// ErrUnsupportedField means that we have another block type
if errors.Is(err, consensus_types.ErrUnsupportedField) {
if blindedCapellaBlk, err := blk.PbBlindedCapellaBlock(); err == nil {
if blindedCapellaBlk == nil {
return nil, errNilBlock
@@ -872,8 +872,8 @@ func getSSZBlockAltair(blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZCo
func (bs *Server) getSSZBlockBellatrix(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
bellatrixBlk, err := blk.PbBellatrixBlock()
if err != nil {
// ErrUnsupportedGetter means that we have another block type
if errors.Is(err, consensus_types.ErrUnsupportedGetter) {
// ErrUnsupportedField means that we have another block type
if errors.Is(err, consensus_types.ErrUnsupportedField) {
if blindedBellatrixBlk, err := blk.PbBlindedBellatrixBlock(); err == nil {
if blindedBellatrixBlk == nil {
return nil, errNilBlock
@@ -948,8 +948,8 @@ func (bs *Server) getSSZBlockBellatrix(ctx context.Context, blk interfaces.ReadO
func (bs *Server) getSSZBlockCapella(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
capellaBlk, err := blk.PbCapellaBlock()
if err != nil {
// ErrUnsupportedGetter means that we have another block type
if errors.Is(err, consensus_types.ErrUnsupportedGetter) {
// ErrUnsupportedField means that we have another block type
if errors.Is(err, consensus_types.ErrUnsupportedField) {
if blindedCapellaBlk, err := blk.PbBlindedCapellaBlock(); err == nil {
if blindedCapellaBlk == nil {
return nil, errNilBlock

View File

@@ -136,7 +136,7 @@ func TestGetSpec(t *testing.T) {
resp, err := server.GetSpec(context.Background(), &emptypb.Empty{})
require.NoError(t, err)
assert.Equal(t, 105, len(resp.Data))
assert.Equal(t, 108, len(resp.Data))
for k, v := range resp.Data {
switch k {
case "CONFIG_NAME":
@@ -331,6 +331,10 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "0x08000000", v)
case "DOMAIN_CONTRIBUTION_AND_PROOF":
assert.Equal(t, "0x09000000", v)
case "DOMAIN_BLS_TO_EXECUTION_CHANGE":
assert.Equal(t, "0x0a000000", v)
case "DOMAIN_APPLICATION_BUILDER":
assert.Equal(t, "0x00000001", v)
case "TRANSITION_TOTAL_DIFFICULTY":
assert.Equal(t, "0", v)
case "TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH":
@@ -361,6 +365,8 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "2", v)
case "REORG_WEIGHT_THRESHOLD":
assert.Equal(t, "20", v)
case "REORG_PARENT_WEIGHT_THRESHOLD":
assert.Equal(t, "160", v)
case "SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY":
default:
t.Errorf("Incorrect key: %s", k)

View File

@@ -0,0 +1,375 @@
package beacon
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"github.com/go-playground/validator/v10"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/network"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
const (
broadcastValidationQueryParam = "broadcast_validation"
broadcastValidationConsensus = "consensus"
broadcastValidationConsensusAndEquivocation = "consensus_and_equivocation"
)
// PublishBlindedBlockV2 instructs the beacon node to use the components of the `SignedBlindedBeaconBlock` to construct and publish a
// `SignedBeaconBlock` by swapping out the `transactions_root` for the corresponding full list of `transactions`.
// The beacon node should broadcast a newly constructed `SignedBeaconBlock` to the beacon network,
// to be included in the beacon chain. The beacon node is not required to validate the signed
// `BeaconBlock`, and a successful response (20X) only indicates that the broadcast has been
// successful. The beacon node is expected to integrate the new block into its state, and
// therefore validate the block internally; however, blocks which fail validation are still
// broadcast, but a different status code (202) is returned. Pre-Bellatrix, this endpoint will accept
// a `SignedBeaconBlock`. The broadcast behaviour may be adjusted via the `broadcast_validation`
// query parameter.
func (bs *Server) PublishBlindedBlockV2(w http.ResponseWriter, r *http.Request) {
if ok := bs.checkSync(r.Context(), w); !ok {
return
}
validate := validator.New()
body, err := io.ReadAll(r.Body)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not read request body",
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
return
}
var capellaBlock *SignedBlindedBeaconBlockCapella
if err = unmarshalStrict(body, &capellaBlock); err == nil {
if err = validate.Struct(capellaBlock); err == nil {
consensusBlock, err := capellaBlock.ToGeneric()
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &network.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
return
}
}
var bellatrixBlock *SignedBlindedBeaconBlockBellatrix
if err = unmarshalStrict(body, &bellatrixBlock); err == nil {
if err = validate.Struct(bellatrixBlock); err == nil {
consensusBlock, err := bellatrixBlock.ToGeneric()
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &network.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
return
}
}
var altairBlock *SignedBeaconBlockAltair
if err = unmarshalStrict(body, &altairBlock); err == nil {
if err = validate.Struct(altairBlock); err == nil {
consensusBlock, err := altairBlock.ToGeneric()
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &network.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
return
}
}
var phase0Block *SignedBeaconBlock
if err = unmarshalStrict(body, &phase0Block); err == nil {
if err = validate.Struct(phase0Block); err == nil {
consensusBlock, err := phase0Block.ToGeneric()
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &network.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
return
}
}
errJson := &network.DefaultErrorJson{
Message: "Body does not represent a valid block type",
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
}
// PublishBlockV2 instructs the beacon node to broadcast a newly signed beacon block to the beacon network,
// to be included in the beacon chain. A success response (20x) indicates that the block
// passed gossip validation and was successfully broadcast onto the network.
// The beacon node is also expected to integrate the block into the state, but may broadcast it
// before doing so, so as to aid timely delivery of the block. Should the block fail full
// validation, a separate success response code (202) is used to indicate that the block was
// successfully broadcast but failed integration. The broadcast behaviour may be adjusted via the
// `broadcast_validation` query parameter.
func (bs *Server) PublishBlockV2(w http.ResponseWriter, r *http.Request) {
if ok := bs.checkSync(r.Context(), w); !ok {
return
}
validate := validator.New()
body, err := io.ReadAll(r.Body)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not read request body",
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
return
}
var capellaBlock *SignedBeaconBlockCapella
if err = unmarshalStrict(body, &capellaBlock); err == nil {
if err = validate.Struct(capellaBlock); err == nil {
consensusBlock, err := capellaBlock.ToGeneric()
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &network.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
return
}
}
var bellatrixBlock *SignedBeaconBlockBellatrix
if err = unmarshalStrict(body, &bellatrixBlock); err == nil {
if err = validate.Struct(bellatrixBlock); err == nil {
consensusBlock, err := bellatrixBlock.ToGeneric()
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &network.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
return
}
}
var altairBlock *SignedBeaconBlockAltair
if err = unmarshalStrict(body, &altairBlock); err == nil {
if err = validate.Struct(altairBlock); err == nil {
consensusBlock, err := altairBlock.ToGeneric()
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &network.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
return
}
}
var phase0Block *SignedBeaconBlock
if err = unmarshalStrict(body, &phase0Block); err == nil {
if err = validate.Struct(phase0Block); err == nil {
consensusBlock, err := phase0Block.ToGeneric()
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &network.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
return
}
}
errJson := &network.DefaultErrorJson{
Message: "Body does not represent a valid block type",
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
}
func (bs *Server) proposeBlock(ctx context.Context, w http.ResponseWriter, blk *eth.GenericSignedBeaconBlock) {
_, err := bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, blk)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
return
}
}
func unmarshalStrict(data []byte, v interface{}) error {
dec := json.NewDecoder(bytes.NewReader(data))
dec.DisallowUnknownFields()
return dec.Decode(v)
}
func (bs *Server) validateBroadcast(r *http.Request, blk *eth.GenericSignedBeaconBlock) error {
switch r.URL.Query().Get(broadcastValidationQueryParam) {
case broadcastValidationConsensus:
b, err := blocks.NewSignedBeaconBlock(blk.Block)
if err != nil {
return errors.Wrapf(err, "could not create signed beacon block")
}
if err = bs.validateConsensus(r.Context(), b); err != nil {
return errors.Wrap(err, "consensus validation failed")
}
case broadcastValidationConsensusAndEquivocation:
b, err := blocks.NewSignedBeaconBlock(blk.Block)
if err != nil {
return errors.Wrapf(err, "could not create signed beacon block")
}
if err = bs.validateConsensus(r.Context(), b); err != nil {
return errors.Wrap(err, "consensus validation failed")
}
if err = bs.validateEquivocation(b.Block()); err != nil {
return errors.Wrap(err, "equivocation validation failed")
}
default:
return nil
}
return nil
}
func (bs *Server) validateConsensus(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) error {
parentRoot := blk.Block().ParentRoot()
parentState, err := bs.Stater.State(ctx, parentRoot[:])
if err != nil {
return errors.Wrap(err, "could not get parent state")
}
_, err = transition.ExecuteStateTransition(ctx, parentState, blk)
if err != nil {
return errors.Wrap(err, "could not execute state transition")
}
return nil
}
func (bs *Server) validateEquivocation(blk interfaces.ReadOnlyBeaconBlock) error {
if bs.ForkchoiceFetcher.HighestReceivedBlockSlot() == blk.Slot() {
return fmt.Errorf("block for slot %d already exists in fork choice", blk.Slot())
}
return nil
}
func (bs *Server) checkSync(ctx context.Context, w http.ResponseWriter) bool {
isSyncing, syncDetails, err := helpers.ValidateSyncHTTP(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not check if node is syncing: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
return false
}
if isSyncing {
msg := "Beacon node is currently syncing and not serving request on that endpoint"
details, err := json.Marshal(syncDetails)
if err == nil {
msg += " Details: " + string(details)
}
errJson := &network.DefaultErrorJson{
Message: msg,
Code: http.StatusServiceUnavailable,
}
network.WriteError(w, errJson)
return false
}
return true
}

File diff suppressed because it is too large


@@ -45,4 +45,5 @@ type Server struct {
ExecutionPayloadReconstructor execution.ExecutionPayloadReconstructor
FinalizationFetcher blockchain.FinalizationFetcher
BLSChangesPool blstoexec.PoolManager
ForkchoiceFetcher blockchain.ForkchoiceFetcher
}

File diff suppressed because it is too large


@@ -20,9 +20,9 @@ import (
"google.golang.org/grpc/status"
)
// ValidateSync checks whether the node is currently syncing and returns an error if it is.
// ValidateSyncGRPC checks whether the node is currently syncing and returns an error if it is.
// It also appends syncing info to gRPC headers.
func ValidateSync(
func ValidateSyncGRPC(
ctx context.Context,
syncChecker sync.Checker,
headFetcher blockchain.HeadFetcher,
@@ -38,8 +38,8 @@ func ValidateSync(
return status.Errorf(codes.Internal, "Could not check optimistic status: %v", err)
}
syncDetailsContainer := &syncDetailsContainer{
SyncDetails: &SyncDetailsJson{
syncDetailsContainer := &SyncDetailsContainer{
Data: &SyncDetailsJson{
HeadSlot: strconv.FormatUint(uint64(headSlot), 10),
SyncDistance: strconv.FormatUint(uint64(timeFetcher.CurrentSlot()-headSlot), 10),
IsSyncing: true,
@@ -58,6 +58,35 @@ func ValidateSync(
return status.Error(codes.Unavailable, "Syncing to latest head, not ready to respond")
}
// ValidateSyncHTTP checks whether the node is currently syncing.
// It returns a boolean indicating whether the node is syncing, along with sync details when it is.
func ValidateSyncHTTP(
ctx context.Context,
syncChecker sync.Checker,
headFetcher blockchain.HeadFetcher,
timeFetcher blockchain.TimeFetcher,
optimisticModeFetcher blockchain.OptimisticModeFetcher,
) (bool, *SyncDetailsContainer, error) {
if !syncChecker.Syncing() {
return false, nil, nil
}
headSlot := headFetcher.HeadSlot()
isOptimistic, err := optimisticModeFetcher.IsOptimistic(ctx)
if err != nil {
return true, nil, errors.Wrap(err, "could not check optimistic status")
}
syncDetails := &SyncDetailsContainer{
Data: &SyncDetailsJson{
HeadSlot: strconv.FormatUint(uint64(headSlot), 10),
SyncDistance: strconv.FormatUint(uint64(timeFetcher.CurrentSlot()-headSlot), 10),
IsSyncing: true,
IsOptimistic: isOptimistic,
},
}
return true, syncDetails, nil
}
// IsOptimistic checks whether the beacon state's block is optimistic.
func IsOptimistic(
ctx context.Context,
@@ -197,7 +226,7 @@ type SyncDetailsJson struct {
ElOffline bool `json:"el_offline"`
}
// SyncDetailsContainer is a wrapper for SyncDetails.
type syncDetailsContainer struct {
SyncDetails *SyncDetailsJson `json:"sync_details"`
// SyncDetailsContainer is a wrapper for Data.
type SyncDetailsContainer struct {
Data *SyncDetailsJson `json:"data"`
}


@@ -42,7 +42,7 @@ func TestValidateSync(t *testing.T) {
Slot: &headSlot,
State: st,
}
err = ValidateSync(ctx, syncChecker, chainService, chainService, chainService)
err = ValidateSyncGRPC(ctx, syncChecker, chainService, chainService, chainService)
require.NotNil(t, err)
sts, ok := grpc.ServerTransportStreamFromContext(ctx).(*runtime.ServerTransportStream)
require.Equal(t, true, ok, "type assertion failed")
@@ -51,7 +51,7 @@ func TestValidateSync(t *testing.T) {
require.Equal(t, true, ok, "could not retrieve custom error metadata value")
assert.DeepEqual(
t,
[]string{`{"sync_details":{"head_slot":"50","sync_distance":"50","is_syncing":true,"is_optimistic":false,"el_offline":false}}`},
[]string{`{"data":{"head_slot":"50","sync_distance":"50","is_syncing":true,"is_optimistic":false,"el_offline":false}}`},
v,
)
})
@@ -67,7 +67,7 @@ func TestValidateSync(t *testing.T) {
Slot: &headSlot,
State: st,
}
err = ValidateSync(ctx, syncChecker, nil, nil, chainService)
err = ValidateSyncGRPC(ctx, syncChecker, nil, nil, chainService)
require.NoError(t, err)
})
}


@@ -337,6 +337,9 @@ func handleEmptyFilters(req *ethpb.PeersRequest) (emptyState, emptyDirection boo
func peerInfo(peerStatus *peers.Status, id peer.ID) (*ethpb.Peer, error) {
enr, err := peerStatus.ENR(id)
if err != nil {
if errors.Is(err, peerdata.ErrPeerUnknown) {
return nil, nil
}
return nil, errors.Wrap(err, "could not obtain ENR")
}
var serializedEnr string
@@ -348,14 +351,23 @@ func peerInfo(peerStatus *peers.Status, id peer.ID) (*ethpb.Peer, error) {
}
address, err := peerStatus.Address(id)
if err != nil {
if errors.Is(err, peerdata.ErrPeerUnknown) {
return nil, nil
}
return nil, errors.Wrap(err, "could not obtain address")
}
connectionState, err := peerStatus.ConnectionState(id)
if err != nil {
if errors.Is(err, peerdata.ErrPeerUnknown) {
return nil, nil
}
return nil, errors.Wrap(err, "could not obtain connection state")
}
direction, err := peerStatus.Direction(id)
if err != nil {
if errors.Is(err, peerdata.ErrPeerUnknown) {
return nil, nil
}
return nil, errors.Wrap(err, "could not obtain direction")
}
if eth.PeerDirection(direction) == eth.PeerDirection_UNKNOWN {


@@ -44,7 +44,7 @@ func (vs *Server) GetAttesterDuties(ctx context.Context, req *ethpbv1.AttesterDu
ctx, span := trace.StartSpan(ctx, "validator.GetAttesterDuties")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
if err := rpchelpers.ValidateSyncGRPC(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
@@ -133,7 +133,7 @@ func (vs *Server) GetProposerDuties(ctx context.Context, req *ethpbv1.ProposerDu
ctx, span := trace.StartSpan(ctx, "validator.GetProposerDuties")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
if err := rpchelpers.ValidateSyncGRPC(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
@@ -224,7 +224,7 @@ func (vs *Server) GetSyncCommitteeDuties(ctx context.Context, req *ethpbv2.SyncC
ctx, span := trace.StartSpan(ctx, "validator.GetSyncCommitteeDuties")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
if err := rpchelpers.ValidateSyncGRPC(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
@@ -307,7 +307,7 @@ func (vs *Server) ProduceBlockV2(ctx context.Context, req *ethpbv1.ProduceBlockR
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlockV2")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
if err := rpchelpers.ValidateSyncGRPC(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
@@ -406,7 +406,7 @@ func (vs *Server) ProduceBlockV2SSZ(ctx context.Context, req *ethpbv1.ProduceBlo
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlockV2SSZ")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
if err := rpchelpers.ValidateSyncGRPC(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
@@ -515,7 +515,7 @@ func (vs *Server) ProduceBlindedBlock(ctx context.Context, req *ethpbv1.ProduceB
if !vs.BlockBuilder.Configured() {
return nil, status.Error(codes.Internal, "Block builder not configured")
}
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
if err := rpchelpers.ValidateSyncGRPC(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
@@ -614,7 +614,7 @@ func (vs *Server) ProduceBlindedBlockSSZ(ctx context.Context, req *ethpbv1.Produ
if !vs.BlockBuilder.Configured() {
return nil, status.Error(codes.Internal, "Block builder not configured")
}
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
if err := rpchelpers.ValidateSyncGRPC(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
@@ -891,7 +891,7 @@ func (vs *Server) SubmitBeaconCommitteeSubscription(ctx context.Context, req *et
ctx, span := trace.StartSpan(ctx, "validator.SubmitBeaconCommitteeSubscription")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
if err := rpchelpers.ValidateSyncGRPC(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
@@ -972,7 +972,7 @@ func (vs *Server) SubmitSyncCommitteeSubscription(ctx context.Context, req *ethp
ctx, span := trace.StartSpan(ctx, "validator.SubmitSyncCommitteeSubscription")
defer span.End()
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
if err := rpchelpers.ValidateSyncGRPC(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}


@@ -25,6 +25,7 @@ go_library(
"server.go",
"status.go",
"sync_committee.go",
"unblinder.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/validator",
visibility = ["//beacon-chain:__subpackages__"],
@@ -55,13 +56,14 @@ go_library(
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/payload-attribute:go_default_library",
@@ -96,6 +98,7 @@ go_library(
"@io_opencensus_go//trace:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
@@ -181,7 +184,6 @@ go_test(
"proposer_attestations_test.go",
"proposer_bellatrix_test.go",
"proposer_builder_test.go",
"proposer_capella_test.go",
"proposer_deposits_test.go",
"proposer_empty_block_test.go",
"proposer_execution_payload_test.go",
@@ -194,6 +196,7 @@ go_test(
"status_mainnet_test.go",
"status_test.go",
"sync_committee_test.go",
"unblinder_test.go",
"validator_test.go",
],
embed = [":go_default_library"],


@@ -50,6 +50,15 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
defer span.End()
span.AddAttributes(trace.Int64Attribute("slot", int64(req.Slot)))
t, err := slots.ToTime(uint64(vs.TimeFetcher.GenesisTime().Unix()), req.Slot)
if err != nil {
log.WithError(err).Error("Could not convert slot to time")
}
log.WithFields(logrus.Fields{
"slot": req.Slot,
"sinceSlotStartTime": time.Since(t),
}).Info("Begin building block")
// A syncing validator should not produce a block.
if vs.SyncChecker.Syncing() {
return nil, status.Error(codes.Unavailable, "Syncing to latest head, not ready to respond")
@@ -131,8 +140,17 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
// Set sync aggregate. New in Altair.
vs.setSyncAggregate(ctx, sBlk)
// Set execution data. New in Bellatrix.
if err := vs.setExecutionData(ctx, sBlk, head); err != nil {
// Get local and builder (if enabled) payloads. Set execution data. New in Bellatrix.
localPayload, err := vs.getLocalPayload(ctx, sBlk.Block(), head)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get local payload: %v", err)
}
builderPayload, err := vs.getBuilderPayload(ctx, sBlk.Block().Slot(), sBlk.Block().ProposerIndex())
if err != nil {
builderGetPayloadMissCount.Inc()
log.WithError(err).Error("Could not get builder payload")
}
if err := setExecutionData(ctx, sBlk, localPayload, builderPayload); err != nil {
return nil, status.Errorf(codes.Internal, "Could not set execution data: %v", err)
}
@@ -146,6 +164,12 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
}
sBlk.SetStateRoot(sr)
log.WithFields(logrus.Fields{
"slot": req.Slot,
"sinceSlotStartTime": time.Since(t),
"validator": sBlk.Block().ProposerIndex(),
}).Info("Finished building block")
pb, err := sBlk.Block().Proto()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not convert block to proto: %v", err)
@@ -209,7 +233,18 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
vs.setBlsToExecData(sBlk, head)
}()
if err := vs.setExecutionData(ctx, sBlk, head); err != nil {
localPayload, err := vs.getLocalPayload(ctx, sBlk.Block(), head)
if err != nil {
return status.Errorf(codes.Internal, "Could not get local payload: %v", err)
}
builderPayload, err := vs.getBuilderPayload(ctx, sBlk.Block().Slot(), sBlk.Block().ProposerIndex())
if err != nil {
builderGetPayloadMissCount.Inc()
log.WithError(err).Error("Could not get builder payload")
}
if err := setExecutionData(ctx, sBlk, localPayload, builderPayload); err != nil {
return status.Errorf(codes.Internal, "Could not set execution data: %v", err)
}
@@ -309,7 +344,7 @@ func (vs *Server) GetFeeRecipientByPubKey(ctx context.Context, request *ethpb.Fe
}, nil
}
func (vs *Server) proposeGenericBeaconBlock(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpb.ProposeResponse, error) {
func (vs *Server) proposeGenericBeaconBlock(ctx context.Context, blk interfaces.SignedBeaconBlock) (*ethpb.ProposeResponse, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.proposeGenericBeaconBlock")
defer span.End()
root, err := blk.Block().HashTreeRoot()
@@ -317,16 +352,13 @@ func (vs *Server) proposeGenericBeaconBlock(ctx context.Context, blk interfaces.
return nil, fmt.Errorf("could not tree hash block: %v", err)
}
if slots.ToEpoch(blk.Block().Slot()) >= params.BeaconConfig().CapellaForkEpoch {
blk, err = vs.unblindBuilderBlockCapella(ctx, blk)
if err != nil {
return nil, err
}
} else {
blk, err = vs.unblindBuilderBlock(ctx, blk)
if err != nil {
return nil, err
}
unblinder, err := newUnblinder(blk, vs.BlockBuilder)
if err != nil {
return nil, errors.Wrap(err, "could not create unblinder")
}
blk, err = unblinder.unblindBuilderBlock(ctx)
if err != nil {
return nil, errors.Wrap(err, "could not unblind builder block")
}
// Do not block proposal critical path with debug logging or block feed updates.


@@ -12,18 +12,14 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/v4/api/client/builder"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz"
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
"github.com/prysmaticlabs/prysm/v4/network/forks"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/sirupsen/logrus"
@@ -45,93 +41,84 @@ var emptyTransactionsRoot = [32]byte{127, 254, 36, 30, 166, 1, 135, 253, 176, 24
const blockBuilderTimeout = 1 * time.Second
// Sets the execution data for the block. Execution data can come from the local EL client or a remote builder, depending on validator registration and circuit breaker conditions.
func (vs *Server) setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, headState state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "ProposerServer.setExecutionData")
func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, localPayload, builderPayload interfaces.ExecutionData) error {
_, span := trace.StartSpan(ctx, "ProposerServer.setExecutionData")
defer span.End()
idx := blk.Block().ProposerIndex()
slot := blk.Block().Slot()
if slots.ToEpoch(slot) < params.BeaconConfig().BellatrixForkEpoch {
return nil
}
canUseBuilder, err := vs.canUseBuilder(ctx, slot, idx)
span.AddAttributes(trace.BoolAttribute("canUseBuilder", canUseBuilder))
if err != nil {
log.WithError(err).Warn("Proposer: failed to check if builder can be used")
} else if canUseBuilder {
builderPayload, err := vs.getPayloadHeaderFromBuilder(ctx, slot, idx)
if localPayload == nil {
return errors.New("local payload is nil")
}
// Use local payload if builder payload is nil.
if builderPayload == nil {
return blk.SetExecution(localPayload)
}
switch {
case blk.Version() >= version.Capella:
// Compare payload values between local and builder. Default to the local value if it is higher.
localValueGwei, err := localPayload.ValueInGwei()
if err != nil {
builderGetPayloadMissCount.Inc()
log.WithError(err).Warn("Proposer: failed to get payload header from builder")
} else {
switch {
case blk.Version() >= version.Capella:
localPayload, err := vs.getExecutionPayload(ctx, slot, idx, blk.Block().ParentRoot(), headState)
if err != nil {
return errors.Wrap(err, "failed to get execution payload")
}
// Compare payload values between local and builder. Default to the local value if it is higher.
localValueGwei, err := localPayload.ValueInGwei()
if err != nil {
return errors.Wrap(err, "failed to get local payload value")
}
builderValueGwei, err := builderPayload.ValueInGwei()
if err != nil {
log.WithError(err).Warn("Proposer: failed to get builder payload value") // Default to local if can't get builder value.
}
return errors.Wrap(err, "failed to get local payload value")
}
builderValueGwei, err := builderPayload.ValueInGwei()
if err != nil {
log.WithError(err).Warn("Proposer: failed to get builder payload value") // Default to local if can't get builder value.
return blk.SetExecution(localPayload)
}
withdrawalsMatched, err := matchingWithdrawalsRoot(localPayload, builderPayload)
if err != nil {
tracing.AnnotateError(span, err)
return errors.Wrap(err, "failed to match withdrawals root")
}
withdrawalsMatched, err := matchingWithdrawalsRoot(localPayload, builderPayload)
if err != nil {
tracing.AnnotateError(span, err)
log.WithError(err).Warn("Proposer: failed to match withdrawals root")
return blk.SetExecution(localPayload)
}
// Use builder payload if the following is true:
// builder_bid_value * 100 > local_block_value * (local-block-value-boost + 100)
boost := params.BeaconConfig().LocalBlockValueBoost
higherValueBuilder := builderValueGwei*100 > localValueGwei*(100+boost)
// Use builder payload if the following is true:
// builder_bid_value * 100 > local_block_value * (local-block-value-boost + 100)
boost := params.BeaconConfig().LocalBlockValueBoost
higherValueBuilder := builderValueGwei*100 > localValueGwei*(100+boost)
// If we can't get the builder value, just use local block.
if higherValueBuilder && withdrawalsMatched { // Builder value is higher and withdrawals match.
blk.SetBlinded(true)
if err := blk.SetExecution(builderPayload); err != nil {
log.WithError(err).Warn("Proposer: failed to set builder payload")
blk.SetBlinded(false)
} else {
return nil
}
}
if !higherValueBuilder {
log.WithFields(logrus.Fields{
"localGweiValue": localValueGwei,
"localBoostPercentage": boost,
"builderGweiValue": builderValueGwei,
}).Warn("Proposer: using local execution payload because higher value")
}
span.AddAttributes(
trace.BoolAttribute("higherValueBuilder", higherValueBuilder),
trace.Int64Attribute("localGweiValue", int64(localValueGwei)), // lint:ignore uintcast -- This is OK for tracing.
trace.Int64Attribute("localBoostPercentage", int64(boost)), // lint:ignore uintcast -- This is OK for tracing.
trace.Int64Attribute("builderGweiValue", int64(builderValueGwei)), // lint:ignore uintcast -- This is OK for tracing.
)
// If we can't get the builder value, just use local block.
if higherValueBuilder && withdrawalsMatched { // Builder value is higher and withdrawals match.
blk.SetBlinded(true)
if err := blk.SetExecution(builderPayload); err != nil {
log.WithError(err).Warn("Proposer: failed to set builder payload")
blk.SetBlinded(false)
return blk.SetExecution(localPayload)
default: // Bellatrix case.
blk.SetBlinded(true)
if err := blk.SetExecution(builderPayload); err != nil {
log.WithError(err).Warn("Proposer: failed to set builder payload")
blk.SetBlinded(false)
} else {
return nil
}
} else {
return nil
}
}
if !higherValueBuilder {
log.WithFields(logrus.Fields{
"localGweiValue": localValueGwei,
"localBoostPercentage": boost,
"builderGweiValue": builderValueGwei,
}).Warn("Proposer: using local execution payload because higher value")
}
span.AddAttributes(
trace.BoolAttribute("higherValueBuilder", higherValueBuilder),
trace.Int64Attribute("localGweiValue", int64(localValueGwei)), // lint:ignore uintcast -- This is OK for tracing.
trace.Int64Attribute("localBoostPercentage", int64(boost)), // lint:ignore uintcast -- This is OK for tracing.
trace.Int64Attribute("builderGweiValue", int64(builderValueGwei)), // lint:ignore uintcast -- This is OK for tracing.
)
return blk.SetExecution(localPayload)
default: // Bellatrix case.
blk.SetBlinded(true)
if err := blk.SetExecution(builderPayload); err != nil {
log.WithError(err).Warn("Proposer: failed to set builder payload")
blk.SetBlinded(false)
return blk.SetExecution(localPayload)
} else {
return nil
}
}
executionData, err := vs.getExecutionPayload(ctx, slot, idx, blk.Block().ParentRoot(), headState)
if err != nil {
return errors.Wrap(err, "failed to get execution payload")
}
return blk.SetExecution(executionData)
}
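
To make the comparison above concrete, a small worked sketch of the rule quoted in the comments (builder_bid_value * 100 > local_block_value * (100 + local-block-value-boost)); the Gwei amounts and boost percentage are invented for illustration.

// Sketch only; values are invented for illustration.
package main

import "fmt"

// higherValueBuilder mirrors the comparison above: the builder bid wins only if
// its value beats the local value inflated by the local block value boost.
func higherValueBuilder(builderGwei, localGwei, boostPct uint64) bool {
	return builderGwei*100 > localGwei*(100+boostPct)
}

func main() {
	// With a 10% boost and a 100 Gwei local payload, the threshold is 110 Gwei:
	fmt.Println(higherValueBuilder(105, 100, 10)) // false -> keep local payload
	fmt.Println(higherValueBuilder(120, 100, 10)) // true  -> use builder payload
}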
// This function retrieves the payload header given the slot number and the validator index.
@@ -222,9 +209,12 @@ func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot primitiv
}
log.WithFields(logrus.Fields{
"value": v.String(),
"builderPubKey": fmt.Sprintf("%#x", bid.Pubkey()),
"blockHash": fmt.Sprintf("%#x", header.BlockHash()),
"value": v.String(),
"builderPubKey": fmt.Sprintf("%#x", bid.Pubkey()),
"blockHash": fmt.Sprintf("%#x", header.BlockHash()),
"slot": slot,
"validator": idx,
"sinceSlotStartTime": time.Since(t),
}).Info("Received header with bid")
span.AddAttributes(
@@ -236,129 +226,6 @@ func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot primitiv
return header, nil
}
// This function retrieves the full payload block using the input blind block. This input must be versioned as
// a bellatrix blind block. The output block will contain the full payload. The original header block
// will be returned if the block builder is not configured.
func (vs *Server) unblindBuilderBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ReadOnlySignedBeaconBlock, error) {
if err := consensusblocks.BeaconBlockIsNil(b); err != nil {
return nil, err
}
// No-op if the input block is not version blind and bellatrix.
if b.Version() != version.Bellatrix || !b.IsBlinded() {
return b, nil
}
// Do nothing if the builder has not been configured.
if !vs.BlockBuilder.Configured() {
return b, nil
}
agg, err := b.Block().Body().SyncAggregate()
if err != nil {
return nil, err
}
h, err := b.Block().Body().Execution()
if err != nil {
return nil, err
}
header, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
if !ok {
return nil, errors.New("execution data must be execution payload header")
}
parentRoot := b.Block().ParentRoot()
stateRoot := b.Block().StateRoot()
randaoReveal := b.Block().Body().RandaoReveal()
graffiti := b.Block().Body().Graffiti()
sig := b.Signature()
psb := &ethpb.SignedBlindedBeaconBlockBellatrix{
Block: &ethpb.BlindedBeaconBlockBellatrix{
Slot: b.Block().Slot(),
ProposerIndex: b.Block().ProposerIndex(),
ParentRoot: parentRoot[:],
StateRoot: stateRoot[:],
Body: &ethpb.BlindedBeaconBlockBodyBellatrix{
RandaoReveal: randaoReveal[:],
Eth1Data: b.Block().Body().Eth1Data(),
Graffiti: graffiti[:],
ProposerSlashings: b.Block().Body().ProposerSlashings(),
AttesterSlashings: b.Block().Body().AttesterSlashings(),
Attestations: b.Block().Body().Attestations(),
Deposits: b.Block().Body().Deposits(),
VoluntaryExits: b.Block().Body().VoluntaryExits(),
SyncAggregate: agg,
ExecutionPayloadHeader: header,
},
},
Signature: sig[:],
}
sb, err := consensusblocks.NewSignedBeaconBlock(psb)
if err != nil {
return nil, errors.Wrap(err, "could not create signed block")
}
payload, err := vs.BlockBuilder.SubmitBlindedBlock(ctx, sb)
if err != nil {
return nil, err
}
headerRoot, err := header.HashTreeRoot()
if err != nil {
return nil, err
}
payloadRoot, err := payload.HashTreeRoot()
if err != nil {
return nil, err
}
if headerRoot != payloadRoot {
return nil, fmt.Errorf("header and payload root do not match, consider disconnect from relay to avoid further issues, "+
"%#x != %#x", headerRoot, payloadRoot)
}
pbPayload, err := payload.PbBellatrix()
if err != nil {
return nil, errors.Wrap(err, "could not get payload")
}
bb := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Slot: psb.Block.Slot,
ProposerIndex: psb.Block.ProposerIndex,
ParentRoot: psb.Block.ParentRoot,
StateRoot: psb.Block.StateRoot,
Body: &ethpb.BeaconBlockBodyBellatrix{
RandaoReveal: psb.Block.Body.RandaoReveal,
Eth1Data: psb.Block.Body.Eth1Data,
Graffiti: psb.Block.Body.Graffiti,
ProposerSlashings: psb.Block.Body.ProposerSlashings,
AttesterSlashings: psb.Block.Body.AttesterSlashings,
Attestations: psb.Block.Body.Attestations,
Deposits: psb.Block.Body.Deposits,
VoluntaryExits: psb.Block.Body.VoluntaryExits,
SyncAggregate: agg,
ExecutionPayload: pbPayload,
},
},
Signature: psb.Signature,
}
wb, err := consensusblocks.NewSignedBeaconBlock(bb)
if err != nil {
return nil, err
}
txs, err := payload.Transactions()
if err != nil {
return nil, errors.Wrap(err, "could not get transactions from payload")
}
log.WithFields(logrus.Fields{
"blockHash": fmt.Sprintf("%#x", h.BlockHash()),
"feeRecipient": fmt.Sprintf("%#x", h.FeeRecipient()),
"gasUsed": h.GasUsed,
"slot": b.Block().Slot(),
"txs": len(txs),
}).Info("Retrieved full payload from builder")
return wb, nil
}
// Validates builder signature and returns an error if the signature is invalid.
func validateBuilderSignature(signedBid builder.SignedBid) error {
d, err := signing.ComputeDomain(params.BeaconConfig().DomainApplicationBuilder,


@@ -18,6 +18,7 @@ import (
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -70,12 +71,18 @@ func TestServer_setExecutionData(t *testing.T) {
BeaconDB: beaconDB,
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: true, Cfg: &builderTest.Config{BeaconDB: beaconDB}},
ForkchoiceFetcher: &blockchainTest.ChainService{},
}
t.Run("No builder configured. Use local block", func(t *testing.T) {
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockCapella())
require.NoError(t, err)
require.NoError(t, vs.setExecutionData(context.Background(), blk, capellaTransitionState))
b := blk.Block()
localPayload, err := vs.getLocalPayload(ctx, b, capellaTransitionState)
require.NoError(t, err)
builderPayload, err := vs.getBuilderPayload(ctx, b.Slot(), b.ProposerIndex())
require.NoError(t, err)
require.NoError(t, setExecutionData(context.Background(), blk, localPayload, builderPayload))
e, err := blk.Block().Body().Execution()
require.NoError(t, err)
require.Equal(t, uint64(1), e.BlockNumber()) // Local block
@@ -128,7 +135,13 @@ func TestServer_setExecutionData(t *testing.T) {
vs.ForkchoiceFetcher.SetForkChoiceGenesisTime(uint64(time.Now().Unix()))
vs.TimeFetcher = chain
vs.HeadFetcher = chain
require.NoError(t, vs.setExecutionData(context.Background(), blk, capellaTransitionState))
b := blk.Block()
localPayload, err := vs.getLocalPayload(ctx, b, capellaTransitionState)
require.NoError(t, err)
builderPayload, err := vs.getBuilderPayload(ctx, b.Slot(), b.ProposerIndex())
require.NoError(t, err)
require.NoError(t, setExecutionData(context.Background(), blk, localPayload, builderPayload))
e, err := blk.Block().Body().Execution()
require.NoError(t, err)
require.Equal(t, uint64(1), e.BlockNumber()) // Local block because incorrect withdrawals
@@ -184,7 +197,13 @@ func TestServer_setExecutionData(t *testing.T) {
vs.ForkchoiceFetcher.SetForkChoiceGenesisTime(uint64(time.Now().Unix()))
vs.TimeFetcher = chain
vs.HeadFetcher = chain
require.NoError(t, vs.setExecutionData(context.Background(), blk, capellaTransitionState))
b := blk.Block()
localPayload, err := vs.getLocalPayload(ctx, b, capellaTransitionState)
require.NoError(t, err)
builderPayload, err := vs.getBuilderPayload(ctx, b.Slot(), b.ProposerIndex())
require.NoError(t, err)
require.NoError(t, setExecutionData(context.Background(), blk, localPayload, builderPayload))
e, err := blk.Block().Body().Execution()
require.NoError(t, err)
require.Equal(t, uint64(2), e.BlockNumber()) // Builder block
@@ -193,7 +212,12 @@ func TestServer_setExecutionData(t *testing.T) {
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockCapella())
require.NoError(t, err)
vs.ExecutionEngineCaller = &powtesting.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &v1.ExecutionPayloadCapella{BlockNumber: 3}, BlockValue: 2}
require.NoError(t, vs.setExecutionData(context.Background(), blk, capellaTransitionState))
b := blk.Block()
localPayload, err := vs.getLocalPayload(ctx, b, capellaTransitionState)
require.NoError(t, err)
builderPayload, err := vs.getBuilderPayload(ctx, b.Slot(), b.ProposerIndex())
require.NoError(t, err)
require.NoError(t, setExecutionData(context.Background(), blk, localPayload, builderPayload))
e, err := blk.Block().Body().Execution()
require.NoError(t, err)
require.Equal(t, uint64(3), e.BlockNumber()) // Local block
@@ -208,7 +232,12 @@ func TestServer_setExecutionData(t *testing.T) {
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockCapella())
require.NoError(t, err)
vs.ExecutionEngineCaller = &powtesting.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &v1.ExecutionPayloadCapella{BlockNumber: 3}, BlockValue: 1}
require.NoError(t, vs.setExecutionData(context.Background(), blk, capellaTransitionState))
b := blk.Block()
localPayload, err := vs.getLocalPayload(ctx, b, capellaTransitionState)
require.NoError(t, err)
builderPayload, err := vs.getBuilderPayload(ctx, b.Slot(), b.ProposerIndex())
require.NoError(t, err)
require.NoError(t, setExecutionData(context.Background(), blk, localPayload, builderPayload))
e, err := blk.Block().Body().Execution()
require.NoError(t, err)
require.Equal(t, uint64(3), e.BlockNumber()) // Local block
@@ -224,7 +253,12 @@ func TestServer_setExecutionData(t *testing.T) {
Cfg: &builderTest.Config{BeaconDB: beaconDB},
}
vs.ExecutionEngineCaller = &powtesting.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &v1.ExecutionPayloadCapella{BlockNumber: 4}, BlockValue: 0}
require.NoError(t, vs.setExecutionData(context.Background(), blk, capellaTransitionState))
b := blk.Block()
localPayload, err := vs.getLocalPayload(ctx, b, capellaTransitionState)
require.NoError(t, err)
builderPayload, err := vs.getBuilderPayload(ctx, b.Slot(), b.ProposerIndex())
require.ErrorIs(t, consensus_types.ErrNilObjectWrapped, err) // Builder returns fault. Use local block
require.NoError(t, setExecutionData(context.Background(), blk, localPayload, builderPayload))
e, err := blk.Block().Body().Execution()
require.NoError(t, err)
require.Equal(t, uint64(4), e.BlockNumber()) // Local block
@@ -465,146 +499,6 @@ func TestServer_getPayloadHeader(t *testing.T) {
}
}
func TestServer_getBuilderBlock(t *testing.T) {
p := emptyPayload()
p.GasLimit = 123
tests := []struct {
name string
blk interfaces.ReadOnlySignedBeaconBlock
mock *builderTest.MockBuilderService
err string
returnedBlk interfaces.ReadOnlySignedBeaconBlock
}{
{
name: "nil block",
blk: nil,
err: "signed beacon block can't be nil",
},
{
name: "old block version",
blk: func() interfaces.ReadOnlySignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
return wb
}(),
returnedBlk: func() interfaces.ReadOnlySignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
return wb
}(),
},
{
name: "not configured",
blk: func() interfaces.ReadOnlySignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBlindedBeaconBlockBellatrix())
require.NoError(t, err)
return wb
}(),
mock: &builderTest.MockBuilderService{
HasConfigured: false,
},
returnedBlk: func() interfaces.ReadOnlySignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBlindedBeaconBlockBellatrix())
require.NoError(t, err)
return wb
}(),
},
{
name: "submit blind block error",
blk: func() interfaces.ReadOnlySignedBeaconBlock {
b := util.NewBlindedBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
mock: &builderTest.MockBuilderService{
Payload: &v1.ExecutionPayload{},
HasConfigured: true,
ErrSubmitBlindedBlock: errors.New("can't submit"),
},
err: "can't submit",
},
{
name: "head and payload root mismatch",
blk: func() interfaces.ReadOnlySignedBeaconBlock {
b := util.NewBlindedBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
mock: &builderTest.MockBuilderService{
HasConfigured: true,
Payload: p,
},
returnedBlk: func() interfaces.ReadOnlySignedBeaconBlock {
b := util.NewBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
b.Block.Body.ExecutionPayload = p
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
err: "header and payload root do not match",
},
{
name: "can get payload",
blk: func() interfaces.ReadOnlySignedBeaconBlock {
b := util.NewBlindedBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
txRoot, err := ssz.TransactionsRoot([][]byte{})
require.NoError(t, err)
b.Block.Body.ExecutionPayloadHeader = &v1.ExecutionPayloadHeader{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: txRoot[:],
GasLimit: 123,
}
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
mock: &builderTest.MockBuilderService{
HasConfigured: true,
Payload: p,
},
returnedBlk: func() interfaces.ReadOnlySignedBeaconBlock {
b := util.NewBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
b.Block.Body.ExecutionPayload = p
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
vs := &Server{BlockBuilder: tc.mock}
gotBlk, err := vs.unblindBuilderBlock(context.Background(), tc.blk)
if tc.err != "" {
require.ErrorContains(t, tc.err, err)
} else {
require.NoError(t, err)
require.DeepEqual(t, tc.returnedBlk, gotBlk)
}
})
}
}
func TestServer_validateBuilderSignature(t *testing.T) {
sk, err := bls.RandKey()
require.NoError(t, err)

View File

@@ -1,17 +1,10 @@
package validator
import (
"context"
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
consensusblocks "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/sirupsen/logrus"
)
// Sets the bls to exec data for a block.
@@ -34,130 +27,3 @@ func (vs *Server) setBlsToExecData(blk interfaces.SignedBeaconBlock, headState s
}
}
}
func (vs *Server) unblindBuilderBlockCapella(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ReadOnlySignedBeaconBlock, error) {
if err := consensusblocks.BeaconBlockIsNil(b); err != nil {
return nil, errors.Wrap(err, "block is nil")
}
// No-op if the input block is not version blind and capella.
if b.Version() != version.Capella || !b.IsBlinded() {
return b, nil
}
// Do nothing if the builder has not been configured.
if !vs.BlockBuilder.Configured() {
return b, nil
}
agg, err := b.Block().Body().SyncAggregate()
if err != nil {
return nil, errors.Wrap(err, "could not get sync aggregate")
}
h, err := b.Block().Body().Execution()
if err != nil {
return nil, errors.Wrap(err, "could not get execution header")
}
header, ok := h.Proto().(*enginev1.ExecutionPayloadHeaderCapella)
if !ok {
return nil, errors.New("execution data must be execution payload header capella")
}
parentRoot := b.Block().ParentRoot()
stateRoot := b.Block().StateRoot()
randaoReveal := b.Block().Body().RandaoReveal()
graffiti := b.Block().Body().Graffiti()
sig := b.Signature()
blsToExecChange, err := b.Block().Body().BLSToExecutionChanges()
if err != nil {
return nil, errors.Wrap(err, "could not get bls to execution changes")
}
sb := &ethpb.SignedBlindedBeaconBlockCapella{
Block: &ethpb.BlindedBeaconBlockCapella{
Slot: b.Block().Slot(),
ProposerIndex: b.Block().ProposerIndex(),
ParentRoot: parentRoot[:],
StateRoot: stateRoot[:],
Body: &ethpb.BlindedBeaconBlockBodyCapella{
RandaoReveal: randaoReveal[:],
Eth1Data: b.Block().Body().Eth1Data(),
Graffiti: graffiti[:],
ProposerSlashings: b.Block().Body().ProposerSlashings(),
AttesterSlashings: b.Block().Body().AttesterSlashings(),
Attestations: b.Block().Body().Attestations(),
Deposits: b.Block().Body().Deposits(),
VoluntaryExits: b.Block().Body().VoluntaryExits(),
SyncAggregate: agg,
ExecutionPayloadHeader: header,
BlsToExecutionChanges: blsToExecChange,
},
},
Signature: sig[:],
}
wrappedSb, err := consensusblocks.NewSignedBeaconBlock(sb)
if err != nil {
return nil, errors.Wrap(err, "could not create signed block")
}
payload, err := vs.BlockBuilder.SubmitBlindedBlock(ctx, wrappedSb)
if err != nil {
return nil, errors.Wrap(err, "could not submit blinded block")
}
payloadHtr, err := payload.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "could not get payload hash tree root")
}
headerHtr, err := header.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "could not get header hash tree root")
}
if payloadHtr != headerHtr {
return nil, fmt.Errorf("payload hash tree root %x does not match header hash tree root %x", payloadHtr, headerHtr)
}
capellaPayload, err := payload.PbCapella()
if err != nil {
return nil, errors.Wrap(err, "could not get payload")
}
bb := &ethpb.SignedBeaconBlockCapella{
Block: &ethpb.BeaconBlockCapella{
Slot: sb.Block.Slot,
ProposerIndex: sb.Block.ProposerIndex,
ParentRoot: sb.Block.ParentRoot,
StateRoot: sb.Block.StateRoot,
Body: &ethpb.BeaconBlockBodyCapella{
RandaoReveal: sb.Block.Body.RandaoReveal,
Eth1Data: sb.Block.Body.Eth1Data,
Graffiti: sb.Block.Body.Graffiti,
ProposerSlashings: sb.Block.Body.ProposerSlashings,
AttesterSlashings: sb.Block.Body.AttesterSlashings,
Attestations: sb.Block.Body.Attestations,
Deposits: sb.Block.Body.Deposits,
VoluntaryExits: sb.Block.Body.VoluntaryExits,
SyncAggregate: agg,
ExecutionPayload: capellaPayload,
BlsToExecutionChanges: blsToExecChange,
},
},
Signature: sb.Signature,
}
wb, err := consensusblocks.NewSignedBeaconBlock(bb)
if err != nil {
return nil, errors.Wrap(err, "could not create signed block")
}
txs, err := payload.Transactions()
if err != nil {
return nil, errors.Wrap(err, "could not get transactions from payload")
}
log.WithFields(logrus.Fields{
"blockHash": fmt.Sprintf("%#x", h.BlockHash()),
"feeRecipient": fmt.Sprintf("%#x", h.FeeRecipient()),
"gasUsed": h.GasUsed,
"slot": b.Block().Slot(),
"txs": len(txs),
}).Info("Retrieved full capella payload from builder")
return wb, nil
}


@@ -1,147 +0,0 @@
package validator
import (
"context"
"testing"
"github.com/pkg/errors"
builderTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/builder/testing"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
)
func TestServer_unblindBuilderCapellaBlock(t *testing.T) {
p := emptyPayloadCapella()
p.GasLimit = 123
tests := []struct {
name string
blk interfaces.ReadOnlySignedBeaconBlock
mock *builderTest.MockBuilderService
err string
returnedBlk interfaces.ReadOnlySignedBeaconBlock
}{
{
name: "nil block",
blk: nil,
err: "signed beacon block can't be nil",
},
{
name: "old block version",
blk: func() interfaces.ReadOnlySignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
return wb
}(),
returnedBlk: func() interfaces.ReadOnlySignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
return wb
}(),
},
{
name: "not configured",
blk: func() interfaces.ReadOnlySignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBlindedBeaconBlockBellatrix())
require.NoError(t, err)
return wb
}(),
mock: &builderTest.MockBuilderService{
HasConfigured: false,
},
returnedBlk: func() interfaces.ReadOnlySignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBlindedBeaconBlockBellatrix())
require.NoError(t, err)
return wb
}(),
},
{
name: "submit blind block error",
blk: func() interfaces.ReadOnlySignedBeaconBlock {
b := util.NewBlindedBeaconBlockCapella()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
mock: &builderTest.MockBuilderService{
PayloadCapella: &v1.ExecutionPayloadCapella{},
HasConfigured: true,
ErrSubmitBlindedBlock: errors.New("can't submit"),
},
err: "can't submit",
},
{
name: "can get payload",
blk: func() interfaces.ReadOnlySignedBeaconBlock {
b := util.NewBlindedBeaconBlockCapella()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
txRoot, err := ssz.TransactionsRoot(make([][]byte, 0))
require.NoError(t, err)
wdRoot, err := ssz.WithdrawalSliceRoot([]*v1.Withdrawal{}, fieldparams.MaxWithdrawalsPerPayload)
require.NoError(t, err)
b.Block.Body.ExecutionPayloadHeader = &v1.ExecutionPayloadHeaderCapella{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: txRoot[:],
GasLimit: 123,
WithdrawalsRoot: wdRoot[:],
}
b.Block.Body.BlsToExecutionChanges = []*eth.SignedBLSToExecutionChange{
{Message: &eth.BLSToExecutionChange{ValidatorIndex: 1, FromBlsPubkey: []byte{'a'}}},
{Message: &eth.BLSToExecutionChange{ValidatorIndex: 2, FromBlsPubkey: []byte{'b'}}},
{Message: &eth.BLSToExecutionChange{ValidatorIndex: 3, FromBlsPubkey: []byte{'c'}}},
{Message: &eth.BLSToExecutionChange{ValidatorIndex: 4, FromBlsPubkey: []byte{'d'}}},
}
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
mock: &builderTest.MockBuilderService{
HasConfigured: true,
PayloadCapella: p,
},
returnedBlk: func() interfaces.ReadOnlySignedBeaconBlock {
b := util.NewBeaconBlockCapella()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
b.Block.Body.ExecutionPayload = p
b.Block.Body.BlsToExecutionChanges = []*eth.SignedBLSToExecutionChange{
{Message: &eth.BLSToExecutionChange{ValidatorIndex: 1, FromBlsPubkey: []byte{'a'}}},
{Message: &eth.BLSToExecutionChange{ValidatorIndex: 2, FromBlsPubkey: []byte{'b'}}},
{Message: &eth.BLSToExecutionChange{ValidatorIndex: 3, FromBlsPubkey: []byte{'c'}}},
{Message: &eth.BLSToExecutionChange{ValidatorIndex: 4, FromBlsPubkey: []byte{'d'}}},
}
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
vs := &Server{BlockBuilder: tc.mock}
gotBlk, err := vs.unblindBuilderBlockCapella(context.Background(), tc.blk)
if tc.err != "" {
require.ErrorContains(t, tc.err, err)
} else {
require.NoError(t, err)
require.DeepEqual(t, tc.returnedBlk, gotBlk)
}
})
}
}


@@ -43,10 +43,17 @@ var (
// This returns the execution payload of a given slot. The function handles both pre-merge and post-merge cases.
// The payload is computed given the respective time of the merge.
func (vs *Server) getExecutionPayload(ctx context.Context, slot primitives.Slot, vIdx primitives.ValidatorIndex, headRoot [32]byte, st state.BeaconState) (interfaces.ExecutionData, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.getExecutionPayload")
func (vs *Server) getLocalPayload(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, st state.BeaconState) (interfaces.ExecutionData, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.getLocalPayload")
defer span.End()
if blk.Version() < version.Bellatrix {
return nil, nil
}
slot := blk.Slot()
vIdx := blk.ProposerIndex()
headRoot := blk.ParentRoot()
proposerID, payloadId, ok := vs.ProposerSlotIndexCache.GetProposerPayloadIDs(slot, headRoot)
feeRecipient := params.BeaconConfig().DefaultFeeRecipient
recipient, err := vs.BeaconDB.FeeRecipientByValidatorID(ctx, vIdx)
@@ -221,6 +228,27 @@ func (vs *Server) getTerminalBlockHashIfExists(ctx context.Context, transitionTi
return vs.ExecutionEngineCaller.GetTerminalBlockHash(ctx, transitionTime)
}
func (vs *Server) getBuilderPayload(ctx context.Context,
slot primitives.Slot,
vIdx primitives.ValidatorIndex) (interfaces.ExecutionData, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.getBuilderPayload")
defer span.End()
if slots.ToEpoch(slot) < params.BeaconConfig().BellatrixForkEpoch {
return nil, nil
}
canUseBuilder, err := vs.canUseBuilder(ctx, slot, vIdx)
if err != nil {
return nil, errors.Wrap(err, "failed to check if we can use the builder")
}
span.AddAttributes(trace.BoolAttribute("canUseBuilder", canUseBuilder))
if !canUseBuilder {
return nil, nil
}
return vs.getPayloadHeaderFromBuilder(ctx, slot, vIdx)
}
// activationEpochNotReached returns true if the activation epoch has not been reached.
// This satisfies the following conditions in the spec:
//


@@ -143,7 +143,13 @@ func TestServer_getExecutionPayload(t *testing.T) {
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}
vs.ProposerSlotIndexCache.SetProposerAndPayloadIDs(tt.st.Slot(), 100, [8]byte{100}, [32]byte{'a'})
_, err := vs.getExecutionPayload(context.Background(), tt.st.Slot(), tt.validatorIndx, [32]byte{'a'}, tt.st)
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = tt.st.Slot()
blk.Block.ProposerIndex = tt.validatorIndx
blk.Block.ParentRoot = bytesutil.PadTo([]byte{'a'}, 32)
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
_, err = vs.getLocalPayload(context.Background(), b.Block(), tt.st)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
@@ -179,7 +185,13 @@ func TestServer_getExecutionPayloadContextTimeout(t *testing.T) {
}
vs.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nonTransitionSt.Slot(), 100, [8]byte{100}, [32]byte{'a'})
_, err = vs.getExecutionPayload(context.Background(), nonTransitionSt.Slot(), 100, [32]byte{'a'}, nonTransitionSt)
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = nonTransitionSt.Slot()
blk.Block.ProposerIndex = 100
blk.Block.ParentRoot = bytesutil.PadTo([]byte{'a'}, 32)
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
_, err = vs.getLocalPayload(context.Background(), b.Block(), nonTransitionSt)
require.NoError(t, err)
}
@@ -225,7 +237,13 @@ func TestServer_getExecutionPayload_UnexpectedFeeRecipient(t *testing.T) {
BeaconDB: beaconDB,
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}
gotPayload, err := vs.getExecutionPayload(context.Background(), transitionSt.Slot(), 0, [32]byte{}, transitionSt)
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = transitionSt.Slot()
blk.Block.ParentRoot = bytesutil.PadTo([]byte{}, 32)
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
gotPayload, err := vs.getLocalPayload(context.Background(), b.Block(), transitionSt)
require.NoError(t, err)
require.NotNil(t, gotPayload)
@@ -237,7 +255,7 @@ func TestServer_getExecutionPayload_UnexpectedFeeRecipient(t *testing.T) {
payload.FeeRecipient = evilRecipientAddress[:]
vs.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
gotPayload, err = vs.getExecutionPayload(context.Background(), transitionSt.Slot(), 0, [32]byte{}, transitionSt)
gotPayload, err = vs.getLocalPayload(context.Background(), b.Block(), transitionSt)
require.NoError(t, err)
require.NotNil(t, gotPayload)


@@ -42,6 +42,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/container/trie"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation"
@@ -547,6 +548,12 @@ func TestProposer_ProposeBlock_OK(t *testing.T) {
blockToPropose := util.NewBlindedBeaconBlockCapella()
blockToPropose.Block.Slot = 5
blockToPropose.Block.ParentRoot = parent[:]
txRoot, err := ssz.TransactionsRoot([][]byte{})
require.NoError(t, err)
withdrawalsRoot, err := ssz.WithdrawalSliceRoot([]*enginev1.Withdrawal{}, fieldparams.MaxWithdrawalsPerPayload)
require.NoError(t, err)
blockToPropose.Block.Body.ExecutionPayloadHeader.TransactionsRoot = txRoot[:]
blockToPropose.Block.Body.ExecutionPayloadHeader.WithdrawalsRoot = withdrawalsRoot[:]
blk := &ethpb.GenericSignedBeaconBlock_BlindedCapella{BlindedCapella: blockToPropose}
return &ethpb.GenericSignedBeaconBlock{Block: blk}
},
@@ -555,30 +562,19 @@ func TestProposer_ProposeBlock_OK(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := dbutil.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlock()
util.SaveBlock(t, ctx, db, genesis)
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, db.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
proposerServer := &Server{
ChainStartFetcher: &mockExecution.Chain{},
Eth1InfoFetcher: &mockExecution.Chain{},
Eth1BlockFetcher: &mockExecution.Chain{},
BlockReceiver: c,
HeadFetcher: c,
BlockNotifier: c.BlockNotifier(),
P2P: mockp2p.NewTestP2P(t),
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: true, PayloadCapella: emptyPayloadCapella()},
BlockReceiver: c,
BlockNotifier: c.BlockNotifier(),
P2P: mockp2p.NewTestP2P(t),
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: true, PayloadCapella: emptyPayloadCapella()},
}
blockToPropose := tt.block(bsRoot)
res, err := proposerServer.ProposeBeaconBlock(context.Background(), blockToPropose)


@@ -0,0 +1,184 @@
package validator
import (
"context"
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/builder"
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
consensusblocks "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/sirupsen/logrus"
"google.golang.org/protobuf/proto"
)
type unblinder struct {
b interfaces.SignedBeaconBlock
builder builder.BlockBuilder
}
func newUnblinder(b interfaces.SignedBeaconBlock, builder builder.BlockBuilder) (*unblinder, error) {
if err := consensusblocks.BeaconBlockIsNil(b); err != nil {
return nil, err
}
if builder == nil {
return nil, errors.New("nil builder provided")
}
return &unblinder{
b: b,
builder: builder,
}, nil
}
func (u *unblinder) unblindBuilderBlock(ctx context.Context) (interfaces.SignedBeaconBlock, error) {
if !u.b.IsBlinded() || u.b.Version() < version.Bellatrix {
return u.b, nil
}
if u.b.IsBlinded() && !u.builder.Configured() {
return nil, errors.New("builder not configured")
}
psb, err := u.blindedProtoBlock()
if err != nil {
return nil, errors.Wrap(err, "could not get blinded proto block")
}
sb, err := consensusblocks.NewSignedBeaconBlock(psb)
if err != nil {
return nil, errors.Wrap(err, "could not create signed block")
}
if err = copyBlockData(u.b, sb); err != nil {
return nil, errors.Wrap(err, "could not copy block data")
}
h, err := u.b.Block().Body().Execution()
if err != nil {
return nil, errors.Wrap(err, "could not get execution")
}
if err = sb.SetExecution(h); err != nil {
return nil, errors.Wrap(err, "could not set execution")
}
payload, err := u.builder.SubmitBlindedBlock(ctx, sb)
if err != nil {
return nil, errors.Wrap(err, "could not submit blinded block")
}
headerRoot, err := h.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "could not get header root")
}
payloadRoot, err := payload.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "could not get payload root")
}
if headerRoot != payloadRoot {
return nil, fmt.Errorf("header and payload root do not match, consider disconnect from relay to avoid further issues, "+
"%#x != %#x", headerRoot, payloadRoot)
}
bb, err := u.protoBlock()
if err != nil {
return nil, errors.Wrap(err, "could not get proto block")
}
wb, err := consensusblocks.NewSignedBeaconBlock(bb)
if err != nil {
return nil, errors.Wrap(err, "could not create signed block")
}
if err = copyBlockData(sb, wb); err != nil {
return nil, errors.Wrap(err, "could not copy block data")
}
if err = wb.SetExecution(payload); err != nil {
return nil, errors.Wrap(err, "could not set execution")
}
txs, err := payload.Transactions()
if err != nil {
return nil, errors.Wrap(err, "could not get transactions from payload")
}
log.WithFields(logrus.Fields{
"blockHash": fmt.Sprintf("%#x", h.BlockHash()),
"feeRecipient": fmt.Sprintf("%#x", h.FeeRecipient()),
"gasUsed": h.GasUsed(),
"slot": u.b.Block().Slot(),
"txs": len(txs),
}).Info("Retrieved full payload from builder")
return wb, nil
}
func copyBlockData(src interfaces.SignedBeaconBlock, dst interfaces.SignedBeaconBlock) error {
agg, err := src.Block().Body().SyncAggregate()
if err != nil {
return errors.Wrap(err, "could not get sync aggregate")
}
parentRoot := src.Block().ParentRoot()
stateRoot := src.Block().StateRoot()
randaoReveal := src.Block().Body().RandaoReveal()
graffiti := src.Block().Body().Graffiti()
sig := src.Signature()
blsToExecChanges, err := src.Block().Body().BLSToExecutionChanges()
if err != nil && !errors.Is(err, consensus_types.ErrUnsupportedField) {
return errors.Wrap(err, "could not get bls to execution changes")
}
dst.SetSlot(src.Block().Slot())
dst.SetProposerIndex(src.Block().ProposerIndex())
dst.SetParentRoot(parentRoot[:])
dst.SetStateRoot(stateRoot[:])
dst.SetRandaoReveal(randaoReveal[:])
dst.SetEth1Data(src.Block().Body().Eth1Data())
dst.SetGraffiti(graffiti[:])
dst.SetProposerSlashings(src.Block().Body().ProposerSlashings())
dst.SetAttesterSlashings(src.Block().Body().AttesterSlashings())
dst.SetAttestations(src.Block().Body().Attestations())
dst.SetDeposits(src.Block().Body().Deposits())
dst.SetVoluntaryExits(src.Block().Body().VoluntaryExits())
if err = dst.SetSyncAggregate(agg); err != nil {
return errors.Wrap(err, "could not set sync aggregate")
}
dst.SetSignature(sig[:])
if err = dst.SetBLSToExecutionChanges(blsToExecChanges); err != nil && !errors.Is(err, consensus_types.ErrUnsupportedField) {
return errors.Wrap(err, "could not set bls to execution changes")
}
return nil
}
func (u *unblinder) blindedProtoBlock() (proto.Message, error) {
switch u.b.Version() {
case version.Bellatrix:
return &ethpb.SignedBlindedBeaconBlockBellatrix{
Block: &ethpb.BlindedBeaconBlockBellatrix{
Body: &ethpb.BlindedBeaconBlockBodyBellatrix{},
},
}, nil
case version.Capella:
return &ethpb.SignedBlindedBeaconBlockCapella{
Block: &ethpb.BlindedBeaconBlockCapella{
Body: &ethpb.BlindedBeaconBlockBodyCapella{},
},
}, nil
default:
return nil, fmt.Errorf("invalid version %s", version.String(u.b.Version()))
}
}
func (u *unblinder) protoBlock() (proto.Message, error) {
switch u.b.Version() {
case version.Bellatrix:
return &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{},
},
}, nil
case version.Capella:
return &ethpb.SignedBeaconBlockCapella{
Block: &ethpb.BeaconBlockCapella{
Body: &ethpb.BeaconBlockBodyCapella{},
},
}, nil
default:
return nil, fmt.Errorf("invalid version %s", version.String(u.b.Version()))
}
}
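For orientation, a minimal hedged sketch (not part of this change) of how a caller drives the unblinder; blindedBlock, builderService, and ctx are placeholders, and the builder's returned payload is assumed to hash to the same root as the blinded block's execution payload header, as the tests below arrange.
// Hedged sketch: illustrative call flow only.
u, err := newUnblinder(blindedBlock, builderService)
if err != nil {
return nil, err
}
fullBlock, err := u.unblindBuilderBlock(ctx)
if err != nil {
// fails if the builder is not configured, the submission fails, or the
// returned payload does not match the committed header root
return nil, err
}
// fullBlock now carries the full execution payload returned by the builder.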

View File

@@ -0,0 +1,264 @@
package validator
import (
"context"
"testing"
"github.com/pkg/errors"
builderTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/builder/testing"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
)
func Test_unblindBuilderBlock(t *testing.T) {
p := emptyPayload()
p.GasLimit = 123
pCapella := emptyPayloadCapella()
pCapella.GasLimit = 123
tests := []struct {
name string
blk interfaces.SignedBeaconBlock
mock *builderTest.MockBuilderService
err string
returnedBlk interfaces.SignedBeaconBlock
}{
{
name: "old block version",
blk: func() interfaces.SignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
return wb
}(),
returnedBlk: func() interfaces.SignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
return wb
}(),
},
{
name: "blinded without configured builder",
blk: func() interfaces.SignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBlindedBeaconBlockBellatrix())
require.NoError(t, err)
return wb
}(),
mock: &builderTest.MockBuilderService{
HasConfigured: false,
},
err: "builder not configured",
},
{
name: "non-blinded without configured builder",
blk: func() interfaces.SignedBeaconBlock {
b := util.NewBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
b.Block.Body.ExecutionPayload = &v1.ExecutionPayload{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
Transactions: make([][]byte, 0),
GasLimit: 123,
}
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
mock: &builderTest.MockBuilderService{
HasConfigured: false,
Payload: p,
},
returnedBlk: func() interfaces.SignedBeaconBlock {
b := util.NewBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
b.Block.Body.ExecutionPayload = p
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
},
{
name: "submit blind block error",
blk: func() interfaces.SignedBeaconBlock {
b := util.NewBlindedBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
mock: &builderTest.MockBuilderService{
Payload: &v1.ExecutionPayload{},
HasConfigured: true,
ErrSubmitBlindedBlock: errors.New("can't submit"),
},
err: "can't submit",
},
{
name: "head and payload root mismatch",
blk: func() interfaces.SignedBeaconBlock {
b := util.NewBlindedBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
mock: &builderTest.MockBuilderService{
HasConfigured: true,
Payload: p,
},
returnedBlk: func() interfaces.SignedBeaconBlock {
b := util.NewBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
b.Block.Body.ExecutionPayload = p
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
err: "header and payload root do not match",
},
{
name: "can get payload Bellatrix",
blk: func() interfaces.SignedBeaconBlock {
b := util.NewBlindedBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
txRoot, err := ssz.TransactionsRoot([][]byte{})
require.NoError(t, err)
b.Block.Body.ExecutionPayloadHeader = &v1.ExecutionPayloadHeader{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: txRoot[:],
GasLimit: 123,
}
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
mock: &builderTest.MockBuilderService{
HasConfigured: true,
Payload: p,
},
returnedBlk: func() interfaces.SignedBeaconBlock {
b := util.NewBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
b.Block.Body.ExecutionPayload = p
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
},
{
name: "can get payload Capella",
blk: func() interfaces.SignedBeaconBlock {
b := util.NewBlindedBeaconBlockCapella()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
b.Block.Body.BlsToExecutionChanges = []*eth.SignedBLSToExecutionChange{
{
Message: &eth.BLSToExecutionChange{
ValidatorIndex: 123,
FromBlsPubkey: []byte{'a'},
ToExecutionAddress: []byte{'a'},
},
Signature: []byte("sig123"),
},
{
Message: &eth.BLSToExecutionChange{
ValidatorIndex: 456,
FromBlsPubkey: []byte{'b'},
ToExecutionAddress: []byte{'b'},
},
Signature: []byte("sig456"),
},
}
txRoot, err := ssz.TransactionsRoot([][]byte{})
require.NoError(t, err)
withdrawalsRoot, err := ssz.WithdrawalSliceRoot([]*v1.Withdrawal{}, fieldparams.MaxWithdrawalsPerPayload)
require.NoError(t, err)
b.Block.Body.ExecutionPayloadHeader = &v1.ExecutionPayloadHeaderCapella{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: txRoot[:],
WithdrawalsRoot: withdrawalsRoot[:],
GasLimit: 123,
}
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
mock: &builderTest.MockBuilderService{
HasConfigured: true,
PayloadCapella: pCapella,
},
returnedBlk: func() interfaces.SignedBeaconBlock {
b := util.NewBeaconBlockCapella()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
b.Block.Body.BlsToExecutionChanges = []*eth.SignedBLSToExecutionChange{
{
Message: &eth.BLSToExecutionChange{
ValidatorIndex: 123,
FromBlsPubkey: []byte{'a'},
ToExecutionAddress: []byte{'a'},
},
Signature: []byte("sig123"),
},
{
Message: &eth.BLSToExecutionChange{
ValidatorIndex: 456,
FromBlsPubkey: []byte{'b'},
ToExecutionAddress: []byte{'b'},
},
Signature: []byte("sig456"),
},
}
b.Block.Body.ExecutionPayload = pCapella
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
unblinder, err := newUnblinder(tc.blk, tc.mock)
require.NoError(t, err)
gotBlk, err := unblinder.unblindBuilderBlock(context.Background())
if tc.err != "" {
require.ErrorContains(t, tc.err, err)
} else {
require.NoError(t, err)
require.DeepEqual(t, tc.returnedBlk, gotBlk)
}
})
}
}

View File

@@ -340,7 +340,10 @@ func (s *Service) Start() {
ExecutionPayloadReconstructor: s.cfg.ExecutionPayloadReconstructor,
BLSChangesPool: s.cfg.BLSChangesPool,
FinalizationFetcher: s.cfg.FinalizationFetcher,
ForkchoiceFetcher: s.cfg.ForkchoiceFetcher,
}
s.cfg.Router.HandleFunc("/eth/v2/beacon/blocks", beaconChainServerV1.PublishBlockV2)
s.cfg.Router.HandleFunc("/eth/v2/beacon/blinded_blocks", beaconChainServerV1.PublishBlindedBlockV2)
ethpbv1alpha1.RegisterNodeServer(s.grpcServer, nodeServer)
ethpbservice.RegisterBeaconNodeServer(s.grpcServer, nodeServerV1)
ethpbv1alpha1.RegisterHealthServer(s.grpcServer, nodeServer)

View File

@@ -20,6 +20,7 @@ type BeaconState interface {
ReadOnlyBeaconState
WriteOnlyBeaconState
Copy() BeaconState
CopyAllTries()
HashTreeRoot(ctx context.Context) ([32]byte, error)
StateProver
}

View File

@@ -2,11 +2,13 @@ package state_native
import (
"context"
"fmt"
"strconv"
"sync"
"testing"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stateutil"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
@@ -271,6 +273,94 @@ func TestBeaconState_NoDeadlock_Capella(t *testing.T) {
}
func TestBeaconState_AppendBalanceWithTrie(t *testing.T) {
newState := generateState(t)
st, ok := newState.(*BeaconState)
require.Equal(t, true, ok)
_, err := st.HashTreeRoot(context.Background())
assert.NoError(t, err)
for i := 0; i < 100; i++ {
if i%2 == 0 {
assert.NoError(t, st.UpdateBalancesAtIndex(primitives.ValidatorIndex(i), 1000))
}
if i%3 == 0 {
assert.NoError(t, st.AppendBalance(1000))
}
}
_, err = st.HashTreeRoot(context.Background())
assert.NoError(t, err)
newRt := bytesutil.ToBytes32(st.merkleLayers[0][types.Balances])
wantedRt, err := stateutil.Uint64ListRootWithRegistryLimit(st.Balances())
assert.NoError(t, err)
assert.Equal(t, wantedRt, newRt, "state roots are unequal")
}
func TestBeaconState_ModifyPreviousParticipationBits(t *testing.T) {
st, err := InitializeFromProtoUnsafePhase0(&ethpb.BeaconState{})
assert.NoError(t, err)
assert.ErrorContains(t, "ModifyPreviousParticipationBits is not supported", st.ModifyPreviousParticipationBits(func(val []byte) ([]byte, error) {
return nil, nil
}))
}
func TestBeaconState_ModifyCurrentParticipationBits(t *testing.T) {
st, err := InitializeFromProtoUnsafePhase0(&ethpb.BeaconState{})
assert.NoError(t, err)
assert.ErrorContains(t, "ModifyCurrentParticipationBits is not supported", st.ModifyCurrentParticipationBits(func(val []byte) ([]byte, error) {
return nil, nil
}))
}
func TestCopyAllTries(t *testing.T) {
newState := generateState(t)
_, err := newState.HashTreeRoot(context.Background())
assert.NoError(t, err)
assert.NoError(t, newState.UpdateBalancesAtIndex(0, 10000))
assert.NoError(t, newState.UpdateBlockRootAtIndex(0, [32]byte{'a'}))
_, err = newState.HashTreeRoot(context.Background())
assert.NoError(t, err)
st, ok := newState.(*BeaconState)
require.Equal(t, true, ok)
obj := st.stateFieldLeaves[types.Balances]
fieldAddr := fmt.Sprintf("%p", obj)
nState, ok := st.Copy().(*BeaconState)
require.Equal(t, true, ok)
obj = nState.stateFieldLeaves[types.Balances]
newFieldAddr := fmt.Sprintf("%p", obj)
assert.Equal(t, fieldAddr, newFieldAddr)
assert.Equal(t, 2, int(obj.FieldReference().Refs()))
nState.CopyAllTries()
obj = nState.stateFieldLeaves[types.Balances]
updatedFieldAddr := fmt.Sprintf("%p", obj)
assert.NotEqual(t, fieldAddr, updatedFieldAddr)
assert.Equal(t, 1, int(obj.FieldReference().Refs()))
assert.NoError(t, nState.UpdateBalancesAtIndex(20, 10000))
_, err = nState.HashTreeRoot(context.Background())
assert.NoError(t, err)
rt, err := st.stateFieldLeaves[types.Balances].TrieRoot()
assert.NoError(t, err)
newRt, err := nState.stateFieldLeaves[types.Balances].TrieRoot()
assert.NoError(t, err)
assert.NotEqual(t, rt, newRt)
}
func generateState(t *testing.T) state.BeaconState {
count := uint64(100)
vals := make([]*ethpb.Validator, 0, count)
bals := make([]uint64, 0, count)
@@ -334,39 +424,5 @@ func TestBeaconState_AppendBalanceWithTrie(t *testing.T) {
Slashings: make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
})
assert.NoError(t, err)
st, ok := newState.(*BeaconState)
require.Equal(t, true, ok)
_, err = st.HashTreeRoot(context.Background())
assert.NoError(t, err)
for i := 0; i < 100; i++ {
if i%2 == 0 {
assert.NoError(t, st.UpdateBalancesAtIndex(primitives.ValidatorIndex(i), 1000))
}
if i%3 == 0 {
assert.NoError(t, st.AppendBalance(1000))
}
}
_, err = st.HashTreeRoot(context.Background())
assert.NoError(t, err)
newRt := bytesutil.ToBytes32(st.merkleLayers[0][types.Balances])
wantedRt, err := stateutil.Uint64ListRootWithRegistryLimit(st.Balances())
assert.NoError(t, err)
assert.Equal(t, wantedRt, newRt, "state roots are unequal")
}
func TestBeaconState_ModifyPreviousParticipationBits(t *testing.T) {
st, err := InitializeFromProtoUnsafePhase0(&ethpb.BeaconState{})
assert.NoError(t, err)
assert.ErrorContains(t, "ModifyPreviousParticipationBits is not supported", st.ModifyPreviousParticipationBits(func(val []byte) ([]byte, error) {
return nil, nil
}))
}
func TestBeaconState_ModifyCurrentParticipationBits(t *testing.T) {
st, err := InitializeFromProtoUnsafePhase0(&ethpb.BeaconState{})
assert.NoError(t, err)
assert.ErrorContains(t, "ModifyCurrentParticipationBits is not supported", st.ModifyCurrentParticipationBits(func(val []byte) ([]byte, error) {
return nil, nil
}))
return newState
}

View File

@@ -840,6 +840,26 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex)
return [32]byte{}, errors.New("invalid field index provided")
}
// CopyAllTries copies our field tries from the state. This is used to
// detach shared field tries which hold references to other states, so that
// only this copied set references the current state.
func (b *BeaconState) CopyAllTries() {
b.lock.Lock()
defer b.lock.Unlock()
for fldIdx, fieldTrie := range b.stateFieldLeaves {
if fieldTrie.FieldReference() != nil {
fieldTrie.Lock()
if fieldTrie.FieldReference().Refs() > 1 {
fieldTrie.FieldReference().MinusRef()
newTrie := fieldTrie.CopyTrie()
b.stateFieldLeaves[fldIdx] = newTrie
}
fieldTrie.Unlock()
}
}
}
func (b *BeaconState) recomputeFieldTrie(index types.FieldIndex, elements interface{}) ([32]byte, error) {
fTrie := b.stateFieldLeaves[index]
fTrieMutex := fTrie.RWMutex
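A hedged sketch of the copy-on-write behaviour CopyAllTries provides, mirroring TestCopyAllTries above; parent, child, and ctx are placeholder names.
// After Copy(), field tries are shared (reference count > 1); CopyAllTries detaches them.
child, ok := parent.Copy().(*BeaconState)
if !ok {
return errors.New("not a state-native BeaconState")
}
child.CopyAllTries() // child now owns private copies of every previously shared trie
// Mutating child and re-hashing no longer touches parent's cached trie roots.
if err := child.UpdateBalancesAtIndex(20, 10000); err != nil {
return err
}
if _, err := child.HashTreeRoot(ctx); err != nil {
return err
}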

View File

@@ -4,6 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"batch_verifier.go",
"block_batcher.go",
"broadcast_bls_changes.go",
"context.go",
"deadlines.go",
@@ -37,7 +38,6 @@ go_library(
"subscriber_sync_committee_message.go",
"subscriber_sync_contribution_proof.go",
"subscription_topic_handler.go",
"utils.go",
"validate_aggregate_proof.go",
"validate_attester_slashing.go",
"validate_beacon_attestation.go",
@@ -134,6 +134,7 @@ go_test(
size = "small",
srcs = [
"batch_verifier_test.go",
"block_batcher_test.go",
"broadcast_bls_changes_test.go",
"context_test.go",
"decode_pubsub_test.go",
@@ -146,6 +147,7 @@ go_test(
"rpc_beacon_blocks_by_root_test.go",
"rpc_chunked_response_test.go",
"rpc_goodbye_test.go",
"rpc_handler_test.go",
"rpc_metadata_test.go",
"rpc_ping_test.go",
"rpc_send_request_test.go",
@@ -158,7 +160,6 @@ go_test(
"subscription_topic_handler_test.go",
"sync_fuzz_test.go",
"sync_test.go",
"utils_test.go",
"validate_aggregate_proof_test.go",
"validate_attester_slashing_test.go",
"validate_beacon_attestation_test.go",

View File

@@ -0,0 +1,231 @@
package sync
import (
"context"
"fmt"
"sort"
"time"
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filters"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)
// blockRangeBatcher encapsulates the logic for splitting up a block range request into fixed-size batches of
// blocks that are retrieved from the database, ensured to be canonical, sequential and unique.
// If a non-nil ticker is set, it is used to pause between batch lookups, acting as a rate limiter.
type blockRangeBatcher struct {
start primitives.Slot
end primitives.Slot
size uint64
db db.NoHeadAccessDatabase
limiter *limiter
ticker *time.Ticker
cf *canonicalFilter
current *blockBatch
}
func newBlockRangeBatcher(rp rangeParams, bdb db.NoHeadAccessDatabase, limiter *limiter, canonical canonicalChecker, ticker *time.Ticker) (*blockRangeBatcher, error) {
if bdb == nil {
return nil, errors.New("nil db param, unable to initialize blockRangeBatcher")
}
if limiter == nil {
return nil, errors.New("nil limiter param, unable to initialize blockRangeBatcher")
}
if canonical == nil {
return nil, errors.New("nil canonicalChecker param, unable to initialize blockRangeBatcher")
}
if ticker == nil {
return nil, errors.New("nil ticker param, unable to initialize blockRangeBatcher")
}
if rp.size == 0 {
return nil, fmt.Errorf("invalid batch size of %d", rp.size)
}
if rp.end < rp.start {
return nil, fmt.Errorf("batch end slot %d is lower than batch start %d", rp.end, rp.start)
}
cf := &canonicalFilter{canonical: canonical}
return &blockRangeBatcher{
start: rp.start,
end: rp.end,
size: rp.size,
db: bdb,
limiter: limiter,
ticker: ticker,
cf: cf,
}, nil
}
func (bb *blockRangeBatcher) next(ctx context.Context, stream libp2pcore.Stream) (blockBatch, bool) {
var nb blockBatch
var more bool
// The result of each call to next() is saved in the `current` field.
// If current is not nil, current.next figures out the next batch based on the previous one.
// If current is nil, newBlockBatch is used to generate the first batch.
if bb.current != nil {
current := *bb.current
nb, more = current.next(bb.end, bb.size)
} else {
nb, more = newBlockBatch(bb.start, bb.end, bb.size)
}
// newBlockBatch and next() both return a boolean to indicate whether calling .next() will yield another batch
// (based on whether we've gotten to the end slot yet). blockRangeBatcher.next does the same,
// and returns (zero value, false), to signal the end of the iteration.
if !more {
return blockBatch{}, false
}
if err := bb.limiter.validateRequest(stream, bb.size); err != nil {
return blockBatch{err: errors.Wrap(err, "throttled by rate limiter")}, false
}
// Wait for the ticker before doing anything expensive, unless this is the first batch.
if bb.ticker != nil && bb.current != nil {
<-bb.ticker.C
}
filter := filters.NewFilter().SetStartSlot(nb.start).SetEndSlot(nb.end)
blks, roots, err := bb.db.Blocks(ctx, filter)
if err != nil {
return blockBatch{err: errors.Wrap(err, "Could not retrieve blocks")}, false
}
rob := make([]blocks.ROBlock, 0)
if nb.start == 0 {
gb, err := bb.genesisBlock(ctx)
if err != nil {
return blockBatch{err: errors.Wrap(err, "could not retrieve genesis block")}, false
}
rob = append(rob, gb)
}
for i := 0; i < len(blks); i++ {
rb, err := blocks.NewROBlockWithRoot(blks[i], roots[i])
if err != nil {
return blockBatch{err: errors.Wrap(err, "Could not initialize ROBlock")}, false
}
rob = append(rob, rb)
}
// Filter and sort our retrieved blocks, so that we only return valid sets of blocks.
nb.lin, nb.nonlin, nb.err = bb.cf.filter(ctx, rob)
// Decrease allowed blocks capacity by the number of streamed blocks.
bb.limiter.add(stream, int64(1+nb.end.SubSlot(nb.start)))
bb.current = &nb
return *bb.current, true
}
func (bb *blockRangeBatcher) genesisBlock(ctx context.Context) (blocks.ROBlock, error) {
b, err := bb.db.GenesisBlock(ctx)
if err != nil {
return blocks.ROBlock{}, err
}
htr, err := b.Block().HashTreeRoot()
if err != nil {
return blocks.ROBlock{}, err
}
return blocks.NewROBlockWithRoot(b, htr)
}
type blockBatch struct {
start primitives.Slot
end primitives.Slot
lin []blocks.ROBlock // lin is a linear chain of blocks connected through parent_root. broken tails go in nonlin.
nonlin []blocks.ROBlock // if there is a break in the chain of parent->child relationships, the tail is stored here.
err error
}
func newBlockBatch(start, reqEnd primitives.Slot, size uint64) (blockBatch, bool) {
if start > reqEnd {
return blockBatch{}, false
}
nb := blockBatch{start: start, end: start.Add(size - 1)}
if nb.end > reqEnd {
nb.end = reqEnd
}
return nb, true
}
func (bat blockBatch) next(reqEnd primitives.Slot, size uint64) (blockBatch, bool) {
if bat.error() != nil {
return bat, false
}
if bat.nonLinear() {
return blockBatch{}, false
}
return newBlockBatch(bat.end.Add(1), reqEnd, size)
}
// canonical returns the list of linear, canonical blocks read from the db.
func (bb blockBatch) canonical() []blocks.ROBlock {
return bb.lin
}
// nonLinear is used to determine if there was a break in the chain of canonical blocks as read from the db.
// If true, code using the blockBatch should stop serving additional batches of blocks.
func (bb blockBatch) nonLinear() bool {
return len(bb.nonlin) > 0
}
func (bb blockBatch) error() error {
return bb.err
}
type canonicalChecker func(context.Context, [32]byte) (bool, error)
type canonicalFilter struct {
prevRoot [32]byte
canonical canonicalChecker
}
// filters all the provided blocks to ensure they are canonical and strictly linear.
func (cf *canonicalFilter) filter(ctx context.Context, blks []blocks.ROBlock) ([]blocks.ROBlock, []blocks.ROBlock, error) {
blks = sortedUniqueBlocks(blks)
seq := make([]blocks.ROBlock, 0, len(blks))
nseq := make([]blocks.ROBlock, 0)
for i, b := range blks {
cb, err := cf.canonical(ctx, b.Root())
if err != nil {
return nil, nil, err
}
if !cb {
continue
}
// prevRoot will be the zero value until we find the first canonical block in the stream seen by an instance
// of canonicalFilter. filter is called in batches; prevRoot can be the last root from the previous batch.
first := cf.prevRoot == [32]byte{}
// We assume blocks are processed in order, so the previous canonical root should be the parent of the next.
if !first && cf.prevRoot != b.Block().ParentRoot() {
// If the current block isn't descended from the last, something is wrong. Append everything remaining
// to the list of non-linear blocks, and stop building the canonical list.
nseq = append(nseq, blks[i:]...)
break
}
seq = append(seq, blks[i])
// Set the previous root as the
// newly added block's root
cf.prevRoot = b.Root()
}
return seq, nseq, nil
}
// sortedUniqueBlocks sorts the provided []ROBlock slice in ascending order and returns it with duplicates removed.
func sortedUniqueBlocks(blks []blocks.ROBlock) []blocks.ROBlock {
// Remove duplicate blocks received
sort.Sort(blocks.ROBlockSlice(blks))
if len(blks) < 2 {
return blks
}
u := 0
for i := 1; i < len(blks); i++ {
if blks[i].Root() != blks[u].Root() {
u += 1
if u != i {
blks[u] = blks[i]
}
}
}
return blks[:u+1]
}
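To make the batch boundary arithmetic concrete, here is a short illustrative walk through newBlockBatch and next, matching the "end aligned" case in the test file below.
// Requesting slots 0..40 with a batch size of 20 yields the batches
// [0,19], [20,39], [40,40]; the iteration then reports more == false.
for nb, more := newBlockBatch(0, 40, 20); more; nb, more = nb.next(40, 20) {
fmt.Printf("serving batch %d-%d\n", nb.start, nb.end)
}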

View File

@@ -0,0 +1,134 @@
package sync
import (
"math/rand"
"testing"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func TestSortedObj_SortBlocksRoots(t *testing.T) {
source := rand.NewSource(33)
randGen := rand.New(source)
randFunc := func() int64 {
return randGen.Int63n(50)
}
var blks []blocks.ROBlock
for i := 0; i < 10; i++ {
slot := primitives.Slot(randFunc())
newBlk, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: slot, Body: &ethpb.BeaconBlockBody{}}})
require.NoError(t, err)
root := bytesutil.ToBytes32(bytesutil.Bytes32(uint64(slot)))
b, err := blocks.NewROBlockWithRoot(newBlk, root)
require.NoError(t, err)
blks = append(blks, b)
}
newBlks := sortedUniqueBlocks(blks)
previousSlot := primitives.Slot(0)
for _, b := range newBlks {
if b.Block().Slot() < previousSlot {
t.Errorf("Block list is not sorted as %d is smaller than previousSlot %d", b.Block().Slot(), previousSlot)
}
previousSlot = b.Block().Slot()
}
}
func TestSortedObj_NoDuplicates(t *testing.T) {
source := rand.NewSource(33)
randGen := rand.New(source)
var blks []blocks.ROBlock
randFunc := func() int64 {
return randGen.Int63n(50)
}
for i := 0; i < 10; i++ {
slot := primitives.Slot(randFunc())
newBlk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: slot, Body: &ethpb.BeaconBlockBody{}}}
// append twice
wsb, err := blocks.NewSignedBeaconBlock(newBlk)
require.NoError(t, err)
wsbCopy, err := wsb.Copy()
require.NoError(t, err)
root := bytesutil.ToBytes32(bytesutil.Bytes32(uint64(slot)))
b, err := blocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
b2, err := blocks.NewROBlockWithRoot(wsbCopy, root)
require.NoError(t, err)
blks = append(blks, b, b2)
}
dedup := sortedUniqueBlocks(blks)
roots := make(map[[32]byte]int)
for i, b := range dedup {
if di, dup := roots[b.Root()]; dup {
t.Errorf("Duplicated root %#x at index %d and %d", b.Root(), di, i)
}
roots[b.Root()] = i
}
}
func TestBlockBatchNext(t *testing.T) {
cases := []struct {
name string
batch blockBatch
start primitives.Slot
reqEnd primitives.Slot
size uint64
next []blockBatch
more []bool
err error
}{
{
name: "end aligned",
batch: blockBatch{start: 0, end: 20},
start: 0,
reqEnd: 40,
size: 20,
next: []blockBatch{
{start: 0, end: 19},
{start: 20, end: 39},
{start: 40, end: 40},
{},
},
more: []bool{true, true, true, false},
},
{
name: "batches with more",
batch: blockBatch{start: 0, end: 22},
start: 0,
reqEnd: 40,
size: 23,
next: []blockBatch{
{start: 0, end: 22},
{start: 23, end: 40},
{},
},
more: []bool{true, true, false},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
var next blockBatch
var more bool
i := 0
for next, more = newBlockBatch(c.start, c.reqEnd, c.size); more; next, more = next.next(c.reqEnd, c.size) {
exp := c.next[i]
require.Equal(t, c.more[i], more)
require.Equal(t, exp.start, next.start)
require.Equal(t, exp.end, next.end)
if exp.err != nil {
require.ErrorIs(t, next.err, exp.err)
} else {
require.NoError(t, next.err)
}
i++
}
})
}
}

View File

@@ -118,6 +118,7 @@ go_test(
"//beacon-chain/startup:go_default_library",
"//beacon-chain/sync:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",

View File

@@ -20,6 +20,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
beaconsync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -55,6 +56,11 @@ func TestMain(m *testing.M) {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetOutput(io.Discard)
resetCfg := features.InitWithReset(&features.Flags{
EnablePeerScorer: true,
})
defer resetCfg()
resetFlags := flags.Get()
flags.Init(&flags.GlobalFlags{
BlockBatchLimit: 64,

View File

@@ -127,9 +127,9 @@ var (
)
// Sync committee verification performance.
syncMessagesForUnkownBlocks = promauto.NewCounter(
syncMessagesForUnknownBlocks = promauto.NewCounter(
prometheus.CounterOpts{
Name: "sync_committee_messages_unnkown_root",
Name: "sync_committee_messages_unknown_root",
Help: "The number of sync committee messages that are checked against DB to see if there vote is for an unknown root",
},
)

View File

@@ -21,6 +21,7 @@ import (
// This defines how often a node cleans up and processes pending attestations in the queue.
var processPendingAttsPeriod = slots.DivideSlotBy(2 /* twice per slot */)
var pendingAttsLimit = 10000
// This processes pending attestation queues on every `processPendingAttsPeriod`.
func (s *Service) processPendingAttsQueue() {
@@ -164,6 +165,16 @@ func (s *Service) savePendingAtt(att *ethpb.SignedAggregateAttestationAndProof)
s.pendingAttsLock.Lock()
defer s.pendingAttsLock.Unlock()
numOfPendingAtts := 0
for _, v := range s.blkRootToPendingAtts {
numOfPendingAtts += len(v)
}
// Exit early if we exceed the pending attestations limit.
if numOfPendingAtts >= pendingAttsLimit {
return
}
_, ok := s.blkRootToPendingAtts[root]
if !ok {
s.blkRootToPendingAtts[root] = []*ethpb.SignedAggregateAttestationAndProof{att}

View File

@@ -425,3 +425,37 @@ func TestValidatePendingAtts_NoDuplicatingAggregatorIndex(t *testing.T) {
assert.Equal(t, 1, len(s.blkRootToPendingAtts[r1]), "Did not save pending atts")
assert.Equal(t, 1, len(s.blkRootToPendingAtts[r2]), "Did not save pending atts")
}
func TestSavePendingAtts_BeyondLimit(t *testing.T) {
s := &Service{
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
}
for i := 0; i < pendingAttsLimit; i++ {
s.savePendingAtt(&ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
AggregatorIndex: primitives.ValidatorIndex(i),
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 1, BeaconBlockRoot: bytesutil.Bytes32(uint64(i))}}}})
}
r1 := [32]byte(bytesutil.Bytes32(0))
r2 := [32]byte(bytesutil.Bytes32(uint64(pendingAttsLimit) - 1))
assert.Equal(t, 1, len(s.blkRootToPendingAtts[r1]), "Did not save pending atts")
assert.Equal(t, 1, len(s.blkRootToPendingAtts[r2]), "Did not save pending atts")
for i := pendingAttsLimit; i < pendingAttsLimit+20; i++ {
s.savePendingAtt(&ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
AggregatorIndex: primitives.ValidatorIndex(i),
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{Slot: 1, BeaconBlockRoot: bytesutil.Bytes32(uint64(i))}}}})
}
r1 = [32]byte(bytesutil.Bytes32(uint64(pendingAttsLimit)))
r2 = [32]byte(bytesutil.Bytes32(uint64(pendingAttsLimit) + 10))
assert.Equal(t, 0, len(s.blkRootToPendingAtts[r1]), "Saved pending atts")
assert.Equal(t, 0, len(s.blkRootToPendingAtts[r2]), "Saved pending atts")
}

View File

@@ -248,7 +248,7 @@ func (s *Service) sendBatchRootRequest(ctx context.Context, roots [][32]byte, ra
if len(bestPeers) == 0 {
return nil
}
roots = s.dedupRoots(roots)
roots = dedupRoots(roots)
// Randomly choose a peer to query from our best peers. If that peer cannot return
// all the requested blocks, we randomly select another peer.
pid := bestPeers[randGen.Int()%len(bestPeers)]
@@ -456,3 +456,16 @@ func slotToCacheKey(s primitives.Slot) string {
b := bytesutil.SlotToBytesBigEndian(s)
return string(b)
}
func dedupRoots(roots [][32]byte) [][32]byte {
newRoots := make([][32]byte, 0, len(roots))
rootMap := make(map[[32]byte]bool, len(roots))
for i, r := range roots {
if rootMap[r] {
continue
}
rootMap[r] = true
newRoots = append(newRoots, roots[i])
}
return newRoots
}

View File

@@ -6,7 +6,6 @@ import (
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filters"
p2ptypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -26,36 +25,18 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
defer cancel()
SetRPCStreamDeadlines(stream)
// Ticker to stagger out large requests.
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
m, ok := msg.(*pb.BeaconBlocksByRangeRequest)
if !ok {
return errors.New("message is not type *pb.BeaconBlockByRangeRequest")
}
if err := s.validateRangeRequest(m); err != nil {
log.WithField("start-slot", m.StartSlot).WithField("count", m.Count).Debug("BeaconBlocksByRangeRequest")
rp, err := validateRangeRequest(m, s.cfg.clock.CurrentSlot())
if err != nil {
s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
tracing.AnnotateError(span, err)
return err
}
// Only have range requests with a step of 1 being processed.
if m.Step > 1 {
m.Step = 1
}
// The initial count for the first batch to be returned back.
count := m.Count
allowedBlocksPerSecond := uint64(flags.Get().BlockBatchLimit)
if count > allowedBlocksPerSecond {
count = allowedBlocksPerSecond
}
// initial batch start and end slots to be returned to remote peer.
startSlot := m.StartSlot
endSlot := startSlot.Add(m.Step * (count - 1))
// The final requested slot from remote peer.
endReqSlot := startSlot.Add(m.Step * (m.Count - 1))
blockLimiter, err := s.rateLimiter.topicCollector(string(stream.Protocol()))
if err != nil {
@@ -63,129 +44,115 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
}
remainingBucketCapacity := blockLimiter.Remaining(stream.Conn().RemotePeer().String())
span.AddAttributes(
trace.Int64Attribute("start", int64(startSlot)), // lint:ignore uintcast -- This conversion is OK for tracing.
trace.Int64Attribute("end", int64(endReqSlot)), // lint:ignore uintcast -- This conversion is OK for tracing.
trace.Int64Attribute("step", int64(m.Step)),
trace.Int64Attribute("start", int64(rp.start)), // lint:ignore uintcast -- This conversion is OK for tracing.
trace.Int64Attribute("end", int64(rp.end)), // lint:ignore uintcast -- This conversion is OK for tracing.
trace.Int64Attribute("count", int64(m.Count)),
trace.StringAttribute("peer", stream.Conn().RemotePeer().Pretty()),
trace.Int64Attribute("remaining_capacity", remainingBucketCapacity),
)
// Ticker to stagger out large requests.
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
batcher, err := newBlockRangeBatcher(rp, s.cfg.beaconDB, s.rateLimiter, s.cfg.chain.IsCanonical, ticker)
if err != nil {
log.WithError(err).Info("error in BlocksByRange batch")
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
tracing.AnnotateError(span, err)
return err
}
// prevRoot is used to ensure that returned chains are strictly linear for singular steps
// by comparing the previous root of the block in the list with the current block's parent.
var prevRoot [32]byte
for startSlot <= endReqSlot {
if err := s.rateLimiter.validateRequest(stream, allowedBlocksPerSecond); err != nil {
tracing.AnnotateError(span, err)
var batch blockBatch
var more bool
for batch, more = batcher.next(ctx, stream); more; batch, more = batcher.next(ctx, stream) {
batchStart := time.Now()
if err := s.writeBlockBatchToStream(ctx, batch, stream); err != nil {
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
return err
}
if endSlot-startSlot > rangeLimit {
s.writeErrorResponseToStream(responseCodeInvalidRequest, p2ptypes.ErrInvalidRequest.Error(), stream)
err := p2ptypes.ErrInvalidRequest
tracing.AnnotateError(span, err)
return err
}
err := s.writeBlockRangeToStream(ctx, startSlot, endSlot, m.Step, &prevRoot, stream)
if err != nil && !errors.Is(err, p2ptypes.ErrInvalidParent) {
return err
}
// Reduce capacity of peer in the rate limiter first.
// Decrease allowed blocks capacity by the number of streamed blocks.
if startSlot <= endSlot {
s.rateLimiter.add(stream, int64(1+endSlot.SubSlot(startSlot).Div(m.Step)))
}
// Exit in the event we have a disjoint chain to
// return.
if errors.Is(err, p2ptypes.ErrInvalidParent) {
break
}
// Recalculate start and end slots for the next batch to be returned to the remote peer.
startSlot = endSlot.Add(m.Step)
endSlot = startSlot.Add(m.Step * (allowedBlocksPerSecond - 1))
if endSlot > endReqSlot {
endSlot = endReqSlot
}
// do not wait if all blocks have already been sent.
if startSlot > endReqSlot {
break
}
// wait for ticker before resuming streaming blocks to remote peer.
<-ticker.C
rpcBlocksByRangeResponseLatency.Observe(float64(time.Since(batchStart).Milliseconds()))
}
if err := batch.error(); err != nil {
log.WithError(err).Info("error in BlocksByRange batch")
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
tracing.AnnotateError(span, err)
return err
}
closeStream(stream, log)
return nil
}
func (s *Service) writeBlockRangeToStream(ctx context.Context, startSlot, endSlot primitives.Slot, step uint64,
prevRoot *[32]byte, stream libp2pcore.Stream) error {
type rangeParams struct {
start primitives.Slot
end primitives.Slot
size uint64
}
func validateRangeRequest(r *pb.BeaconBlocksByRangeRequest, current primitives.Slot) (rangeParams, error) {
rp := rangeParams{
start: r.StartSlot,
size: r.Count,
}
maxRequest := params.BeaconNetworkConfig().MaxRequestBlocks
// Ensure all request params are within appropriate bounds
if rp.size == 0 || rp.size > maxRequest {
return rangeParams{}, p2ptypes.ErrInvalidRequest
}
// Allow some wiggle room, up to double the MaxRequestBlocks past the current slot,
// to give nodes syncing close to the head of the chain some margin for error.
maxStart, err := current.SafeAdd(maxRequest * 2)
if err != nil {
return rangeParams{}, p2ptypes.ErrInvalidRequest
}
if rp.start > maxStart {
return rangeParams{}, p2ptypes.ErrInvalidRequest
}
rp.end, err = rp.start.SafeAdd((rp.size - 1))
if err != nil {
return rangeParams{}, p2ptypes.ErrInvalidRequest
}
limit := uint64(flags.Get().BlockBatchLimit)
if limit > maxRequest {
limit = maxRequest
}
if rp.size > limit {
rp.size = limit
}
return rp, nil
}
func (s *Service) writeBlockBatchToStream(ctx context.Context, batch blockBatch, stream libp2pcore.Stream) error {
ctx, span := trace.StartSpan(ctx, "sync.WriteBlockRangeToStream")
defer span.End()
filter := filters.NewFilter().SetStartSlot(startSlot).SetEndSlot(endSlot).SetSlotStep(step)
blks, roots, err := s.cfg.beaconDB.Blocks(ctx, filter)
if err != nil {
log.WithError(err).Debug("Could not retrieve blocks")
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
tracing.AnnotateError(span, err)
return err
}
// handle genesis case
if startSlot == 0 {
genBlock, genRoot, err := s.retrieveGenesisBlock(ctx)
if err != nil {
log.WithError(err).Debug("Could not retrieve genesis block")
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
tracing.AnnotateError(span, err)
return err
blinded := make([]interfaces.ReadOnlySignedBeaconBlock, 0)
for _, b := range batch.canonical() {
if err := blocks.BeaconBlockIsNil(b); err != nil {
continue
}
blks = append([]interfaces.ReadOnlySignedBeaconBlock{genBlock}, blks...)
roots = append([][32]byte{genRoot}, roots...)
}
// Filter and sort our retrieved blocks, so that
// we only return valid sets of blocks.
blks, roots, err = s.dedupBlocksAndRoots(blks, roots)
if err != nil {
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
tracing.AnnotateError(span, err)
return err
}
blks, roots = s.sortBlocksAndRoots(blks, roots)
blks, err = s.filterBlocks(ctx, blks, roots, prevRoot, step, startSlot)
if err != nil && err != p2ptypes.ErrInvalidParent {
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
tracing.AnnotateError(span, err)
return err
}
start := time.Now()
// If the blocks are blinded, we reconstruct the full block via the execution client.
blindedExists := false
blindedIndex := 0
for i, b := range blks {
// Since the blocks are sorted in ascending order, we assume that the following
// blocks from the first blinded block are also ascending.
if b.IsBlinded() {
blindedExists = true
blindedIndex = i
break
blinded = append(blinded, b.ReadOnlySignedBeaconBlock)
continue
}
if chunkErr := s.chunkBlockWriter(stream, b); chunkErr != nil {
log.WithError(chunkErr).Error("Could not send a chunked response")
return chunkErr
}
}
var reconstructedBlock []interfaces.SignedBeaconBlock
if blindedExists {
reconstructedBlock, err = s.cfg.executionPayloadReconstructor.ReconstructFullBellatrixBlockBatch(ctx, blks[blindedIndex:])
if err != nil {
log.WithError(err).Error("Could not reconstruct full bellatrix block batch from blinded bodies")
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
return err
}
if len(blinded) == 0 {
return nil
}
for _, b := range blks {
reconstructed, err := s.cfg.executionPayloadReconstructor.ReconstructFullBellatrixBlockBatch(ctx, blinded)
if err != nil {
log.WithError(err).Error("Could not reconstruct full bellatrix block batch from blinded bodies")
return err
}
for _, b := range reconstructed {
if err := blocks.BeaconBlockIsNil(b); err != nil {
continue
}
@@ -194,115 +161,9 @@ func (s *Service) writeBlockRangeToStream(ctx context.Context, startSlot, endSlo
}
if chunkErr := s.chunkBlockWriter(stream, b); chunkErr != nil {
log.WithError(chunkErr).Debug("Could not send a chunked response")
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
tracing.AnnotateError(span, chunkErr)
return chunkErr
}
}
for _, b := range reconstructedBlock {
if err := blocks.BeaconBlockIsNil(b); err != nil {
continue
}
if b.IsBlinded() {
continue
}
if chunkErr := s.chunkBlockWriter(stream, b); chunkErr != nil {
log.WithError(chunkErr).Debug("Could not send a chunked response")
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
tracing.AnnotateError(span, chunkErr)
return chunkErr
}
}
rpcBlocksByRangeResponseLatency.Observe(float64(time.Since(start).Milliseconds()))
// Return error in the event we have an invalid parent.
return err
}
func (s *Service) validateRangeRequest(r *pb.BeaconBlocksByRangeRequest) error {
startSlot := r.StartSlot
count := r.Count
step := r.Step
maxRequestBlocks := params.BeaconNetworkConfig().MaxRequestBlocks
// Add a buffer for possible large range requests from nodes syncing close to the
// head of the chain.
buffer := rangeLimit * 2
highestExpectedSlot := s.cfg.clock.CurrentSlot().Add(uint64(buffer))
// Ensure all request params are within appropriate bounds
if count == 0 || count > maxRequestBlocks {
return p2ptypes.ErrInvalidRequest
}
if step == 0 || step > rangeLimit {
return p2ptypes.ErrInvalidRequest
}
if startSlot > highestExpectedSlot {
return p2ptypes.ErrInvalidRequest
}
endSlot := startSlot.Add(step * (count - 1))
if endSlot-startSlot > rangeLimit {
return p2ptypes.ErrInvalidRequest
}
return nil
}
// filters all the provided blocks to ensure they are canonical
// and are strictly linear.
func (s *Service) filterBlocks(ctx context.Context, blks []interfaces.ReadOnlySignedBeaconBlock, roots [][32]byte, prevRoot *[32]byte,
step uint64, startSlot primitives.Slot) ([]interfaces.ReadOnlySignedBeaconBlock, error) {
if len(blks) != len(roots) {
return nil, errors.New("input blks and roots are diff lengths")
}
newBlks := make([]interfaces.ReadOnlySignedBeaconBlock, 0, len(blks))
for i, b := range blks {
isCanonical, err := s.cfg.chain.IsCanonical(ctx, roots[i])
if err != nil {
return nil, err
}
parentValid := *prevRoot != [32]byte{}
isLinear := *prevRoot == b.Block().ParentRoot()
isSingular := step == 1
slotDiff, err := b.Block().Slot().SafeSubSlot(startSlot)
if err != nil {
return nil, err
}
slotDiff, err = slotDiff.SafeMod(step)
if err != nil {
return nil, err
}
isRequestedSlotStep := slotDiff == 0
if isRequestedSlotStep && isCanonical {
// Exit early if our valid block is non linear.
if parentValid && isSingular && !isLinear {
return newBlks, p2ptypes.ErrInvalidParent
}
newBlks = append(newBlks, blks[i])
// Set the previous root as the
// newly added block's root
currRoot := roots[i]
*prevRoot = currRoot
}
}
return newBlks, nil
}
func (s *Service) writeErrorResponseToStream(responseCode byte, reason string, stream libp2pcore.Stream) {
writeErrorResponseToStream(responseCode, reason, stream, s.cfg.p2p)
}
func (s *Service) retrieveGenesisBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, [32]byte, error) {
genBlock, err := s.cfg.beaconDB.GenesisBlock(ctx)
if err != nil {
return nil, [32]byte{}, err
}
genRoot, err := genBlock.Block().HashTreeRoot()
if err != nil {
return nil, [32]byte{}, err
}
return genBlock, genRoot, nil
}
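As a worked illustration of the new validateRangeRequest clamping (values assumed, not taken from the diff): with flags.Get().BlockBatchLimit at 64, a request with StartSlot=100 and Count=500 keeps the full requested range but limits the batch size.
// Illustrative only; 64 is an assumed BlockBatchLimit value.
rp, err := validateRangeRequest(&pb.BeaconBlocksByRangeRequest{StartSlot: 100, Count: 500}, current)
// err == nil, rp.start == 100, rp.end == 599 (start + count - 1), rp.size == 64 (clamped),
// so the batcher walks the whole requested range in batches of at most 64 blocks.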

View File

@@ -25,7 +25,6 @@ import (
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
leakybucket "github.com/prysmaticlabs/prysm/v4/container/leaky-bucket"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
@@ -63,14 +62,9 @@ func TestRPCBeaconBlocksByRange_RPCHandlerReturnsBlocks(t *testing.T) {
util.SaveBlock(t, context.Background(), d, blk)
}
clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
// Start service with 160 as allowed blocks capacity (and almost zero capacity recovery).
r := &Service{
cfg: &config{
p2p: p1, beaconDB: d, chain: &chainMock.ChainService{},
clock: startup.NewClock(time.Unix(0, 0), [32]byte{}),
},
rateLimiter: newRateLimiter(p1),
}
r := &Service{cfg: &config{p2p: p1, beaconDB: d, clock: clock, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, int64(req.Count*10), time.Second, false)
@@ -131,8 +125,8 @@ func TestRPCBeaconBlocksByRange_ReturnCorrectNumberBack(t *testing.T) {
}
require.NoError(t, d.SaveGenesisBlockRoot(context.Background(), genRoot))
// Start service with 160 as allowed blocks capacity (and almost zero capacity recovery).
clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
// Start service with 160 as allowed blocks capacity (and almost zero capacity recovery).
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)}
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
topic := string(pcl)
@@ -240,14 +234,15 @@ func TestRPCBeaconBlocksByRange_ReconstructsPayloads(t *testing.T) {
}
require.NoError(t, d.SaveGenesisBlockRoot(context.Background(), genRoot))
clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
// Start service with 160 as allowed blocks capacity (and almost zero capacity recovery).
r := &Service{
cfg: &config{
p2p: p1,
beaconDB: d,
chain: &chainMock.ChainService{},
clock: clock,
executionPayloadReconstructor: mockEngine,
clock: startup.NewClock(time.Unix(0, 0), [32]byte{}),
},
rateLimiter: newRateLimiter(p1),
}
@@ -317,9 +312,9 @@ func TestRPCBeaconBlocksByRange_RPCHandlerReturnsSortedBlocks(t *testing.T) {
j++
}
// Start service with 160 as allowed blocks capacity (and almost zero capacity recovery).
clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)}
// Start service with 160 as allowed blocks capacity (and almost zero capacity recovery).
r := &Service{cfg: &config{p2p: p1, beaconDB: d, clock: clock, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, int64(req.Count*10), time.Second, false)
@@ -384,7 +379,8 @@ func TestRPCBeaconBlocksByRange_ReturnsGenesisBlock(t *testing.T) {
prevRoot = rt
}
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: startup.NewClock(time.Unix(0, 0), [32]byte{})}, rateLimiter: newRateLimiter(p1)}
clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
r := &Service{cfg: &config{p2p: p1, beaconDB: d, clock: clock, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(10000, 10000, time.Second, false)
@@ -503,7 +499,7 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
capacity := int64(flags.Get().BlockBatchLimit * 3)
clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)}
r := &Service{cfg: &config{p2p: p1, beaconDB: d, clock: clock, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
topic := string(pcl)
@@ -534,7 +530,7 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
capacity := int64(flags.Get().BlockBatchLimit * flags.Get().BlockBatchLimitBurstFactor)
clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)}
r := &Service{cfg: &config{p2p: p1, beaconDB: d, clock: clock, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, capacity, time.Second, false)
@@ -565,15 +561,7 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
func TestRPCBeaconBlocksByRange_validateRangeRequest(t *testing.T) {
slotsSinceGenesis := primitives.Slot(1000)
offset := int64(slotsSinceGenesis.Mul(params.BeaconConfig().SecondsPerSlot))
chain := &chainMock.ChainService{
Genesis: time.Now().Add(time.Second * time.Duration(-1*offset)),
}
r := &Service{
cfg: &config{
chain: chain,
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
},
}
clock := startup.NewClock(time.Now().Add(time.Second*time.Duration(-1*offset)), [32]byte{})
tests := []struct {
name string
@@ -613,8 +601,7 @@ func TestRPCBeaconBlocksByRange_validateRangeRequest(t *testing.T) {
Step: 0,
Count: 1,
},
expectedError: p2ptypes.ErrInvalidRequest,
errorToLog: "validation did not fail with bad step",
expectedError: nil, // The Step param is ignored in v2 RPC
},
{
name: "Over limit Step",
@@ -622,8 +609,7 @@ func TestRPCBeaconBlocksByRange_validateRangeRequest(t *testing.T) {
Step: rangeLimit + 1,
Count: 1,
},
expectedError: p2ptypes.ErrInvalidRequest,
errorToLog: "validation did not fail with bad step",
expectedError: nil, // The Step param is ignored in v2 RPC
},
{
name: "Correct Step",
@@ -658,8 +644,7 @@ func TestRPCBeaconBlocksByRange_validateRangeRequest(t *testing.T) {
Step: 3,
Count: uint64(slotsSinceGenesis / 2),
},
expectedError: p2ptypes.ErrInvalidRequest,
errorToLog: "validation did not fail with bad range",
expectedError: nil, // this is fine with the deprecation of Step
},
{
name: "Valid Request",
@@ -674,10 +659,11 @@ func TestRPCBeaconBlocksByRange_validateRangeRequest(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, err := validateRangeRequest(tt.req, clock.CurrentSlot())
if tt.expectedError != nil {
assert.ErrorContains(t, tt.expectedError.Error(), r.validateRangeRequest(tt.req), tt.errorToLog)
assert.ErrorContains(t, tt.expectedError.Error(), err, tt.errorToLog)
} else {
assert.NoError(t, r.validateRangeRequest(tt.req), tt.errorToLog)
assert.NoError(t, err, tt.errorToLog)
}
})
}
@@ -905,7 +891,7 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) {
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)}
r := &Service{cfg: &config{p2p: p1, beaconDB: d, clock: clock, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
r.rateLimiter.limiterMap[string(pcl)] = leakybucket.NewCollector(0.000001, 640, time.Second, false)
req := &ethpb.BeaconBlocksByRangeRequest{
StartSlot: 1,
@@ -937,7 +923,7 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) {
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)}
r := &Service{cfg: &config{p2p: p1, beaconDB: d, clock: clock, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
r.rateLimiter.limiterMap[string(pcl)] = leakybucket.NewCollector(0.000001, 640, time.Second, false)
req := &ethpb.BeaconBlocksByRangeRequest{
StartSlot: 1,
@@ -971,7 +957,6 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) {
p1.Connect(p2)
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, rateLimiter: newRateLimiter(p1)}
r.rateLimiter.limiterMap[string(pcl)] = leakybucket.NewCollector(0.000001, 640, time.Second, false)
@@ -1087,12 +1072,6 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) {
}
func TestRPCBeaconBlocksByRange_FilterBlocks_PreviousRoot(t *testing.T) {
p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
d := db.SetupDB(t)
req := &ethpb.BeaconBlocksByRangeRequest{
StartSlot: 100,
Step: 1,
@@ -1102,8 +1081,7 @@ func TestRPCBeaconBlocksByRange_FilterBlocks_PreviousRoot(t *testing.T) {
// Populate the database with blocks that would match the request.
var prevRoot [32]byte
var err error
blks := []interfaces.ReadOnlySignedBeaconBlock{}
var roots [][32]byte
var blks []blocks.ROBlock
for i := req.StartSlot; i < req.StartSlot.Add(req.Count); i += primitives.Slot(1) {
blk := util.NewBeaconBlock()
blk.Block.Slot = i
@@ -1112,21 +1090,19 @@ func TestRPCBeaconBlocksByRange_FilterBlocks_PreviousRoot(t *testing.T) {
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
blks = append(blks, wsb)
copiedRt := prevRoot
roots = append(roots, copiedRt)
b, err := blocks.NewROBlockWithRoot(wsb, copiedRt)
require.NoError(t, err)
blks = append(blks, b)
}
// Start service with 160 as allowed blocks capacity (and almost zero capacity recovery).
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
var initialRoot [32]byte
ptrRt := &initialRoot
newBlks, err := r.filterBlocks(context.Background(), blks, roots, ptrRt, req.Step, req.StartSlot)
chain := &chainMock.ChainService{}
cf := canonicalFilter{canonical: chain.IsCanonical}
seq, nseq, err := cf.filter(context.Background(), blks)
require.NoError(t, err)
require.Equal(t, len(blks), len(newBlks))
require.Equal(t, len(blks), len(seq))
require.Equal(t, 0, len(nseq))
// pointer should reference a new root.
require.NotEqual(t, *ptrRt, [32]byte{})
require.NotEqual(t, cf.prevRoot, [32]byte{})
}

View File

@@ -0,0 +1,56 @@
package sync
import (
"context"
"testing"
"time"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/protocol"
p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
)
type rpcHandlerTest struct {
t *testing.T
topic protocol.ID
timeout time.Duration
err error
s *Service
}
func (rt *rpcHandlerTest) testHandler(nh network.StreamHandler, rh rpcHandler, rhi interface{}) {
ctx, cancel := context.WithTimeout(context.Background(), rt.timeout)
defer func() {
cancel()
}()
w := util.NewWaiter()
server := p2ptest.NewTestP2P(rt.t)
client, ok := rt.s.cfg.p2p.(*p2ptest.TestP2P)
require.Equal(rt.t, true, ok)
client.Connect(server)
defer func() {
require.NoError(rt.t, client.Disconnect(server.PeerID()))
}()
require.Equal(rt.t, 1, len(client.BHost.Network().Peers()), "Expected peers to be connected")
h := func(stream network.Stream) {
defer w.Done()
nh(stream)
}
server.BHost.SetStreamHandler(protocol.ID(rt.topic), h)
stream, err := client.BHost.NewStream(ctx, server.BHost.ID(), protocol.ID(rt.topic))
require.NoError(rt.t, err)
err = rh(ctx, rhi, stream)
if rt.err == nil {
require.NoError(rt.t, err)
} else {
require.ErrorIs(rt.t, err, rt.err)
}
w.RequireDoneBeforeCancel(ctx, rt.t)
}
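The helper above spins up a client/server TestP2P pair, registers the server-side stream handler, opens one stream, and runs the RPC handler under test against it. A hedged usage sketch follows; the topic, the request value, and both handlers are hypothetical stand-ins, and only rpcHandlerTest's fields and the testHandler signature come from the file above (assumes the package's existing test imports plus context and ethpb):

func TestExampleRPCHandler(t *testing.T) {
	p := p2ptest.NewTestP2P(t)
	s := &Service{cfg: &config{p2p: p}}
	rt := &rpcHandlerTest{
		t:       t,
		topic:   protocol.ID("/example/req/1/ssz_snappy"), // hypothetical topic
		timeout: 5 * time.Second,
		err:     nil, // expect the handler to succeed
		s:       s,
	}
	// nh runs on the server side of the stream; rh stands in for the handler under test.
	nh := func(stream network.Stream) { _ = stream.Close() }
	rh := func(ctx context.Context, msg interface{}, stream network.Stream) error {
		// Write one byte so the server's handler fires, then close our write side.
		if _, err := stream.Write([]byte{0x00}); err != nil {
			return err
		}
		return stream.CloseWrite()
	}
	rt.testHandler(nh, rh, &ethpb.BeaconBlocksByRangeRequest{})
}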

View File

@@ -11,6 +11,7 @@ import (
lru "github.com/hashicorp/golang-lru"
pubsub "github.com/libp2p/go-libp2p-pubsub"
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
gcache "github.com/patrickmn/go-cache"
@@ -41,7 +42,7 @@ import (
var _ runtime.Service = (*Service)(nil)
const rangeLimit = 1024
const rangeLimit uint64 = 1024
const seenBlockSize = 1000
const seenUnaggregatedAttSize = 20000
const seenAggregatedAttSize = 1024
@@ -272,6 +273,10 @@ func (s *Service) registerHandlers() {
}
}
func (s *Service) writeErrorResponseToStream(responseCode byte, reason string, stream libp2pcore.Stream) {
writeErrorResponseToStream(responseCode, reason, stream, s.cfg.p2p)
}
// marks the chain as having started.
func (s *Service) markForChainStart() {
s.chainStarted.Set()

View File

@@ -1,77 +0,0 @@
package sync
import (
"errors"
"sort"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
)
// A type to represent beacon blocks and roots which have methods
// which satisfy the Interface in `Sort` so that this type can
// be sorted in ascending order.
type sortedObj struct {
blks []interfaces.ReadOnlySignedBeaconBlock
roots [][32]byte
}
// Less reports whether the element with index i must sort before the element with index j.
func (s sortedObj) Less(i, j int) bool {
return s.blks[i].Block().Slot() < s.blks[j].Block().Slot()
}
// Swap swaps the elements with indexes i and j.
func (s sortedObj) Swap(i, j int) {
s.blks[i], s.blks[j] = s.blks[j], s.blks[i]
s.roots[i], s.roots[j] = s.roots[j], s.roots[i]
}
// Len is the number of elements in the collection.
func (s sortedObj) Len() int {
return len(s.blks)
}
// removes duplicates from provided blocks and roots.
func (_ *Service) dedupBlocksAndRoots(blks []interfaces.ReadOnlySignedBeaconBlock, roots [][32]byte) ([]interfaces.ReadOnlySignedBeaconBlock, [][32]byte, error) {
if len(blks) != len(roots) {
return nil, nil, errors.New("input blks and roots are diff lengths")
}
// Remove duplicate blocks received
rootMap := make(map[[32]byte]bool, len(blks))
newBlks := make([]interfaces.ReadOnlySignedBeaconBlock, 0, len(blks))
newRoots := make([][32]byte, 0, len(roots))
for i, r := range roots {
if rootMap[r] {
continue
}
rootMap[r] = true
newRoots = append(newRoots, roots[i])
newBlks = append(newBlks, blks[i])
}
return newBlks, newRoots, nil
}
func (_ *Service) dedupRoots(roots [][32]byte) [][32]byte {
newRoots := make([][32]byte, 0, len(roots))
rootMap := make(map[[32]byte]bool, len(roots))
for i, r := range roots {
if rootMap[r] {
continue
}
rootMap[r] = true
newRoots = append(newRoots, roots[i])
}
return newRoots
}
// sort the provided blocks and roots in ascending order. This method assumes that the size of
// block slice and root slice is equal.
func (_ *Service) sortBlocksAndRoots(blks []interfaces.ReadOnlySignedBeaconBlock, roots [][32]byte) ([]interfaces.ReadOnlySignedBeaconBlock, [][32]byte) {
obj := sortedObj{
blks: blks,
roots: roots,
}
sort.Sort(obj)
return obj.blks, obj.roots
}
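The helpers removed above carried blocks and roots as two parallel slices. With the ROBlock type, which carries its root alongside the block, the same dedup-by-root and sort-by-slot behavior fits in one slice. A hedged, illustrative sketch only (assumes ROBlock exposes Root() and the usual Block().Slot() accessor), not code from this changeset:

// Illustrative: drop duplicate roots, then order by ascending slot.
func dedupAndSortROBlocks(blks []blocks.ROBlock) []blocks.ROBlock {
	seen := make(map[[32]byte]bool, len(blks))
	out := make([]blocks.ROBlock, 0, len(blks))
	for _, b := range blks {
		if seen[b.Root()] {
			continue
		}
		seen[b.Root()] = true
		out = append(out, b)
	}
	sort.Slice(out, func(i, j int) bool {
		return out[i].Block().Slot() < out[j].Block().Slot()
	})
	return out
}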

View File

@@ -1,85 +0,0 @@
package sync
import (
"math/rand"
"testing"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func TestSortedObj_SortBlocksRoots(t *testing.T) {
source := rand.NewSource(33)
randGen := rand.New(source)
var blks []interfaces.ReadOnlySignedBeaconBlock
var roots [][32]byte
randFunc := func() int64 {
return randGen.Int63n(50)
}
for i := 0; i < 10; i++ {
slot := primitives.Slot(randFunc())
newBlk, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: slot, Body: &ethpb.BeaconBlockBody{}}})
require.NoError(t, err)
blks = append(blks, newBlk)
root := bytesutil.ToBytes32(bytesutil.Bytes32(uint64(slot)))
roots = append(roots, root)
}
r := &Service{}
newBlks, newRoots := r.sortBlocksAndRoots(blks, roots)
previousSlot := primitives.Slot(0)
for i, b := range newBlks {
if b.Block().Slot() < previousSlot {
t.Errorf("Block list is not sorted as %d is smaller than previousSlot %d", b.Block().Slot(), previousSlot)
}
if bytesutil.FromBytes8(newRoots[i][:]) != uint64(b.Block().Slot()) {
t.Errorf("root doesn't match stored slot in block: wanted %d but got %d", b.Block().Slot(), bytesutil.FromBytes8(newRoots[i][:]))
}
previousSlot = b.Block().Slot()
}
}
func TestSortedObj_NoDuplicates(t *testing.T) {
source := rand.NewSource(33)
randGen := rand.New(source)
var blks []interfaces.ReadOnlySignedBeaconBlock
var roots [][32]byte
randFunc := func() int64 {
return randGen.Int63n(50)
}
for i := 0; i < 10; i++ {
slot := primitives.Slot(randFunc())
newBlk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: slot, Body: &ethpb.BeaconBlockBody{}}}
// append twice
wsb, err := blocks.NewSignedBeaconBlock(newBlk)
require.NoError(t, err)
wsbCopy, err := wsb.Copy()
require.NoError(t, err)
blks = append(blks, wsb, wsbCopy)
// append twice
root := bytesutil.ToBytes32(bytesutil.Bytes32(uint64(slot)))
roots = append(roots, root, root)
}
r := &Service{}
newBlks, newRoots, err := r.dedupBlocksAndRoots(blks, roots)
require.NoError(t, err)
rootMap := make(map[[32]byte]bool)
for i, b := range newBlks {
if rootMap[newRoots[i]] {
t.Errorf("Duplicated root exists %#x with block %v", newRoots[i], b)
}
rootMap[newRoots[i]] = true
}
}

View File

@@ -141,12 +141,12 @@ func (s *Service) hasSeenSyncMessageIndexSlot(ctx context.Context, m *ethpb.Sync
return true // Impossible. Return true to be safe
}
if !s.cfg.chain.InForkchoice(root) && !s.cfg.beaconDB.HasBlock(ctx, root) {
syncMessagesForUnkownBlocks.Inc()
syncMessagesForUnknownBlocks.Inc()
return true
}
msgRoot := [32]byte(m.BlockRoot)
if !s.cfg.chain.InForkchoice(msgRoot) && !s.cfg.beaconDB.HasBlock(ctx, msgRoot) {
syncMessagesForUnkownBlocks.Inc()
syncMessagesForUnknownBlocks.Inc()
return false
}
headRoot := s.cfg.chain.CachedHeadRoot()

View File

@@ -65,10 +65,7 @@ container_image(
container_bundle(
name = "image_bundle",
images = {
"gcr.io/prysmaticlabs/prysm/beacon-chain:latest": ":image_with_creation_time",
"gcr.io/prysmaticlabs/prysm/beacon-chain:{DOCKER_TAG}": ":image_with_creation_time",
"index.docker.io/prysmaticlabs/prysm-beacon-chain:latest": ":image_with_creation_time",
"index.docker.io/prysmaticlabs/prysm-beacon-chain:{DOCKER_TAG}": ":image_with_creation_time",
"gcr.io/prysmaticlabs/prysm/beacon-chain:rcmgrTesting": ":image_with_creation_time",
},
tags = ["manual"],
visibility = ["//beacon-chain:__pkg__"],
@@ -83,12 +80,6 @@ go_image_debug(
container_bundle(
name = "image_bundle_debug",
images = {
"gcr.io/prysmaticlabs/prysm/beacon-chain:latest-debug": ":image_debug",
"gcr.io/prysmaticlabs/prysm/beacon-chain:{DOCKER_TAG}-debug": ":image_debug",
"index.docker.io/prysmaticlabs/prysm-beacon-chain:latest-debug": ":image_debug",
"index.docker.io/prysmaticlabs/prysm-beacon-chain:{DOCKER_TAG}-debug": ":image_debug",
},
tags = ["manual"],
visibility = ["//beacon-chain:__pkg__"],
)
@@ -102,12 +93,6 @@ go_image_alpine(
container_bundle(
name = "image_bundle_alpine",
images = {
"gcr.io/prysmaticlabs/prysm/beacon-chain:latest-alpine": ":image_alpine",
"gcr.io/prysmaticlabs/prysm/beacon-chain:{DOCKER_TAG}-alpine": ":image_alpine",
"index.docker.io/prysmaticlabs/prysm-beacon-chain:latest-alpine": ":image_alpine",
"index.docker.io/prysmaticlabs/prysm-beacon-chain:{DOCKER_TAG}-alpine": ":image_alpine",
},
tags = ["manual"],
visibility = ["//beacon-chain:__pkg__"],
)
@@ -119,20 +104,6 @@ docker_push(
visibility = ["//beacon-chain:__pkg__"],
)
docker_push(
name = "push_images_debug",
bundle = ":image_bundle_debug",
tags = ["manual"],
visibility = ["//beacon-chain:__pkg__"],
)
docker_push(
name = "push_images_alpine",
bundle = ":image_bundle_alpine",
tags = ["manual"],
visibility = ["//beacon-chain:__pkg__"],
)
go_binary(
name = "beacon-chain",
embed = [":go_default_library"],

View File

@@ -9,6 +9,7 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v4/cmd/prysmctl/checkpointsync",
visibility = ["//visibility:public"],
deps = [
"//api/client:go_default_library",
"//api/client/beacon:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",

View File

@@ -5,6 +5,7 @@ import (
"os"
"time"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/api/client/beacon"
log "github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
@@ -45,7 +46,7 @@ func cliActionDownload(_ *cli.Context) error {
ctx := context.Background()
f := downloadFlags
opts := []beacon.ClientOpt{beacon.WithTimeout(f.Timeout)}
opts := []client.ClientOpt{client.WithTimeout(f.Timeout)}
client, err := beacon.NewClient(downloadFlags.BeaconNodeHost, opts...)
if err != nil {
return err

View File

@@ -190,7 +190,7 @@ func cliActionRequestBlocks(cliCtx *cli.Context) error {
for _, blk := range blocks {
exec, err := blk.Block().Body().Execution()
switch {
case errors.Is(err, consensus_types.ErrUnsupportedGetter):
case errors.Is(err, consensus_types.ErrUnsupportedField):
continue
case err != nil:
log.WithError(err).Error("Could not read execution data from block body")
@@ -199,7 +199,7 @@ func cliActionRequestBlocks(cliCtx *cli.Context) error {
}
_, err = exec.Transactions()
switch {
case errors.Is(err, consensus_types.ErrUnsupportedGetter):
case errors.Is(err, consensus_types.ErrUnsupportedField):
continue
case err != nil:
log.WithError(err).Error("Could not read transactions block execution payload")

View File

@@ -4,19 +4,29 @@ go_library(
name = "go_default_library",
srcs = [
"cmd.go",
"error.go",
"proposer_settings.go",
"withdraw.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/cmd/prysmctl/validator",
visibility = ["//visibility:public"],
deps = [
"//api/client:go_default_library",
"//api/client/beacon:go_default_library",
"//api/client/validator:go_default_library",
"//beacon-chain/rpc/apimiddleware:go_default_library",
"//cmd:go_default_library",
"//cmd/validator/accounts:go_default_library",
"//cmd/validator/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//encoding/bytesutil:go_default_library",
"//io/file:go_default_library",
"//io/prompt:go_default_library",
"//proto/prysm/v1alpha1/validator-client:go_default_library",
"//runtime/tos:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_logrusorgru_aurora//:go_default_library",
@@ -29,7 +39,10 @@ go_library(
go_test(
name = "go_default_test",
srcs = ["withdraw_test.go"],
srcs = [
"proposer_settings_test.go",
"withdraw_test.go",
],
data = glob(["testdata/**"]),
embed = [":go_default_library"],
deps = [
@@ -37,6 +50,7 @@ go_test(
"//config/params:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//validator/rpc/apimiddleware:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",

View File

@@ -20,22 +20,56 @@ var (
Usage: "host:port for beacon node to query",
Value: "127.0.0.1:3500",
}
PathFlag = &cli.StringFlag{
Name: "path",
Aliases: []string{"p"},
Usage: "path to the signed withdrawal messages JSON",
}
ConfirmFlag = &cli.BoolFlag{
Name: "confirm",
Aliases: []string{"c"},
Usage: "WARNING: User confirms and accepts responsibility of all input data provided and actions for setting their withdrawal address for their validator key. " +
"This action is not reversible and withdrawal addresses can not be changed once set.",
}
VerifyOnlyFlag = &cli.BoolFlag{
Name: "verify-only",
Aliases: []string{"vo"},
Usage: "overrides withdrawal command to only verify whether requests are in the pool and does not submit withdrawal requests",
}
ValidatorHostFlag = &cli.StringFlag{
Name: "validator-host",
Aliases: []string{"vch"},
Usage: "host:port for validator client.",
Value: "http://127.0.0.1:7500",
}
ProposerSettingsOutputFlag = &cli.StringFlag{
Name: "output-proposer-settings-path",
Aliases: []string{"settings-path"},
Usage: "path to outputting a proposer settings file ( i.e. ./path/to/proposer-settings.json), file does not include builder settings and will need to be added for advanced users using those features",
}
WithBuilderFlag = &cli.BoolFlag{
Name: "with-builder",
Aliases: []string{"wb"},
Usage: "adds default builder options to proposer settings output, used for enabling mev-boost and relays",
}
DefaultFeeRecipientFlag = &cli.StringFlag{
Name: "default-fee-recipient",
Aliases: []string{"dfr"},
Usage: "default fee recipient used for proposer-settings, only used with --output-proposer-settings-path",
}
TokenFlag = &cli.StringFlag{
Name: "token",
Aliases: []string{"t"},
Usage: "keymanager API bearer token, note: currently required but may be removed in the future, this is the same token as the web ui token.",
}
)
var Commands = []*cli.Command{
@@ -89,6 +123,27 @@ var Commands = []*cli.Command{
return nil
},
},
{
Name: "proposer-settings",
Aliases: []string{"w"},
Usage: "Display or recreate currently used proposer settings.",
Flags: []cli.Flag{
cmd.ConfigFileFlag,
DefaultFeeRecipientFlag,
TokenFlag,
ValidatorHostFlag,
ProposerSettingsOutputFlag,
},
Before: func(cliCtx *cli.Context) error {
return cmd.LoadFlagsFromConfig(cliCtx, cliCtx.Command.Flags)
},
Action: func(cliCtx *cli.Context) error {
if err := getProposerSettings(cliCtx, os.Stdin); err != nil {
log.WithError(err).Fatal("Could not get proposer settings")
}
return nil
},
},
{
Name: "exit",
Aliases: []string{"e", "voluntary-exit"},

View File

@@ -0,0 +1,8 @@
package validator
import "fmt"
// ErrNoFlag takes a flag name and returns a formatted error representing no flag was provided.
func errNoFlag(flagName string) error {
return fmt.Errorf("no --%s flag value was provided", flagName)
}

View File

@@ -0,0 +1,112 @@
package validator
import (
"encoding/json"
"errors"
"io"
"github.com/ethereum/go-ethereum/common"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/api/client/validator"
"github.com/prysmaticlabs/prysm/v4/cmd/validator/flags"
"github.com/prysmaticlabs/prysm/v4/config/params"
validatorType "github.com/prysmaticlabs/prysm/v4/consensus-types/validator"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/io/file"
"github.com/prysmaticlabs/prysm/v4/io/prompt"
validatorpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/validator-client"
log "github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
"go.opencensus.io/trace"
)
func getProposerSettings(c *cli.Context, r io.Reader) error {
ctx, span := trace.StartSpan(c.Context, "prysmctl.getProposerSettings")
defer span.End()
if !c.IsSet(ValidatorHostFlag.Name) {
return errNoFlag(ValidatorHostFlag.Name)
}
if !c.IsSet(TokenFlag.Name) {
return errNoFlag(TokenFlag.Name)
}
defaultFeeRecipient := params.BeaconConfig().DefaultFeeRecipient.Hex()
if c.IsSet(ProposerSettingsOutputFlag.Name) {
if c.IsSet(DefaultFeeRecipientFlag.Name) {
recipient := c.String(DefaultFeeRecipientFlag.Name)
if err := validateIsExecutionAddress(recipient); err != nil {
return err
}
defaultFeeRecipient = recipient
} else {
promptText := "Please enter a default fee recipient address (an ethereum address in hex format)"
resp, err := prompt.ValidatePrompt(r, promptText, validateIsExecutionAddress)
if err != nil {
return err
}
defaultFeeRecipient = resp
}
}
cl, err := validator.NewClient(c.String(ValidatorHostFlag.Name), client.WithAuthenticationToken(c.String(TokenFlag.Name)))
if err != nil {
return err
}
validators, err := cl.GetValidatorPubKeys(ctx)
if err != nil {
return err
}
feeRecipients, err := cl.GetFeeRecipientAddresses(ctx, validators)
if err != nil {
return err
}
log.Infoln("===============DISPLAYING CURRENT PROPOSER SETTINGS===============")
for index := range validators {
log.Infof("Validator: %s. Fee-recipient: %s", validators[index], feeRecipients[index])
}
if c.IsSet(ProposerSettingsOutputFlag.Name) {
log.Infof("The default fee recipient is set to %s", defaultFeeRecipient)
var builderSettings *validatorpb.BuilderConfig
if c.Bool(WithBuilderFlag.Name) {
builderSettings = &validatorpb.BuilderConfig{
Enabled: true,
GasLimit: validatorType.Uint64(params.BeaconConfig().DefaultBuilderGasLimit),
}
} else {
log.Infof("Default builder settings can be included with the `--%s` flag", WithBuilderFlag.Name)
}
proposerConfig := make(map[string]*validatorpb.ProposerOptionPayload)
for index, val := range validators {
proposerConfig[val] = &validatorpb.ProposerOptionPayload{
FeeRecipient: feeRecipients[index],
Builder: builderSettings,
}
}
fileConfig := &validatorpb.ProposerSettingsPayload{
ProposerConfig: proposerConfig,
DefaultConfig: &validatorpb.ProposerOptionPayload{
FeeRecipient: defaultFeeRecipient,
Builder: builderSettings,
},
}
b, err := json.Marshal(fileConfig)
if err != nil {
return err
}
if err := file.WriteFile(c.String(ProposerSettingsOutputFlag.Name), b); err != nil {
return err
}
log.Infof("Successfully created `%s`. Settings can be imported into validator client using --%s flag.", c.String(ProposerSettingsOutputFlag.Name), flags.ProposerSettingsFlag.Name)
}
return nil
}
func validateIsExecutionAddress(input string) error {
if !bytesutil.IsHex([]byte(input)) || !(len(input) == common.AddressLength*2+2) {
return errors.New("no default address entered")
}
return nil
}
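Two notes on the file above. validateIsExecutionAddress accepts exactly common.AddressLength*2+2 = 42 characters, i.e. a 0x-prefixed 20-byte hex address. And the settings file written by getProposerSettings round-trips through encoding/json; a hedged sketch of loading it back (the path is hypothetical, the field names are the ones used above, and it assumes the file's existing imports plus os):

// Illustrative only: load a generated settings file and print its contents.
func printProposerSettings(path string) error {
	raw, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	loaded := &validatorpb.ProposerSettingsPayload{}
	if err := json.Unmarshal(raw, loaded); err != nil {
		return err
	}
	// ProposerConfig maps each validator public key to its fee recipient (plus optional
	// builder settings); DefaultConfig carries the default fee recipient chosen above.
	for pubkey, opt := range loaded.ProposerConfig {
		log.Infof("Validator: %s. Fee-recipient: %s", pubkey, opt.FeeRecipient)
	}
	log.Infof("Default fee recipient: %s", loaded.DefaultConfig.FeeRecipient)
	return nil
}

The command itself would be invoked as something like prysmctl validator proposer-settings --validator-host=http://127.0.0.1:7500 --token=<token> --output-proposer-settings-path=./proposer-settings.json; the exact command path is an assumption based on this package layout.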

View File

@@ -0,0 +1,125 @@
package validator
import (
"encoding/json"
"flag"
"fmt"
"net"
"net/http"
"net/http/httptest"
"os"
"strings"
"testing"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/validator/rpc/apimiddleware"
logtest "github.com/sirupsen/logrus/hooks/test"
"github.com/urfave/cli/v2"
)
func getValidatorHappyPathTestServer(t *testing.T) *httptest.Server {
key1 := "0x855ae9c6184d6edd46351b375f16f541b2d33b0ed0da9be4571b13938588aee840ba606a946f0e8023ae3a4b2a43b4d4"
key2 := "0x844ae9c6184d6edd46351b375f16f541b2d33b0ed0da9be4571b13938588aee840ba606a946f0e8023ae3a4b2a43b4d4"
address1 := "0xb698D697092822185bF0311052215d5B5e1F3944"
return httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200)
w.Header().Set("Content-Type", "application/json")
if r.Method == http.MethodGet {
if r.RequestURI == "/eth/v1/keystores" {
err := json.NewEncoder(w).Encode(&apimiddleware.ListKeystoresResponseJson{
Keystores: []*apimiddleware.KeystoreJson{
{
ValidatingPubkey: key1,
},
{
ValidatingPubkey: key2,
},
},
})
require.NoError(t, err)
} else if r.RequestURI == "/eth/v1/remotekeys" {
err := json.NewEncoder(w).Encode(&apimiddleware.ListRemoteKeysResponseJson{
Keystores: []*apimiddleware.RemoteKeysListJson{
{
Pubkey: key1,
},
},
})
require.NoError(t, err)
} else if r.RequestURI[strings.LastIndex(r.RequestURI, "/")+1:] == "feerecipient" {
pathSeg := strings.Split(r.RequestURI, "/")
validatorKey := pathSeg[len(pathSeg)-2]
feeMap := map[string]string{
key1: address1,
key2: address1,
}
address, ok := feeMap[validatorKey]
require.Equal(t, ok, true)
err := json.NewEncoder(w).Encode(&apimiddleware.GetFeeRecipientByPubkeyResponseJson{
Data: &apimiddleware.FeeRecipientJson{
Pubkey: validatorKey,
Ethaddress: address,
},
})
require.NoError(t, err)
}
}
}))
}
func TestGetProposerSettings(t *testing.T) {
file := "./testdata/settings.json"
baseurl := "127.0.0.1:3500"
l, err := net.Listen("tcp", baseurl)
require.NoError(t, err)
srv := getValidatorHappyPathTestServer(t)
err = srv.Listener.Close()
require.NoError(t, err)
srv.Listener = l
srv.Start()
defer srv.Close()
hook := logtest.NewGlobal()
defaultfeerecipient := "0xb698D697092822185bF0311052215d5B5e1F3944"
token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.e30.VXjrSItV_Kmwg_XilpscyPm2SPIsstytYLtr_AuJI8I"
app := cli.App{}
set := flag.NewFlagSet("test", 0)
set.String("validator-host", baseurl, "")
set.String("output-proposer-settings-path", file, "")
set.String("default-fee-recipient", defaultfeerecipient, "")
set.String("token", token, "")
set.Bool("with-builder", true, "")
assert.NoError(t, set.Set("validator-host", baseurl))
assert.NoError(t, set.Set("output-proposer-settings-path", file))
assert.NoError(t, set.Set("default-fee-recipient", defaultfeerecipient))
assert.NoError(t, set.Set("token", token))
cliCtx := cli.NewContext(&app, set, nil)
err = getProposerSettings(cliCtx, os.Stdin)
require.NoError(t, err)
assert.LogsContain(t, hook, fmt.Sprintf("fee recipient is set to %s", defaultfeerecipient))
assert.LogsContain(t, hook, "Successfully created")
// clean up created file
err = os.Remove(file)
require.NoError(t, err)
}
func TestValidateValidateIsExecutionAddress(t *testing.T) {
t.Run("Happy Path", func(t *testing.T) {
err := validateIsExecutionAddress("0xb698D697092822185bF0311052215d5B5e1F3933")
require.NoError(t, err)
})
t.Run("Too Long", func(t *testing.T) {
err := validateIsExecutionAddress("0xb698D697092822185bF0311052215d5B5e1F39331")
require.ErrorContains(t, "no default address entered", err)
})
t.Run("Too Short", func(t *testing.T) {
err := validateIsExecutionAddress("0xb698D697092822185bF0311052215d5B5e1F393")
require.ErrorContains(t, "no default address entered", err)
})
t.Run("Not a hex", func(t *testing.T) {
err := validateIsExecutionAddress("b698D697092822185bF0311052215d5B5e1F393310")
require.ErrorContains(t, "no default address entered", err)
})
}

View File

@@ -9,6 +9,7 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v4/cmd/prysmctl/weaksubjectivity",
visibility = ["//visibility:public"],
deps = [
"//api/client:go_default_library",
"//api/client/beacon:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",

Some files were not shown because too many files have changed in this diff.