Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)

Compare commits: v4.0.4-pat...fcTesting2 (95 commits)
@@ -1,6 +1,6 @@
# Contribution Guidelines

Note: The latest and most up to date documenation can be found on our [docs portal](https://docs.prylabs.network/docs/contribute/contribution-guidelines).
Note: The latest and most up-to-date documentation can be found on our [docs portal](https://docs.prylabs.network/docs/contribute/contribution-guidelines).

Excited by our work and want to get involved in building out our sharding releases? Or maybe you haven't learned as much about the Ethereum protocol but are a savvy developer?

@@ -10,9 +10,9 @@ You can explore our [Open Issues](https://github.com/prysmaticlabs/prysm/issues)

**1. Set up Prysm following the instructions in README.md.**

**2. Fork the prysm repo.**
**2. Fork the Prysm repo.**

Sign in to your Github account or create a new account if you do not have one already. Then navigate your browser to https://github.com/prysmaticlabs/prysm/. In the upper right hand corner of the page, click “fork”. This will create a copy of the Prysm repo in your account.
Sign in to your GitHub account or create a new account if you do not have one already. Then navigate your browser to https://github.com/prysmaticlabs/prysm/. In the upper right hand corner of the page, click “fork”. This will create a copy of the Prysm repo in your account.

**3. Create a local clone of Prysm.**

@@ -23,7 +23,7 @@ $ git clone https://github.com/prysmaticlabs/prysm.git
$ cd $GOPATH/src/github.com/prysmaticlabs/prysm
```

**4. Link your local clone to the fork on your Github repo.**
**4. Link your local clone to the fork on your GitHub repo.**

```
$ git remote add myprysmrepo https://github.com/<your_github_user_name>/prysm.git
@@ -68,7 +68,7 @@ $ go test <file_you_are_working_on>
$ git add --all
```

This command stages all of the files that you have changed. You can add individual files by specifying the file name or names and eliminating the “-- all”.
This command stages all the files that you have changed. You can add individual files by specifying the file name or names and eliminating the “-- all”.

**11. Commit the file or files.**

@@ -96,8 +96,7 @@ If there are conflicts between your edits and those made by others since you sta
$ git status
```

Open those files one at a time and you
will see lines inserted by Git that identify the conflicts:
Open those files one at a time, and you will see lines inserted by Git that identify the conflicts:

```
<<<<<< HEAD
@@ -119,7 +118,7 @@ $ git push myrepo feature-in-progress-branch

**15. Check to be sure your fork of the Prysm repo contains your feature branch with the latest edits.**

Navigate to your fork of the repo on Github. On the upper left where the current branch is listed, change the branch to your feature-in-progress-branch. Open the files that you have worked on and check to make sure they include your changes.
Navigate to your fork of the repo on GitHub. On the upper left where the current branch is listed, change the branch to your feature-in-progress-branch. Open the files that you have worked on and check to make sure they include your changes.

**16. Create a pull request.**

@@ -151,7 +150,7 @@ pick hash fix a bug
pick hash add a feature
```

Replace the word pick with the word “squash” for every line but the first so you end with ….
Replace the word pick with the word “squash” for every line but the first, so you end with ….

```
pick hash do some work
@@ -178,7 +177,7 @@ We consider two types of contributions to our repo and categorize them as follow
Anyone can become a part-time contributor and help out on implementing Ethereum consensus. The responsibilities of a part-time contributor include:

- Engaging in Gitter conversations, asking the questions on how to begin contributing to the project
- Opening up github issues to express interest in code to implement
- Opening up GitHub issues to express interest in code to implement
- Opening up PRs referencing any open issue in the repo. PRs should include:
- Detailed context of what would be required for merge
- Tests that are consistent with how other tests are written in our implementation
@@ -188,12 +187,12 @@ Anyone can become a part-time contributor and help out on implementing Ethereum

### Core Contributors

Core contributors are remote contractors of Prysmatic Labs, LLC. and are considered critical team members of our organization. Core devs have all of the responsibilities of part-time contributors plus the majority of the following:
Core contributors are remote contractors of Prysmatic Labs, LLC. and are considered critical team members of our organization. Core devs have all the responsibilities of part-time contributors plus the majority of the following:

- Stay up to date on the latest beacon chain specification
- Monitor github issues and PR’s to make sure owner, labels, descriptions are correct
- Monitor GitHub issues and PR’s to make sure owner, labels, descriptions are correct
- Formulate independent ideas, suggest new work to do, point out improvements to existing approaches
- Participate in code review, ensure code quality is excellent, and have ensure high code coverage
- Participate in code review, ensure code quality is excellent, and ensure high code coverage
- Help with social media presence, write bi-weekly development update
- Represent Prysmatic Labs at events to help spread the word on scalability research and solutions
@@ -4,14 +4,14 @@
[](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
[](https://github.com/ethereum/consensus-specs/tree/v1.3.0)
[](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.2/src/engine)
[](https://discord.gg/CTYGPUJ)
[](https://discord.gg/prysmaticlabs)
[](https://www.gitpoap.io/gh/prysmaticlabs/prysm)

This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum Consensus](https://ethereum.org/en/eth2/) specification, developed by [Prysmatic Labs](https://prysmaticlabs.com). See the [Changelog](https://github.com/prysmaticlabs/prysm/releases) for details of the latest releases and upcoming breaking changes.

### Getting Started

A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the [official documentation portal](https://docs.prylabs.network). If you still have questions, feel free to stop by our [Discord](https://discord.gg/CTYGPUJ).
A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the [official documentation portal](https://docs.prylabs.network). If you still have questions, feel free to stop by our [Discord](https://discord.gg/prysmaticlabs).

### Staking on Mainnet
WORKSPACE

@@ -205,7 +205,7 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)

consensus_spec_version = "v1.3.0"
consensus_spec_version = "v1.4.0-alpha.1"

bls_test_version = "v0.1.1"

@@ -221,7 +221,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "1c806e04ac5e3779032c06a6009350b3836b6809bb23812993d6ececd7047cf5",
sha256 = "1118a663be4a00ba00f0635eb20287157f2b2f993aed64335bfbcd04af424c2b",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)

@@ -237,7 +237,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "2b42796dc5ccd9f1246032d0c17663e20f70334ff7e00325f0fc3af28cb24186",
sha256 = "acde6e10940d14f22277eda5b55b65a24623ac88e4c7a2e34134a6069f5eea82",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)

@@ -253,7 +253,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "231e3371e81ce9acde65d2910ec4580587e74dbbcfcbd9c675e473e022deec8a",
sha256 = "49c022f3a3478cea849ba8f877a9f7e4c1ded549edddc09993550bbc5bb192e1",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)

@@ -268,7 +268,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "219b74d95664ea7e8dfbf31162dfa206b9c0cf45919ea86db5fa0f8902977e3c",
sha256 = "c3e246ff01f6b7b9e9e41939954a6ff89dfca7297415f88781809165fa83267c",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
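The hunks above bump consensus_spec_version to v1.4.0-alpha.1 along with the sha256 of each spec-test archive. As a rough sketch (not part of the diff, and assuming network access), the new checksums can be recomputed from the same URLs the WORKSPACE rules use:

```go
// Hypothetical helper: print the sha256 of each archive referenced in WORKSPACE
// for a new consensus_spec_version, so the values can be pasted into the rules.
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	version := "v1.4.0-alpha.1"
	urls := []string{
		fmt.Sprintf("https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz", version),
		fmt.Sprintf("https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz", version),
		fmt.Sprintf("https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz", version),
		fmt.Sprintf("https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz", version),
	}
	for _, u := range urls {
		resp, err := http.Get(u)
		if err != nil {
			log.Fatal(err)
		}
		h := sha256.New()
		if _, err := io.Copy(h, resp.Body); err != nil {
			log.Fatal(err)
		}
		_ = resp.Body.Close()
		fmt.Printf("%x  %s\n", h.Sum(nil), u)
	}
}
```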
api/client/BUILD.bazel (new file)

@@ -0,0 +1,20 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
name = "go_default_library",
srcs = [
"client.go",
"errors.go",
"options.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/client",
visibility = ["//visibility:public"],
deps = ["@com_github_pkg_errors//:go_default_library"],
)

go_test(
name = "go_default_test",
srcs = ["client_test.go"],
embed = [":go_default_library"],
deps = ["//testing/require:go_default_library"],
)
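The new api/client package (client.go, errors.go, options.go) holds the shared HTTP plumbing that the beacon-specific client now builds on. A minimal usage sketch, assuming the prysm/v4 module at this revision and using only identifiers that appear elsewhere in this diff (beacon.NewClient, client.WithRoundTripper, GetState, IdFinalized):

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/prysmaticlabs/prysm/v4/api/client"
	"github.com/prysmaticlabs/prysm/v4/api/client/beacon"
)

func main() {
	// Construct the beacon API client on top of the shared options; the tests in
	// this diff pass a stub RoundTripper through the same hook.
	rt := http.DefaultTransport
	c, err := beacon.NewClient("http://localhost:3500", client.WithRoundTripper(rt))
	if err != nil {
		log.Fatal(err)
	}
	// Fetch the finalized state as SSZ bytes, as DownloadFinalizedData does.
	ssz, err := c.GetState(context.Background(), beacon.IdFinalized)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("finalized state: %d bytes", len(ssz))
}
```

The tests in this diff use the same WithRoundTripper hook to stub API responses without a live node.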
@@ -6,11 +6,11 @@ go_library(
"checkpoint.go",
"client.go",
"doc.go",
"errors.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/beacon",
visibility = ["//visibility:public"],
deps = [
"//api/client:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/rpc/apimiddleware:go_default_library",
"//beacon-chain/state:go_default_library",
@@ -39,6 +39,7 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//api/client:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
@@ -6,10 +6,12 @@ import (
"path"

"github.com/pkg/errors"
base "github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v4/io/file"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
@@ -18,6 +20,8 @@ import (
"golang.org/x/mod/semver"
)

var errCheckpointBlockMismatch = errors.New("mismatch between checkpoint sync state and block")

// OriginData represents the BeaconState and ReadOnlySignedBeaconBlock necessary to start an empty Beacon Node
// using Checkpoint Sync.
type OriginData struct {
@@ -74,37 +78,40 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
if err != nil {
return nil, errors.Wrap(err, "error unmarshaling finalized state to correct version")
}
if s.Slot() != s.LatestBlockHeader().Slot {
return nil, fmt.Errorf("finalized state slot does not match latest block header slot %d != %d", s.Slot(), s.LatestBlockHeader().Slot)
}

sr, err := s.HashTreeRoot(ctx)
slot := s.LatestBlockHeader().Slot
bb, err := client.GetBlock(ctx, IdFromSlot(slot))
if err != nil {
return nil, errors.Wrapf(err, "failed to compute htr for finalized state at slot=%d", s.Slot())
}
header := s.LatestBlockHeader()
header.StateRoot = sr[:]
br, err := header.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error while computing block root using state data")
}

bb, err := client.GetBlock(ctx, IdFromRoot(br))
if err != nil {
return nil, errors.Wrapf(err, "error requesting block by root = %#x", br)
return nil, errors.Wrapf(err, "error requesting block by slot = %d", slot)
}
b, err := vu.UnmarshalBeaconBlock(bb)
if err != nil {
return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
}
realBlockRoot, err := b.Block().HashTreeRoot()
br, err := b.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block")
}
bodyRoot, err := b.Block().Body().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block body")
}

log.Printf("BeaconState slot=%d, Block slot=%d", s.Slot(), b.Block().Slot())
log.Printf("BeaconState htr=%#x, Block state_root=%#x", sr, b.Block().StateRoot())
log.Printf("BeaconState latest_block_header htr=%#x, block htr=%#x", br, realBlockRoot)
sbr := bytesutil.ToBytes32(s.LatestBlockHeader().BodyRoot)
if sbr != bodyRoot {
return nil, errors.Wrapf(errCheckpointBlockMismatch, "state body root = %#x, block body root = %#x", sbr, bodyRoot)
}
sr, err := s.HashTreeRoot(ctx)
if err != nil {
return nil, errors.Wrapf(err, "failed to compute htr for finalized state at slot=%d", s.Slot())
}

log.
WithField("block_slot", b.Block().Slot()).
WithField("state_slot", s.Slot()).
WithField("state_root", sr).
WithField("block_root", br).
Info("Downloaded checkpoint sync state and block.")
return &OriginData{
st: s,
b: b,
@@ -140,7 +147,7 @@ func ComputeWeakSubjectivityCheckpoint(ctx context.Context, client *Client) (*We
ws, err := client.GetWeakSubjectivity(ctx)
if err != nil {
// a 404/405 is expected if querying an endpoint that doesn't support the weak subjectivity checkpoint api
if !errors.Is(err, ErrNotOK) {
if !errors.Is(err, base.ErrNotOK) {
return nil, errors.Wrap(err, "unexpected API response for prysm-only weak subjectivity checkpoint API")
}
// fall back to vanilla Beacon Node API method
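The rewritten DownloadFinalizedData fetches the block by the finalized state's latest-block-header slot instead of a recomputed root, then cross-checks the block body root against the header before accepting the pair. A self-contained sketch of that consistency check, with simplified stand-ins for Prysm's state and block types (the real code works on BeaconState and ReadOnlySignedBeaconBlock):

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
)

// Simplified stand-ins for the state's latest block header and the fetched block.
type blockHeader struct {
	Slot     uint64
	BodyRoot [32]byte
}

type signedBlock struct {
	Slot     uint64
	BodyRoot [32]byte // hash_tree_root of the block body
}

var errCheckpointBlockMismatch = errors.New("mismatch between checkpoint sync state and block")

// verifyOrigin mirrors the check above: the block fetched by the header's slot
// must carry the same body root the finalized state's header commits to.
func verifyOrigin(header blockHeader, blk signedBlock) error {
	if blk.Slot != header.Slot {
		return fmt.Errorf("slot mismatch: block=%d header=%d", blk.Slot, header.Slot)
	}
	if !bytes.Equal(header.BodyRoot[:], blk.BodyRoot[:]) {
		return fmt.Errorf("%w: state body root = %#x, block body root = %#x",
			errCheckpointBlockMismatch, header.BodyRoot, blk.BodyRoot)
	}
	return nil
}

func main() {
	root := [32]byte{0x01}
	fmt.Println(verifyOrigin(blockHeader{Slot: 100, BodyRoot: root}, signedBlock{Slot: 100, BodyRoot: root})) // <nil>
}
```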
@@ -7,9 +7,9 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/api/client"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
blocktest "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks/testing"
|
||||
@@ -66,11 +66,7 @@ func TestMarshalToEnvelope(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestFallbackVersionCheck(t *testing.T) {
|
||||
c := &Client{
|
||||
hc: &http.Client{},
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
c.hc.Transport = &testRT{rt: func(req *http.Request) (*http.Response, error) {
|
||||
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
|
||||
res := &http.Response{Request: req}
|
||||
switch req.URL.Path {
|
||||
case getNodeVersionPath:
|
||||
@@ -88,12 +84,13 @@ func TestFallbackVersionCheck(t *testing.T) {
|
||||
case getWeakSubjectivityPath:
|
||||
res.StatusCode = http.StatusNotFound
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}}
|
||||
|
||||
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
|
||||
require.NoError(t, err)
|
||||
ctx := context.Background()
|
||||
_, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
|
||||
_, err = ComputeWeakSubjectivityCheckpoint(ctx, c)
|
||||
require.ErrorIs(t, err, errUnsupportedPrysmCheckpointVersion)
|
||||
}
|
||||
|
||||
@@ -170,44 +167,41 @@ func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) {
|
||||
Epoch: epoch,
|
||||
}
|
||||
|
||||
hc := &http.Client{
|
||||
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
|
||||
res := &http.Response{Request: req}
|
||||
switch req.URL.Path {
|
||||
case getWeakSubjectivityPath:
|
||||
res.StatusCode = http.StatusOK
|
||||
cp := struct {
|
||||
Epoch string `json:"epoch"`
|
||||
Root string `json:"root"`
|
||||
}{
|
||||
Epoch: fmt.Sprintf("%d", slots.ToEpoch(b.Block().Slot())),
|
||||
Root: fmt.Sprintf("%#x", bRoot),
|
||||
}
|
||||
wsr := struct {
|
||||
Checkpoint interface{} `json:"ws_checkpoint"`
|
||||
StateRoot string `json:"state_root"`
|
||||
}{
|
||||
Checkpoint: cp,
|
||||
StateRoot: fmt.Sprintf("%#x", wRoot),
|
||||
}
|
||||
rb, err := marshalToEnvelope(wsr)
|
||||
require.NoError(t, err)
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(rb))
|
||||
case renderGetStatePath(IdFromSlot(wSlot)):
|
||||
res.StatusCode = http.StatusOK
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
|
||||
case renderGetBlockPath(IdFromRoot(bRoot)):
|
||||
res.StatusCode = http.StatusOK
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
|
||||
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
|
||||
res := &http.Response{Request: req}
|
||||
switch req.URL.Path {
|
||||
case getWeakSubjectivityPath:
|
||||
res.StatusCode = http.StatusOK
|
||||
cp := struct {
|
||||
Epoch string `json:"epoch"`
|
||||
Root string `json:"root"`
|
||||
}{
|
||||
Epoch: fmt.Sprintf("%d", slots.ToEpoch(b.Block().Slot())),
|
||||
Root: fmt.Sprintf("%#x", bRoot),
|
||||
}
|
||||
wsr := struct {
|
||||
Checkpoint interface{} `json:"ws_checkpoint"`
|
||||
StateRoot string `json:"state_root"`
|
||||
}{
|
||||
Checkpoint: cp,
|
||||
StateRoot: fmt.Sprintf("%#x", wRoot),
|
||||
}
|
||||
rb, err := marshalToEnvelope(wsr)
|
||||
require.NoError(t, err)
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(rb))
|
||||
case renderGetStatePath(IdFromSlot(wSlot)):
|
||||
res.StatusCode = http.StatusOK
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
|
||||
case renderGetBlockPath(IdFromRoot(bRoot)):
|
||||
res.StatusCode = http.StatusOK
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}},
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
return res, nil
|
||||
}}
|
||||
|
||||
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
|
||||
require.NoError(t, err)
|
||||
|
||||
wsd, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
|
||||
require.NoError(t, err)
|
||||
@@ -266,42 +260,39 @@ func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
|
||||
wsSerialized, err := wst.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
|
||||
hc := &http.Client{
|
||||
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
|
||||
res := &http.Response{Request: req}
|
||||
switch req.URL.Path {
|
||||
case getNodeVersionPath:
|
||||
res.StatusCode = http.StatusOK
|
||||
b := bytes.NewBuffer(nil)
|
||||
d := struct {
|
||||
Version string `json:"version"`
|
||||
}{
|
||||
Version: "Lighthouse/v0.1.5 (Linux x86_64)",
|
||||
}
|
||||
encoded, err := marshalToEnvelope(d)
|
||||
require.NoError(t, err)
|
||||
b.Write(encoded)
|
||||
res.Body = io.NopCloser(b)
|
||||
case getWeakSubjectivityPath:
|
||||
res.StatusCode = http.StatusNotFound
|
||||
case renderGetStatePath(IdHead):
|
||||
res.StatusCode = http.StatusOK
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
|
||||
case renderGetStatePath(IdFromSlot(wSlot)):
|
||||
res.StatusCode = http.StatusOK
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
|
||||
case renderGetBlockPath(IdFromRoot(bRoot)):
|
||||
res.StatusCode = http.StatusOK
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
|
||||
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
|
||||
res := &http.Response{Request: req}
|
||||
switch req.URL.Path {
|
||||
case getNodeVersionPath:
|
||||
res.StatusCode = http.StatusOK
|
||||
b := bytes.NewBuffer(nil)
|
||||
d := struct {
|
||||
Version string `json:"version"`
|
||||
}{
|
||||
Version: "Lighthouse/v0.1.5 (Linux x86_64)",
|
||||
}
|
||||
encoded, err := marshalToEnvelope(d)
|
||||
require.NoError(t, err)
|
||||
b.Write(encoded)
|
||||
res.Body = io.NopCloser(b)
|
||||
case getWeakSubjectivityPath:
|
||||
res.StatusCode = http.StatusNotFound
|
||||
case renderGetStatePath(IdHead):
|
||||
res.StatusCode = http.StatusOK
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
|
||||
case renderGetStatePath(IdFromSlot(wSlot)):
|
||||
res.StatusCode = http.StatusOK
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
|
||||
case renderGetBlockPath(IdFromRoot(bRoot)):
|
||||
res.StatusCode = http.StatusOK
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}},
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
return res, nil
|
||||
}}
|
||||
|
||||
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
|
||||
require.NoError(t, err)
|
||||
|
||||
wsPub, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
|
||||
require.NoError(t, err)
|
||||
@@ -315,21 +306,16 @@ func TestGetWeakSubjectivityEpochFromHead(t *testing.T) {
|
||||
st, expectedEpoch := defaultTestHeadState(t, params.MainnetConfig())
|
||||
serialized, err := st.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
hc := &http.Client{
|
||||
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
|
||||
res := &http.Response{Request: req}
|
||||
switch req.URL.Path {
|
||||
case renderGetStatePath(IdHead):
|
||||
res.StatusCode = http.StatusOK
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
|
||||
}
|
||||
return res, nil
|
||||
}},
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
|
||||
res := &http.Response{Request: req}
|
||||
if req.URL.Path == renderGetStatePath(IdHead) {
|
||||
res.StatusCode = http.StatusOK
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
|
||||
}
|
||||
return res, nil
|
||||
}}
|
||||
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
|
||||
require.NoError(t, err)
|
||||
actualEpoch, err := getWeakSubjectivityEpochFromHead(context.Background(), c)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedEpoch, actualEpoch)
|
||||
@@ -448,29 +434,24 @@ func TestDownloadFinalizedData(t *testing.T) {
|
||||
ms, err := st.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
|
||||
hc := &http.Client{
|
||||
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
|
||||
res := &http.Response{Request: req}
|
||||
switch req.URL.Path {
|
||||
case renderGetStatePath(IdFinalized):
|
||||
res.StatusCode = http.StatusOK
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(ms))
|
||||
case renderGetBlockPath(IdFromRoot(br)):
|
||||
res.StatusCode = http.StatusOK
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(mb))
|
||||
default:
|
||||
res.StatusCode = http.StatusInternalServerError
|
||||
res.Body = io.NopCloser(bytes.NewBufferString(""))
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}},
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
|
||||
res := &http.Response{Request: req}
|
||||
switch req.URL.Path {
|
||||
case renderGetStatePath(IdFinalized):
|
||||
res.StatusCode = http.StatusOK
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(ms))
|
||||
case renderGetBlockPath(IdFromSlot(b.Block().Slot())):
|
||||
res.StatusCode = http.StatusOK
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(mb))
|
||||
default:
|
||||
res.StatusCode = http.StatusInternalServerError
|
||||
res.Body = io.NopCloser(bytes.NewBufferString(""))
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}}
|
||||
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
|
||||
require.NoError(t, err)
|
||||
// sanity check before we go through checkpoint
|
||||
// make sure we can download the state and unmarshal it with the VersionedUnmarshaler
|
||||
sb, err := c.GetState(ctx, IdFinalized)
|
||||
|
||||
@@ -5,8 +5,6 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
@@ -14,8 +12,8 @@ import (
|
||||
"sort"
|
||||
"strconv"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/api/client"
|
||||
"github.com/prysmaticlabs/prysm/v4/network/forks"
|
||||
v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||
|
||||
@@ -54,8 +52,6 @@ const (
|
||||
IdFinalized StateOrBlockId = "finalized"
|
||||
)
|
||||
|
||||
var ErrMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
|
||||
|
||||
// IdFromRoot encodes a block root in the format expected by the API in places where a root can be used to identify
|
||||
// a BeaconState or SignedBeaconBlock.
|
||||
func IdFromRoot(r [32]byte) StateOrBlockId {
|
||||
@@ -85,96 +81,22 @@ func idTemplate(ts string) func(StateOrBlockId) string {
|
||||
return f
|
||||
}
|
||||
|
||||
// ClientOpt is a functional option for the Client type (http.Client wrapper)
|
||||
type ClientOpt func(*Client)
|
||||
|
||||
// WithTimeout sets the .Timeout attribute of the wrapped http.Client.
|
||||
func WithTimeout(timeout time.Duration) ClientOpt {
|
||||
return func(c *Client) {
|
||||
c.hc.Timeout = timeout
|
||||
}
|
||||
func renderGetBlockPath(id StateOrBlockId) string {
|
||||
return path.Join(getSignedBlockPath, string(id))
|
||||
}
|
||||
|
||||
// Client provides a collection of helper methods for calling the Eth Beacon Node API endpoints.
|
||||
type Client struct {
|
||||
hc *http.Client
|
||||
baseURL *url.URL
|
||||
*client.Client
|
||||
}
|
||||
|
||||
// NewClient constructs a new client with the provided options (ex WithTimeout).
|
||||
// `host` is the base host + port used to construct request urls. This value can be
|
||||
// a URL string, or NewClient will assume an http endpoint if just `host:port` is used.
|
||||
func NewClient(host string, opts ...ClientOpt) (*Client, error) {
|
||||
u, err := urlForHost(host)
|
||||
// NewClient returns a new Client that includes functions for rest calls to Beacon API.
|
||||
func NewClient(host string, opts ...client.ClientOpt) (*Client, error) {
|
||||
c, err := client.NewClient(host, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c := &Client{
|
||||
hc: &http.Client{},
|
||||
baseURL: u,
|
||||
}
|
||||
for _, o := range opts {
|
||||
o(c)
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func urlForHost(h string) (*url.URL, error) {
|
||||
// try to parse as url (being permissive)
|
||||
u, err := url.Parse(h)
|
||||
if err == nil && u.Host != "" {
|
||||
return u, nil
|
||||
}
|
||||
// try to parse as host:port
|
||||
host, port, err := net.SplitHostPort(h)
|
||||
if err != nil {
|
||||
return nil, ErrMalformedHostname
|
||||
}
|
||||
return &url.URL{Host: fmt.Sprintf("%s:%s", host, port), Scheme: "http"}, nil
|
||||
}
|
||||
|
||||
// NodeURL returns a human-readable string representation of the beacon node base url.
|
||||
func (c *Client) NodeURL() string {
|
||||
return c.baseURL.String()
|
||||
}
|
||||
|
||||
type reqOption func(*http.Request)
|
||||
|
||||
func withSSZEncoding() reqOption {
|
||||
return func(req *http.Request) {
|
||||
req.Header.Set("Accept", "application/octet-stream")
|
||||
}
|
||||
}
|
||||
|
||||
// get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
|
||||
func (c *Client) get(ctx context.Context, path string, opts ...reqOption) ([]byte, error) {
|
||||
u := c.baseURL.ResolveReference(&url.URL{Path: path})
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, o := range opts {
|
||||
o(req)
|
||||
}
|
||||
r, err := c.hc.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
err = r.Body.Close()
|
||||
}()
|
||||
if r.StatusCode != http.StatusOK {
|
||||
return nil, non200Err(r)
|
||||
}
|
||||
b, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error reading http response body from GetBlock")
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func renderGetBlockPath(id StateOrBlockId) string {
|
||||
return path.Join(getSignedBlockPath, string(id))
|
||||
return &Client{c}, nil
|
||||
}
|
||||
|
||||
// GetBlock retrieves the SignedBeaconBlock for the given block id.
|
||||
@@ -184,7 +106,7 @@ func renderGetBlockPath(id StateOrBlockId) string {
|
||||
// The return value contains the ssz-encoded bytes.
|
||||
func (c *Client) GetBlock(ctx context.Context, blockId StateOrBlockId) ([]byte, error) {
|
||||
blockPath := renderGetBlockPath(blockId)
|
||||
b, err := c.get(ctx, blockPath, withSSZEncoding())
|
||||
b, err := c.Get(ctx, blockPath, client.WithSSZEncoding())
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error requesting state by id = %s", blockId)
|
||||
}
|
||||
@@ -199,7 +121,7 @@ var getBlockRootTpl = idTemplate(getBlockRootPath)
|
||||
// for the named identifiers.
|
||||
func (c *Client) GetBlockRoot(ctx context.Context, blockId StateOrBlockId) ([32]byte, error) {
|
||||
rootPath := getBlockRootTpl(blockId)
|
||||
b, err := c.get(ctx, rootPath)
|
||||
b, err := c.Get(ctx, rootPath)
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrapf(err, "error requesting block root by id = %s", blockId)
|
||||
}
|
||||
@@ -222,7 +144,7 @@ var getForkTpl = idTemplate(getForkForStatePath)
|
||||
// <slot>, <hex encoded blockRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
|
||||
// for the named identifiers.
|
||||
func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fork, error) {
|
||||
body, err := c.get(ctx, getForkTpl(stateId))
|
||||
body, err := c.Get(ctx, getForkTpl(stateId))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error requesting fork by state id = %s", stateId)
|
||||
}
|
||||
@@ -238,7 +160,7 @@ func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fo
|
||||
|
||||
// GetForkSchedule retrieve all forks, past present and future, of which this node is aware.
|
||||
func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, error) {
|
||||
body, err := c.get(ctx, getForkSchedulePath)
|
||||
body, err := c.Get(ctx, getForkSchedulePath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error requesting fork schedule")
|
||||
}
|
||||
@@ -256,7 +178,7 @@ func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, er
|
||||
|
||||
// GetConfigSpec retrieve the current configs of the network used by the beacon node.
|
||||
func (c *Client) GetConfigSpec(ctx context.Context) (*v1.SpecResponse, error) {
|
||||
body, err := c.get(ctx, getConfigSpecPath)
|
||||
body, err := c.Get(ctx, getConfigSpecPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error requesting configSpecPath")
|
||||
}
|
||||
@@ -279,7 +201,7 @@ var versionRE = regexp.MustCompile(`^(\w+)/(v\d+\.\d+\.\d+[-a-zA-Z0-9]*)\s*/?(.*
|
||||
func parseNodeVersion(v string) (*NodeVersion, error) {
|
||||
groups := versionRE.FindStringSubmatch(v)
|
||||
if len(groups) != 4 {
|
||||
return nil, errors.Wrapf(ErrInvalidNodeVersion, "could not be parsed: %s", v)
|
||||
return nil, errors.Wrapf(client.ErrInvalidNodeVersion, "could not be parsed: %s", v)
|
||||
}
|
||||
return &NodeVersion{
|
||||
implementation: groups[1],
|
||||
@@ -291,7 +213,7 @@ func parseNodeVersion(v string) (*NodeVersion, error) {
|
||||
// GetNodeVersion requests that the beacon node identify information about its implementation in a format
|
||||
// similar to a HTTP User-Agent field. ex: Lighthouse/v0.1.5 (Linux x86_64)
|
||||
func (c *Client) GetNodeVersion(ctx context.Context) (*NodeVersion, error) {
|
||||
b, err := c.get(ctx, getNodeVersionPath)
|
||||
b, err := c.Get(ctx, getNodeVersionPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error requesting node version")
|
||||
}
|
||||
@@ -318,7 +240,7 @@ func renderGetStatePath(id StateOrBlockId) string {
|
||||
// The return value contains the ssz-encoded bytes.
|
||||
func (c *Client) GetState(ctx context.Context, stateId StateOrBlockId) ([]byte, error) {
|
||||
statePath := path.Join(getStatePath, string(stateId))
|
||||
b, err := c.get(ctx, statePath, withSSZEncoding())
|
||||
b, err := c.Get(ctx, statePath, client.WithSSZEncoding())
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error requesting state by id = %s", stateId)
|
||||
}
|
||||
@@ -331,7 +253,7 @@ func (c *Client) GetState(ctx context.Context, stateId StateOrBlockId) ([]byte,
|
||||
// - finds the highest non-skipped block preceding the epoch
|
||||
// - returns the htr of the found block and returns this + the value of state_root from the block
|
||||
func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData, error) {
|
||||
body, err := c.get(ctx, getWeakSubjectivityPath)
|
||||
body, err := c.Get(ctx, getWeakSubjectivityPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -362,7 +284,7 @@ func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData
|
||||
// SubmitChangeBLStoExecution calls a beacon API endpoint to set the withdrawal addresses based on the given signed messages.
|
||||
// If the API responds with something other than OK there will be failure messages associated to the corresponding request message.
|
||||
func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apimiddleware.SignedBLSToExecutionChangeJson) error {
|
||||
u := c.baseURL.ResolveReference(&url.URL{Path: changeBLStoExecutionPath})
|
||||
u := c.BaseURL().ResolveReference(&url.URL{Path: changeBLStoExecutionPath})
|
||||
body, err := json.Marshal(request)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to marshal JSON")
|
||||
@@ -372,7 +294,7 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apim
|
||||
return errors.Wrap(err, "invalid format, failed to create new POST request object")
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
resp, err := c.hc.Do(req)
|
||||
resp, err := c.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -401,7 +323,7 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apim
|
||||
// GetBLStoExecutionChanges gets all the set withdrawal messages in the node's operation pool.
|
||||
// Returns a struct representation of json response.
|
||||
func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*apimiddleware.BLSToExecutionChangesPoolResponseJson, error) {
|
||||
body, err := c.get(ctx, changeBLStoExecutionPath)
|
||||
body, err := c.Get(ctx, changeBLStoExecutionPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -413,23 +335,6 @@ func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*apimiddleware.B
|
||||
return poolResponse, nil
|
||||
}
|
||||
|
||||
func non200Err(response *http.Response) error {
|
||||
bodyBytes, err := io.ReadAll(response.Body)
|
||||
var body string
|
||||
if err != nil {
|
||||
body = "(Unable to read response body.)"
|
||||
} else {
|
||||
body = "response body:\n" + string(bodyBytes)
|
||||
}
|
||||
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
|
||||
switch response.StatusCode {
|
||||
case 404:
|
||||
return errors.Wrap(ErrNotFound, msg)
|
||||
default:
|
||||
return errors.Wrap(ErrNotOK, msg)
|
||||
}
|
||||
}
|
||||
|
||||
type forkResponse struct {
|
||||
PreviousVersion string `json:"previous_version"`
|
||||
CurrentVersion string `json:"current_version"`
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
"net/url"
"testing"

"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)

@@ -17,17 +18,17 @@ func TestParseNodeVersion(t *testing.T) {
{
name: "empty string",
v: "",
err: ErrInvalidNodeVersion,
err: client.ErrInvalidNodeVersion,
},
{
name: "Prysm as the version string",
v: "Prysm",
err: ErrInvalidNodeVersion,
err: client.ErrInvalidNodeVersion,
},
{
name: "semver only",
v: "v2.0.6",
err: ErrInvalidNodeVersion,
err: client.ErrInvalidNodeVersion,
},
{
name: "complete version",
@@ -91,7 +92,7 @@ func TestValidHostname(t *testing.T) {
{
name: "hostname without port",
hostArg: "mydomain.org",
err: ErrMalformedHostname,
err: client.ErrMalformedHostname,
},
{
name: "hostname with port",
@@ -132,7 +133,7 @@ func TestValidHostname(t *testing.T) {
return
}
require.NoError(t, err)
require.Equal(t, c.joined, cl.baseURL.ResolveReference(&url.URL{Path: c.path}).String())
require.Equal(t, c.joined, cl.BaseURL().ResolveReference(&url.URL{Path: c.path}).String())
})
}
}
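TestParseNodeVersion above exercises strings like "Lighthouse/v0.1.5 (Linux x86_64)" and expects client.ErrInvalidNodeVersion when parsing fails. An illustrative pattern in the spirit of the package's versionRE (simplified, not the exact expression from client.go):

```go
package main

import (
	"fmt"
	"regexp"
)

// Illustrative only: implementation name, semver tag, optional system info.
var versionRE = regexp.MustCompile(`^(\w+)/(v\d+\.\d+\.\d+[-a-zA-Z0-9]*)\s*/?(.*)$`)

func main() {
	groups := versionRE.FindStringSubmatch("Lighthouse/v0.1.5 (Linux x86_64)")
	if len(groups) != 4 {
		// The cases named "empty string", "Prysm as the version string", and
		// "semver only" in the test above all land here.
		fmt.Println("could not be parsed")
		return
	}
	fmt.Printf("implementation=%s semver=%s systemInfo=%s\n", groups[1], groups[2], groups[3])
}
```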
@@ -1,13 +0,0 @@
package beacon

import "github.com/pkg/errors"

// ErrNotOK is used to indicate when an HTTP request to the Beacon Node API failed with any non-2xx response code.
// More specific errors may be returned, but an error in reaction to a non-2xx response will always wrap ErrNotOK.
var ErrNotOK = errors.New("did not receive 2xx response from API")

// ErrNotFound specifically means that a '404 - NOT FOUND' response was received from the API.
var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")

// ErrInvalidNodeVersion indicates that the /eth/v1/node/version api response format was not recognized.
var ErrInvalidNodeVersion = errors.New("invalid node version response")
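These sentinels now come from the shared api/client package (the checkpoint and test hunks above reference base.ErrNotOK and client.ErrInvalidNodeVersion). A small, self-contained illustration of why ErrNotFound is declared by wrapping ErrNotOK: errors.Is then matches either sentinel anywhere in a wrapped chain, which is what lets ComputeWeakSubjectivityCheckpoint fall back when the endpoint is missing. The names here are reused for illustration only:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// Same wrapping pattern as the deleted errors.go above.
var ErrNotOK = errors.New("did not receive 2xx response from API")
var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")

func main() {
	// A caller that adds its own request context on top...
	err := errors.Wrap(ErrNotFound, "code=404")

	// ...still matches both sentinels through the wrapped chain.
	fmt.Println(errors.Is(err, ErrNotFound)) // true
	fmt.Println(errors.Is(err, ErrNotOK))    // true
}
```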
@@ -3,7 +3,6 @@ package builder
import (
"math/big"

"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -162,9 +161,6 @@ func WrappedBuilderBidCapella(p *ethpb.BuilderBidCapella) (Bid, error) {

// Header returns the execution data interface.
func (b builderBidCapella) Header() (interfaces.ExecutionData, error) {
if b.p == nil {
return nil, errors.New("builder bid is nil")
}
// We have to convert big endian to little endian because the value is coming from the execution layer.
v := big.NewInt(0).SetBytes(bytesutil.ReverseByteOrder(b.p.Value))
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, math.WeiToGwei(v))
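The Header hunk above builds the bid value with big.NewInt(0).SetBytes(bytesutil.ReverseByteOrder(b.p.Value)), flipping the byte order of the raw value before SetBytes (which reads big-endian bytes), and then converts wei to gwei via math.WeiToGwei. A standalone sketch of that round trip, with a local helper standing in for bytesutil.ReverseByteOrder:

```go
package main

import (
	"fmt"
	"math/big"
)

// reverseByteOrder returns a copy of b with the byte order flipped
// (illustrative stand-in for Prysm's bytesutil.ReverseByteOrder).
func reverseByteOrder(b []byte) []byte {
	out := make([]byte, len(b))
	for i := range b {
		out[i] = b[len(b)-1-i]
	}
	return out
}

func main() {
	// 1 ETH in wei (10^18), then flipped to the reversed byte order a bid value arrives in.
	wei := new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil)
	raw := reverseByteOrder(wei.Bytes())

	// Flip back before SetBytes, as builderBidCapella.Header does.
	v := new(big.Int).SetBytes(reverseByteOrder(raw))
	fmt.Println(v.String()) // 1000000000000000000

	// Wei to gwei (what math.WeiToGwei computes) is integer division by 10^9.
	gwei := new(big.Int).Div(v, big.NewInt(1_000_000_000))
	fmt.Println(gwei.String()) // 1000000000
}
```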
@@ -11,7 +11,6 @@ import (
"net/url"
"strings"
"text/template"
"time"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -36,7 +35,6 @@ const (
var errMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
var errMalformedRequest = errors.New("required request data are missing")
var errNotBlinded = errors.New("submitted block is not blinded")
var submitBlindedBlockTimeout = 3 * time.Second

// ClientOpt is a functional option for the Client type (http.Client wrapper)
type ClientOpt func(*Client)
@@ -292,8 +290,6 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockBellatrix value body in SubmitBlindedBlock")
}

ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
defer cancel()
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Bellatrix))
}
@@ -325,8 +321,6 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockCapella value body in SubmitBlindedBlockCapella")
}

ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
defer cancel()
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Capella))
}
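The two SubmitBlindedBlock hunks drop the per-call context.WithTimeout wrapper built from the 3-second submitBlindedBlockTimeout declared above, leaving deadline control to the caller. For reference, a minimal sketch of the pattern being removed:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

var submitBlindedBlockTimeout = 3 * time.Second // same value as in the diff

func submit(ctx context.Context) error {
	// The removed pattern: bound this one call to 3s regardless of the parent context.
	ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
	defer cancel()

	select {
	case <-time.After(5 * time.Second): // stand-in for a slow builder response
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	fmt.Println(submit(context.Background())) // context deadline exceeded
}
```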
@@ -14,14 +14,17 @@ import (
|
||||
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// SignedValidatorRegistration a struct for signed validator registrations.
|
||||
type SignedValidatorRegistration struct {
|
||||
*eth.SignedValidatorRegistrationV1
|
||||
}
|
||||
|
||||
// ValidatorRegistration a struct for validator registrations.
|
||||
type ValidatorRegistration struct {
|
||||
*eth.ValidatorRegistrationV1
|
||||
}
|
||||
|
||||
// MarshalJSON returns a json representation copy of signed validator registration.
|
||||
func (r *SignedValidatorRegistration) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Message *ValidatorRegistration `json:"message"`
|
||||
@@ -32,6 +35,7 @@ func (r *SignedValidatorRegistration) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// UnmarshalJSON returns a byte representation of signed validator registration from json.
|
||||
func (r *SignedValidatorRegistration) UnmarshalJSON(b []byte) error {
|
||||
if r.SignedValidatorRegistrationV1 == nil {
|
||||
r.SignedValidatorRegistrationV1 = ð.SignedValidatorRegistrationV1{}
|
||||
@@ -48,6 +52,7 @@ func (r *SignedValidatorRegistration) UnmarshalJSON(b []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON returns a json representation copy of validator registration.
|
||||
func (r *ValidatorRegistration) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
|
||||
@@ -62,6 +67,7 @@ func (r *ValidatorRegistration) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// UnmarshalJSON returns a byte representation of validator registration from json.
|
||||
func (r *ValidatorRegistration) UnmarshalJSON(b []byte) error {
|
||||
if r.ValidatorRegistrationV1 == nil {
|
||||
r.ValidatorRegistrationV1 = ð.ValidatorRegistrationV1{}
|
||||
@@ -92,6 +98,7 @@ func (r *ValidatorRegistration) UnmarshalJSON(b []byte) error {
|
||||
var errInvalidUint256 = errors.New("invalid Uint256")
|
||||
var errDecodeUint256 = errors.New("unable to decode into Uint256")
|
||||
|
||||
// Uint256 a wrapper representation of big.Int
|
||||
type Uint256 struct {
|
||||
*big.Int
|
||||
}
|
||||
@@ -118,7 +125,7 @@ func sszBytesToUint256(b []byte) (Uint256, error) {
|
||||
return Uint256{Int: bi}, nil
|
||||
}
|
||||
|
||||
// SSZBytes creates an ssz-style (little-endian byte slice) representation of the Uint256
|
||||
// SSZBytes creates an ssz-style (little-endian byte slice) representation of the Uint256.
|
||||
func (s Uint256) SSZBytes() []byte {
|
||||
if !isValidUint256(s.Int) {
|
||||
return []byte{}
|
||||
@@ -126,18 +133,19 @@ func (s Uint256) SSZBytes() []byte {
|
||||
return bytesutil.PadTo(bytesutil.ReverseByteOrder(s.Int.Bytes()), 32)
|
||||
}
|
||||
|
||||
// UnmarshalJSON takes in a byte array and unmarshals the value in Uint256
|
||||
func (s *Uint256) UnmarshalJSON(t []byte) error {
|
||||
start := 0
|
||||
end := len(t)
|
||||
if t[0] == '"' {
|
||||
start += 1
|
||||
if len(t) < 2 {
|
||||
return errors.Errorf("provided Uint256 json string is too short: %s", string(t))
|
||||
}
|
||||
if t[end-1] == '"' {
|
||||
end -= 1
|
||||
if t[0] != '"' || t[end-1] != '"' {
|
||||
return errors.Errorf("provided Uint256 json string is malformed: %s", string(t))
|
||||
}
|
||||
return s.UnmarshalText(t[start:end])
|
||||
return s.UnmarshalText(t[1 : end-1])
|
||||
}
|
||||
|
||||
// UnmarshalText takes in a byte array and unmarshals the text in Uint256
|
||||
func (s *Uint256) UnmarshalText(t []byte) error {
|
||||
if s.Int == nil {
|
||||
s.Int = big.NewInt(0)
|
||||
@@ -153,6 +161,7 @@ func (s *Uint256) UnmarshalText(t []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON returns a json byte representation of Uint256.
|
||||
func (s Uint256) MarshalJSON() ([]byte, error) {
|
||||
t, err := s.MarshalText()
|
||||
if err != nil {
|
||||
@@ -163,6 +172,7 @@ func (s Uint256) MarshalJSON() ([]byte, error) {
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// MarshalText returns a text byte representation of Uint256.
|
||||
func (s Uint256) MarshalText() ([]byte, error) {
|
||||
if !isValidUint256(s.Int) {
|
||||
return nil, errors.Wrapf(errInvalidUint256, "value=%s", s.Int)
|
||||
@@ -170,22 +180,27 @@ func (s Uint256) MarshalText() ([]byte, error) {
|
||||
return []byte(s.String()), nil
|
||||
}
|
||||
|
||||
// Uint64String is a custom type that allows marshalling from text to uint64 and vice versa.
|
||||
type Uint64String uint64
|
||||
|
||||
// UnmarshalText takes a byte array and unmarshals the text in Uint64String.
|
||||
func (s *Uint64String) UnmarshalText(t []byte) error {
|
||||
u, err := strconv.ParseUint(string(t), 10, 64)
|
||||
*s = Uint64String(u)
|
||||
return err
|
||||
}
|
||||
|
||||
// MarshalText returns a byte representation of the text from Uint64String.
|
||||
func (s Uint64String) MarshalText() ([]byte, error) {
|
||||
return []byte(fmt.Sprintf("%d", s)), nil
|
||||
}
|
||||
|
||||
// VersionResponse is a JSON representation of a field in the builder API header response.
|
||||
type VersionResponse struct {
|
||||
Version string `json:"version"`
|
||||
}
|
||||
|
||||
// ExecHeaderResponse is a JSON representation of the builder API header response for Bellatrix.
|
||||
type ExecHeaderResponse struct {
|
||||
Version string `json:"version"`
|
||||
Data struct {
|
||||
@@ -194,6 +209,7 @@ type ExecHeaderResponse struct {
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
// ToProto returns a SignedBuilderBid from ExecHeaderResponse for Bellatrix.
|
||||
func (ehr *ExecHeaderResponse) ToProto() (*eth.SignedBuilderBid, error) {
|
||||
bb, err := ehr.Data.Message.ToProto()
|
||||
if err != nil {
|
||||
@@ -205,6 +221,7 @@ func (ehr *ExecHeaderResponse) ToProto() (*eth.SignedBuilderBid, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ToProto returns a BuilderBid Proto for Bellatrix.
|
||||
func (bb *BuilderBid) ToProto() (*eth.BuilderBid, error) {
|
||||
header, err := bb.Header.ToProto()
|
||||
if err != nil {
|
||||
@@ -217,31 +234,34 @@ func (bb *BuilderBid) ToProto() (*eth.BuilderBid, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ToProto returns a ExecutionPayloadHeader for Bellatrix.
|
||||
func (h *ExecutionPayloadHeader) ToProto() (*v1.ExecutionPayloadHeader, error) {
|
||||
return &v1.ExecutionPayloadHeader{
|
||||
ParentHash: h.ParentHash,
|
||||
FeeRecipient: h.FeeRecipient,
|
||||
StateRoot: h.StateRoot,
|
||||
ReceiptsRoot: h.ReceiptsRoot,
|
||||
LogsBloom: h.LogsBloom,
|
||||
PrevRandao: h.PrevRandao,
|
||||
ParentHash: bytesutil.SafeCopyBytes(h.ParentHash),
|
||||
FeeRecipient: bytesutil.SafeCopyBytes(h.FeeRecipient),
|
||||
StateRoot: bytesutil.SafeCopyBytes(h.StateRoot),
|
||||
ReceiptsRoot: bytesutil.SafeCopyBytes(h.ReceiptsRoot),
|
||||
LogsBloom: bytesutil.SafeCopyBytes(h.LogsBloom),
|
||||
PrevRandao: bytesutil.SafeCopyBytes(h.PrevRandao),
|
||||
BlockNumber: uint64(h.BlockNumber),
|
||||
GasLimit: uint64(h.GasLimit),
|
||||
GasUsed: uint64(h.GasUsed),
|
||||
Timestamp: uint64(h.Timestamp),
|
||||
ExtraData: h.ExtraData,
|
||||
BaseFeePerGas: h.BaseFeePerGas.SSZBytes(),
|
||||
BlockHash: h.BlockHash,
|
||||
TransactionsRoot: h.TransactionsRoot,
|
||||
ExtraData: bytesutil.SafeCopyBytes(h.ExtraData),
|
||||
BaseFeePerGas: bytesutil.SafeCopyBytes(h.BaseFeePerGas.SSZBytes()),
|
||||
BlockHash: bytesutil.SafeCopyBytes(h.BlockHash),
|
||||
TransactionsRoot: bytesutil.SafeCopyBytes(h.TransactionsRoot),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// BuilderBid is part of ExecHeaderResponse for Bellatrix.
|
||||
type BuilderBid struct {
|
||||
Header *ExecutionPayloadHeader `json:"header"`
|
||||
Value Uint256 `json:"value"`
|
||||
Pubkey hexutil.Bytes `json:"pubkey"`
|
||||
}
|
||||
|
||||
// ExecutionPayloadHeader is a field in BuilderBid.
|
||||
type ExecutionPayloadHeader struct {
|
||||
ParentHash hexutil.Bytes `json:"parent_hash"`
|
||||
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
|
||||
@@ -260,6 +280,7 @@ type ExecutionPayloadHeader struct {
|
||||
*v1.ExecutionPayloadHeader
|
||||
}
|
||||
|
||||
// MarshalJSON returns the JSON bytes representation of ExecutionPayloadHeader.
|
||||
func (h *ExecutionPayloadHeader) MarshalJSON() ([]byte, error) {
|
||||
type MarshalCaller ExecutionPayloadHeader
|
||||
baseFeePerGas, err := sszBytesToUint256(h.ExecutionPayloadHeader.BaseFeePerGas)
|
||||
@@ -284,6 +305,7 @@ func (h *ExecutionPayloadHeader) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// UnmarshalJSON takes in a JSON byte array and sets ExecutionPayloadHeader.
|
||||
func (h *ExecutionPayloadHeader) UnmarshalJSON(b []byte) error {
|
||||
type UnmarshalCaller ExecutionPayloadHeader
|
||||
uc := &UnmarshalCaller{}
|
||||
@@ -297,11 +319,13 @@ func (h *ExecutionPayloadHeader) UnmarshalJSON(b []byte) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecPayloadResponse is the builder API /eth/v1/builder/blinded_blocks for Bellatrix.
|
||||
type ExecPayloadResponse struct {
|
||||
Version string `json:"version"`
|
||||
Data ExecutionPayload `json:"data"`
|
||||
}
|
||||
|
||||
// ExecutionPayload is a field of ExecPayloadResponse
|
||||
type ExecutionPayload struct {
|
||||
ParentHash hexutil.Bytes `json:"parent_hash"`
|
||||
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
|
||||
@@ -319,29 +343,31 @@ type ExecutionPayload struct {
|
||||
Transactions []hexutil.Bytes `json:"transactions"`
|
||||
}
|
||||
|
||||
// ToProto returns a ExecutionPayload Proto from ExecPayloadResponse
|
||||
func (r *ExecPayloadResponse) ToProto() (*v1.ExecutionPayload, error) {
|
||||
return r.Data.ToProto()
|
||||
}
|
||||
|
||||
// ToProto returns a ExecutionPayload Proto
|
||||
func (p *ExecutionPayload) ToProto() (*v1.ExecutionPayload, error) {
|
||||
txs := make([][]byte, len(p.Transactions))
|
||||
for i := range p.Transactions {
|
||||
txs[i] = p.Transactions[i]
|
||||
txs[i] = bytesutil.SafeCopyBytes(p.Transactions[i])
|
||||
}
|
||||
return &v1.ExecutionPayload{
|
||||
ParentHash: p.ParentHash,
|
||||
FeeRecipient: p.FeeRecipient,
|
||||
StateRoot: p.StateRoot,
|
||||
ReceiptsRoot: p.ReceiptsRoot,
|
||||
LogsBloom: p.LogsBloom,
|
||||
PrevRandao: p.PrevRandao,
|
||||
ParentHash: bytesutil.SafeCopyBytes(p.ParentHash),
|
||||
FeeRecipient: bytesutil.SafeCopyBytes(p.FeeRecipient),
|
||||
StateRoot: bytesutil.SafeCopyBytes(p.StateRoot),
|
||||
ReceiptsRoot: bytesutil.SafeCopyBytes(p.ReceiptsRoot),
|
||||
LogsBloom: bytesutil.SafeCopyBytes(p.LogsBloom),
|
||||
PrevRandao: bytesutil.SafeCopyBytes(p.PrevRandao),
|
||||
BlockNumber: uint64(p.BlockNumber),
|
||||
GasLimit: uint64(p.GasLimit),
|
||||
GasUsed: uint64(p.GasUsed),
|
||||
Timestamp: uint64(p.Timestamp),
|
||||
ExtraData: p.ExtraData,
|
||||
BaseFeePerGas: p.BaseFeePerGas.SSZBytes(),
|
||||
BlockHash: p.BlockHash,
|
||||
ExtraData: bytesutil.SafeCopyBytes(p.ExtraData),
|
||||
BaseFeePerGas: bytesutil.SafeCopyBytes(p.BaseFeePerGas.SSZBytes()),
|
||||
BlockHash: bytesutil.SafeCopyBytes(p.BlockHash),
|
||||
Transactions: txs,
|
||||
}, nil
|
||||
}
|
||||
@@ -355,22 +381,22 @@ func FromProto(payload *v1.ExecutionPayload) (ExecutionPayload, error) {
|
||||
}
|
||||
txs := make([]hexutil.Bytes, len(payload.Transactions))
|
||||
for i := range payload.Transactions {
|
||||
txs[i] = payload.Transactions[i]
|
||||
txs[i] = bytesutil.SafeCopyBytes(payload.Transactions[i])
|
||||
}
|
||||
return ExecutionPayload{
|
||||
ParentHash: payload.ParentHash,
|
||||
FeeRecipient: payload.FeeRecipient,
|
||||
StateRoot: payload.StateRoot,
|
||||
ReceiptsRoot: payload.ReceiptsRoot,
|
||||
LogsBloom: payload.LogsBloom,
|
||||
PrevRandao: payload.PrevRandao,
|
||||
ParentHash: bytesutil.SafeCopyBytes(payload.ParentHash),
|
||||
FeeRecipient: bytesutil.SafeCopyBytes(payload.FeeRecipient),
|
||||
StateRoot: bytesutil.SafeCopyBytes(payload.StateRoot),
|
||||
ReceiptsRoot: bytesutil.SafeCopyBytes(payload.ReceiptsRoot),
|
||||
LogsBloom: bytesutil.SafeCopyBytes(payload.LogsBloom),
|
||||
PrevRandao: bytesutil.SafeCopyBytes(payload.PrevRandao),
|
||||
BlockNumber: Uint64String(payload.BlockNumber),
|
||||
GasLimit: Uint64String(payload.GasLimit),
|
||||
GasUsed: Uint64String(payload.GasUsed),
|
||||
Timestamp: Uint64String(payload.Timestamp),
|
||||
ExtraData: payload.ExtraData,
|
||||
ExtraData: bytesutil.SafeCopyBytes(payload.ExtraData),
|
||||
BaseFeePerGas: bFee,
|
||||
BlockHash: payload.BlockHash,
|
||||
BlockHash: bytesutil.SafeCopyBytes(payload.BlockHash),
|
||||
Transactions: txs,
|
||||
}, nil
|
||||
}
|
||||
@@ -384,36 +410,37 @@ func FromProtoCapella(payload *v1.ExecutionPayloadCapella) (ExecutionPayloadCape
|
||||
}
|
||||
txs := make([]hexutil.Bytes, len(payload.Transactions))
|
||||
for i := range payload.Transactions {
|
||||
txs[i] = payload.Transactions[i]
|
||||
txs[i] = bytesutil.SafeCopyBytes(payload.Transactions[i])
|
||||
}
|
||||
withdrawals := make([]Withdrawal, len(payload.Withdrawals))
|
||||
for i, w := range payload.Withdrawals {
|
||||
withdrawals[i] = Withdrawal{
|
||||
Index: Uint256{Int: big.NewInt(0).SetUint64(w.Index)},
|
||||
ValidatorIndex: Uint256{Int: big.NewInt(0).SetUint64(uint64(w.ValidatorIndex))},
|
||||
Address: w.Address,
|
||||
Address: bytesutil.SafeCopyBytes(w.Address),
|
||||
Amount: Uint256{Int: big.NewInt(0).SetUint64(w.Amount)},
|
||||
}
|
||||
}
|
||||
return ExecutionPayloadCapella{
|
||||
ParentHash: payload.ParentHash,
|
||||
FeeRecipient: payload.FeeRecipient,
|
||||
StateRoot: payload.StateRoot,
|
||||
ReceiptsRoot: payload.ReceiptsRoot,
|
||||
LogsBloom: payload.LogsBloom,
|
||||
PrevRandao: payload.PrevRandao,
|
||||
ParentHash: bytesutil.SafeCopyBytes(payload.ParentHash),
|
||||
FeeRecipient: bytesutil.SafeCopyBytes(payload.FeeRecipient),
|
||||
StateRoot: bytesutil.SafeCopyBytes(payload.StateRoot),
|
||||
ReceiptsRoot: bytesutil.SafeCopyBytes(payload.ReceiptsRoot),
|
||||
LogsBloom: bytesutil.SafeCopyBytes(payload.LogsBloom),
|
||||
PrevRandao: bytesutil.SafeCopyBytes(payload.PrevRandao),
|
||||
BlockNumber: Uint64String(payload.BlockNumber),
|
||||
GasLimit: Uint64String(payload.GasLimit),
|
||||
GasUsed: Uint64String(payload.GasUsed),
|
||||
Timestamp: Uint64String(payload.Timestamp),
|
||||
ExtraData: payload.ExtraData,
|
||||
ExtraData: bytesutil.SafeCopyBytes(payload.ExtraData),
|
||||
BaseFeePerGas: bFee,
|
||||
BlockHash: payload.BlockHash,
|
||||
BlockHash: bytesutil.SafeCopyBytes(payload.BlockHash),
|
||||
Transactions: txs,
|
||||
Withdrawals: withdrawals,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ExecHeaderResponseCapella is the response of the builder API /eth/v1/builder/header/{slot}/{parent_hash}/{pubkey} for Capella.
|
||||
type ExecHeaderResponseCapella struct {
|
||||
Data struct {
|
||||
Signature hexutil.Bytes `json:"signature"`
|
||||
@@ -421,6 +448,7 @@ type ExecHeaderResponseCapella struct {
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
// ToProto returns a SignedBuilderBidCapella Proto from ExecHeaderResponseCapella.
|
||||
func (ehr *ExecHeaderResponseCapella) ToProto() (*eth.SignedBuilderBidCapella, error) {
|
||||
bb, err := ehr.Data.Message.ToProto()
|
||||
if err != nil {
|
||||
@@ -428,10 +456,11 @@ func (ehr *ExecHeaderResponseCapella) ToProto() (*eth.SignedBuilderBidCapella, e
|
||||
}
|
||||
return ð.SignedBuilderBidCapella{
|
||||
Message: bb,
|
||||
Signature: ehr.Data.Signature,
|
||||
Signature: bytesutil.SafeCopyBytes(ehr.Data.Signature),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ToProto returns a BuilderBidCapella Proto.
|
||||
func (bb *BuilderBidCapella) ToProto() (*eth.BuilderBidCapella, error) {
|
||||
header, err := bb.Header.ToProto()
|
||||
if err != nil {
|
||||
@@ -439,37 +468,40 @@ func (bb *BuilderBidCapella) ToProto() (*eth.BuilderBidCapella, error) {
|
||||
}
|
||||
return ð.BuilderBidCapella{
|
||||
Header: header,
|
||||
Value: bb.Value.SSZBytes(),
|
||||
Pubkey: bb.Pubkey,
|
||||
Value: bytesutil.SafeCopyBytes(bb.Value.SSZBytes()),
|
||||
Pubkey: bytesutil.SafeCopyBytes(bb.Pubkey),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ToProto returns an ExecutionPayloadHeaderCapella Proto.
|
||||
func (h *ExecutionPayloadHeaderCapella) ToProto() (*v1.ExecutionPayloadHeaderCapella, error) {
|
||||
return &v1.ExecutionPayloadHeaderCapella{
|
||||
ParentHash: h.ParentHash,
|
||||
FeeRecipient: h.FeeRecipient,
|
||||
StateRoot: h.StateRoot,
|
||||
ReceiptsRoot: h.ReceiptsRoot,
|
||||
LogsBloom: h.LogsBloom,
|
||||
PrevRandao: h.PrevRandao,
|
||||
ParentHash: bytesutil.SafeCopyBytes(h.ParentHash),
|
||||
FeeRecipient: bytesutil.SafeCopyBytes(h.FeeRecipient),
|
||||
StateRoot: bytesutil.SafeCopyBytes(h.StateRoot),
|
||||
ReceiptsRoot: bytesutil.SafeCopyBytes(h.ReceiptsRoot),
|
||||
LogsBloom: bytesutil.SafeCopyBytes(h.LogsBloom),
|
||||
PrevRandao: bytesutil.SafeCopyBytes(h.PrevRandao),
|
||||
BlockNumber: uint64(h.BlockNumber),
|
||||
GasLimit: uint64(h.GasLimit),
|
||||
GasUsed: uint64(h.GasUsed),
|
||||
Timestamp: uint64(h.Timestamp),
|
||||
ExtraData: h.ExtraData,
|
||||
BaseFeePerGas: h.BaseFeePerGas.SSZBytes(),
|
||||
BlockHash: h.BlockHash,
|
||||
TransactionsRoot: h.TransactionsRoot,
|
||||
WithdrawalsRoot: h.WithdrawalsRoot,
|
||||
ExtraData: bytesutil.SafeCopyBytes(h.ExtraData),
|
||||
BaseFeePerGas: bytesutil.SafeCopyBytes(h.BaseFeePerGas.SSZBytes()),
|
||||
BlockHash: bytesutil.SafeCopyBytes(h.BlockHash),
|
||||
TransactionsRoot: bytesutil.SafeCopyBytes(h.TransactionsRoot),
|
||||
WithdrawalsRoot: bytesutil.SafeCopyBytes(h.WithdrawalsRoot),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// BuilderBidCapella is field of ExecHeaderResponseCapella.
|
||||
type BuilderBidCapella struct {
|
||||
Header *ExecutionPayloadHeaderCapella `json:"header"`
|
||||
Value Uint256 `json:"value"`
|
||||
Pubkey hexutil.Bytes `json:"pubkey"`
|
||||
}
|
||||
|
||||
// ExecutionPayloadHeaderCapella is a field in BuilderBidCapella.
|
||||
type ExecutionPayloadHeaderCapella struct {
|
||||
ParentHash hexutil.Bytes `json:"parent_hash"`
|
||||
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
|
||||
@@ -489,6 +521,7 @@ type ExecutionPayloadHeaderCapella struct {
|
||||
*v1.ExecutionPayloadHeaderCapella
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte representation of ExecutionPayloadHeaderCapella.
|
||||
func (h *ExecutionPayloadHeaderCapella) MarshalJSON() ([]byte, error) {
|
||||
type MarshalCaller ExecutionPayloadHeaderCapella
|
||||
baseFeePerGas, err := sszBytesToUint256(h.ExecutionPayloadHeaderCapella.BaseFeePerGas)
|
||||
@@ -514,6 +547,7 @@ func (h *ExecutionPayloadHeaderCapella) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// UnmarshalJSON takes a JSON byte array and sets ExecutionPayloadHeaderCapella.
|
||||
func (h *ExecutionPayloadHeaderCapella) UnmarshalJSON(b []byte) error {
|
||||
type UnmarshalCaller ExecutionPayloadHeaderCapella
|
||||
uc := &UnmarshalCaller{}
|
||||
@@ -527,11 +561,13 @@ func (h *ExecutionPayloadHeaderCapella) UnmarshalJSON(b []byte) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecPayloadResponseCapella is the response of the builder API /eth/v1/builder/blinded_blocks for Capella.
|
||||
type ExecPayloadResponseCapella struct {
|
||||
Version string `json:"version"`
|
||||
Data ExecutionPayloadCapella `json:"data"`
|
||||
}
|
||||
|
||||
// ExecutionPayloadCapella is a field of ExecPayloadResponseCapella.
|
||||
type ExecutionPayloadCapella struct {
|
||||
ParentHash hexutil.Bytes `json:"parent_hash"`
|
||||
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
|
||||
@@ -550,43 +586,46 @@ type ExecutionPayloadCapella struct {
|
||||
Withdrawals []Withdrawal `json:"withdrawals"`
|
||||
}
|
||||
|
||||
// ToProto returns an ExecutionPayloadCapella Proto.
|
||||
func (r *ExecPayloadResponseCapella) ToProto() (*v1.ExecutionPayloadCapella, error) {
|
||||
return r.Data.ToProto()
|
||||
}
|
||||
|
||||
// ToProto returns an ExecutionPayloadCapella Proto.
|
||||
func (p *ExecutionPayloadCapella) ToProto() (*v1.ExecutionPayloadCapella, error) {
|
||||
txs := make([][]byte, len(p.Transactions))
|
||||
for i := range p.Transactions {
|
||||
txs[i] = p.Transactions[i]
|
||||
txs[i] = bytesutil.SafeCopyBytes(p.Transactions[i])
|
||||
}
|
||||
withdrawals := make([]*v1.Withdrawal, len(p.Withdrawals))
|
||||
for i, w := range p.Withdrawals {
|
||||
withdrawals[i] = &v1.Withdrawal{
|
||||
Index: w.Index.Uint64(),
|
||||
ValidatorIndex: types.ValidatorIndex(w.ValidatorIndex.Uint64()),
|
||||
Address: w.Address,
|
||||
Address: bytesutil.SafeCopyBytes(w.Address),
|
||||
Amount: w.Amount.Uint64(),
|
||||
}
|
||||
}
|
||||
return &v1.ExecutionPayloadCapella{
|
||||
ParentHash: p.ParentHash,
|
||||
FeeRecipient: p.FeeRecipient,
|
||||
StateRoot: p.StateRoot,
|
||||
ReceiptsRoot: p.ReceiptsRoot,
|
||||
LogsBloom: p.LogsBloom,
|
||||
PrevRandao: p.PrevRandao,
|
||||
ParentHash: bytesutil.SafeCopyBytes(p.ParentHash),
|
||||
FeeRecipient: bytesutil.SafeCopyBytes(p.FeeRecipient),
|
||||
StateRoot: bytesutil.SafeCopyBytes(p.StateRoot),
|
||||
ReceiptsRoot: bytesutil.SafeCopyBytes(p.ReceiptsRoot),
|
||||
LogsBloom: bytesutil.SafeCopyBytes(p.LogsBloom),
|
||||
PrevRandao: bytesutil.SafeCopyBytes(p.PrevRandao),
|
||||
BlockNumber: uint64(p.BlockNumber),
|
||||
GasLimit: uint64(p.GasLimit),
|
||||
GasUsed: uint64(p.GasUsed),
|
||||
Timestamp: uint64(p.Timestamp),
|
||||
ExtraData: p.ExtraData,
|
||||
BaseFeePerGas: p.BaseFeePerGas.SSZBytes(),
|
||||
BlockHash: p.BlockHash,
|
||||
ExtraData: bytesutil.SafeCopyBytes(p.ExtraData),
|
||||
BaseFeePerGas: bytesutil.SafeCopyBytes(p.BaseFeePerGas.SSZBytes()),
|
||||
BlockHash: bytesutil.SafeCopyBytes(p.BlockHash),
|
||||
Transactions: txs,
|
||||
Withdrawals: withdrawals,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Withdrawal is a field of ExecutionPayloadCapella.
|
||||
type Withdrawal struct {
|
||||
Index Uint256 `json:"index"`
|
||||
ValidatorIndex Uint256 `json:"validator_index"`
|
||||
@@ -594,18 +633,22 @@ type Withdrawal struct {
|
||||
Amount Uint256 `json:"amount"`
|
||||
}
|
||||
|
||||
// SignedBlindedBeaconBlockBellatrix is the request object for builder API /eth/v1/builder/blinded_blocks.
|
||||
type SignedBlindedBeaconBlockBellatrix struct {
|
||||
*eth.SignedBlindedBeaconBlockBellatrix
|
||||
}
|
||||
|
||||
// BlindedBeaconBlockBellatrix is a field in SignedBlindedBeaconBlockBellatrix.
|
||||
type BlindedBeaconBlockBellatrix struct {
|
||||
*eth.BlindedBeaconBlockBellatrix
|
||||
}
|
||||
|
||||
// BlindedBeaconBlockBodyBellatrix is a field in BlindedBeaconBlockBellatrix.
|
||||
type BlindedBeaconBlockBodyBellatrix struct {
|
||||
*eth.BlindedBeaconBlockBodyBellatrix
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of SignedBlindedBeaconBlockBellatrix.
|
||||
func (r *SignedBlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Message *BlindedBeaconBlockBellatrix `json:"message"`
|
||||
@@ -616,6 +659,7 @@ func (r *SignedBlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of BlindedBeaconBlockBellatrix.
|
||||
func (b *BlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Slot string `json:"slot"`
|
||||
@@ -632,10 +676,12 @@ func (b *BlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// ProposerSlashing is a field in BlindedBeaconBlockBodyCapella.
|
||||
type ProposerSlashing struct {
|
||||
*eth.ProposerSlashing
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of ProposerSlashing.
|
||||
func (s *ProposerSlashing) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
SignedHeader1 *SignedBeaconBlockHeader `json:"signed_header_1"`
|
||||
@@ -646,10 +692,12 @@ func (s *ProposerSlashing) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// SignedBeaconBlockHeader is a field of ProposerSlashing.
|
||||
type SignedBeaconBlockHeader struct {
|
||||
*eth.SignedBeaconBlockHeader
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of SignedBeaconBlockHeader.
|
||||
func (h *SignedBeaconBlockHeader) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Header *BeaconBlockHeader `json:"message"`
|
||||
@@ -660,10 +708,12 @@ func (h *SignedBeaconBlockHeader) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// BeaconBlockHeader is a field of SignedBeaconBlockHeader.
|
||||
type BeaconBlockHeader struct {
|
||||
*eth.BeaconBlockHeader
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of BeaconBlockHeader.
|
||||
func (h *BeaconBlockHeader) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Slot string `json:"slot"`
|
||||
@@ -680,10 +730,12 @@ func (h *BeaconBlockHeader) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// IndexedAttestation is a field of AttesterSlashing.
|
||||
type IndexedAttestation struct {
|
||||
*eth.IndexedAttestation
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of IndexedAttestation.
|
||||
func (a *IndexedAttestation) MarshalJSON() ([]byte, error) {
|
||||
indices := make([]string, len(a.IndexedAttestation.AttestingIndices))
|
||||
for i := range a.IndexedAttestation.AttestingIndices {
|
||||
@@ -700,10 +752,12 @@ func (a *IndexedAttestation) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// AttesterSlashing is a field of a Beacon Block Body.
|
||||
type AttesterSlashing struct {
|
||||
*eth.AttesterSlashing
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of AttesterSlashing.
|
||||
func (s *AttesterSlashing) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Attestation1 *IndexedAttestation `json:"attestation_1"`
|
||||
@@ -714,10 +768,12 @@ func (s *AttesterSlashing) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// Checkpoint is a field of AttestationData.
|
||||
type Checkpoint struct {
|
||||
*eth.Checkpoint
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of Checkpoint.
|
||||
func (c *Checkpoint) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Epoch string `json:"epoch"`
|
||||
@@ -728,10 +784,12 @@ func (c *Checkpoint) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// AttestationData is a field of IndexedAttestation.
|
||||
type AttestationData struct {
|
||||
*eth.AttestationData
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of AttestationData.
|
||||
func (a *AttestationData) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Slot string `json:"slot"`
|
||||
@@ -748,10 +806,12 @@ func (a *AttestationData) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// Attestation is a field of Beacon Block Body.
|
||||
type Attestation struct {
|
||||
*eth.Attestation
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of Attestation.
|
||||
func (a *Attestation) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
AggregationBits hexutil.Bytes `json:"aggregation_bits"`
|
||||
@@ -764,10 +824,12 @@ func (a *Attestation) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// DepositData is a field of Deposit.
|
||||
type DepositData struct {
|
||||
*eth.Deposit_Data
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of DepositData.
|
||||
func (d *DepositData) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
PublicKey hexutil.Bytes `json:"pubkey"`
|
||||
@@ -782,10 +844,12 @@ func (d *DepositData) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// Deposit is a field of Beacon Block Body.
|
||||
type Deposit struct {
|
||||
*eth.Deposit
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of Deposit.
|
||||
func (d *Deposit) MarshalJSON() ([]byte, error) {
|
||||
proof := make([]hexutil.Bytes, len(d.Proof))
|
||||
for i := range d.Proof {
|
||||
@@ -800,10 +864,12 @@ func (d *Deposit) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// SignedVoluntaryExit is a field of Beacon Block Body.
|
||||
type SignedVoluntaryExit struct {
|
||||
*eth.SignedVoluntaryExit
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of SignedVoluntaryExit.
|
||||
func (sve *SignedVoluntaryExit) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Message *VoluntaryExit `json:"message"`
|
||||
@@ -814,10 +880,12 @@ func (sve *SignedVoluntaryExit) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// VoluntaryExit is a field in SignedVoluntaryExit
|
||||
type VoluntaryExit struct {
|
||||
*eth.VoluntaryExit
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of VoluntaryExit
|
||||
func (ve *VoluntaryExit) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Epoch string `json:"epoch"`
|
||||
@@ -828,10 +896,12 @@ func (ve *VoluntaryExit) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// SyncAggregate is a field of Beacon Block Body.
|
||||
type SyncAggregate struct {
|
||||
*eth.SyncAggregate
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of SyncAggregate.
|
||||
func (s *SyncAggregate) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
SyncCommitteeBits hexutil.Bytes `json:"sync_committee_bits"`
|
||||
@@ -842,10 +912,12 @@ func (s *SyncAggregate) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// Eth1Data is a field of Beacon Block Body.
|
||||
type Eth1Data struct {
|
||||
*eth.Eth1Data
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of Eth1Data.
|
||||
func (e *Eth1Data) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
DepositRoot hexutil.Bytes `json:"deposit_root"`
|
||||
@@ -858,6 +930,7 @@ func (e *Eth1Data) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of BlindedBeaconBlockBodyBellatrix.
|
||||
func (b *BlindedBeaconBlockBodyBellatrix) MarshalJSON() ([]byte, error) {
|
||||
sve := make([]*SignedVoluntaryExit, len(b.BlindedBeaconBlockBodyBellatrix.VoluntaryExits))
|
||||
for i := range b.BlindedBeaconBlockBodyBellatrix.VoluntaryExits {
|
||||
@@ -904,10 +977,12 @@ func (b *BlindedBeaconBlockBodyBellatrix) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// SignedBLSToExecutionChange is a field in Beacon Block Body for Capella and above.
|
||||
type SignedBLSToExecutionChange struct {
|
||||
*eth.SignedBLSToExecutionChange
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of SignedBLSToExecutionChange.
|
||||
func (ch *SignedBLSToExecutionChange) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Message *BLSToExecutionChange `json:"message"`
|
||||
@@ -918,10 +993,12 @@ func (ch *SignedBLSToExecutionChange) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// BLSToExecutionChange is a field in SignedBLSToExecutionChange.
|
||||
type BLSToExecutionChange struct {
|
||||
*eth.BLSToExecutionChange
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of BLSToExecutionChange.
|
||||
func (ch *BLSToExecutionChange) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
ValidatorIndex string `json:"validator_index"`
|
||||
@@ -934,18 +1011,22 @@ func (ch *BLSToExecutionChange) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// SignedBlindedBeaconBlockCapella is part of the request object sent to builder API /eth/v1/builder/blinded_blocks for Capella.
|
||||
type SignedBlindedBeaconBlockCapella struct {
|
||||
*eth.SignedBlindedBeaconBlockCapella
|
||||
}
|
||||
|
||||
// BlindedBeaconBlockCapella is a field in SignedBlindedBeaconBlockCapella.
|
||||
type BlindedBeaconBlockCapella struct {
|
||||
*eth.BlindedBeaconBlockCapella
|
||||
}
|
||||
|
||||
// BlindedBeaconBlockBodyCapella is a field in BlindedBeaconBlockCapella.
|
||||
type BlindedBeaconBlockBodyCapella struct {
|
||||
*eth.BlindedBeaconBlockBodyCapella
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of SignedBlindedBeaconBlockCapella.
|
||||
func (b *SignedBlindedBeaconBlockCapella) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Message *BlindedBeaconBlockCapella `json:"message"`
|
||||
@@ -956,6 +1037,7 @@ func (b *SignedBlindedBeaconBlockCapella) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of BlindedBeaconBlockCapella
|
||||
func (b *BlindedBeaconBlockCapella) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Slot string `json:"slot"`
|
||||
@@ -972,6 +1054,7 @@ func (b *BlindedBeaconBlockCapella) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte array representation of BlindedBeaconBlockBodyCapella
|
||||
func (b *BlindedBeaconBlockBodyCapella) MarshalJSON() ([]byte, error) {
|
||||
sve := make([]*SignedVoluntaryExit, len(b.VoluntaryExits))
|
||||
for i := range b.VoluntaryExits {
|
||||
@@ -1024,6 +1107,7 @@ func (b *BlindedBeaconBlockBodyCapella) MarshalJSON() ([]byte, error) {
|
||||
})
|
||||
}
|
||||
|
||||
// ErrorMessage is a JSON representation of the builder API's returned error message.
|
||||
type ErrorMessage struct {
|
||||
Code int `json:"code"`
|
||||
Message string `json:"message"`
|
||||
|
||||
@@ -1156,6 +1156,14 @@ func TestUint256Unmarshal(t *testing.T) {
|
||||
require.Equal(t, expected, string(m))
|
||||
}
|
||||
|
||||
func TestUint256Unmarshal_BadData(t *testing.T) {
|
||||
var bigNum Uint256
|
||||
|
||||
assert.ErrorContains(t, "provided Uint256 json string is too short", bigNum.UnmarshalJSON([]byte{'"'}))
|
||||
assert.ErrorContains(t, "provided Uint256 json string is malformed", bigNum.UnmarshalJSON([]byte{'"', '1', '2'}))
|
||||
|
||||
}
|
||||
|
||||
func TestUint256UnmarshalNegative(t *testing.T) {
|
||||
m := "-1"
|
||||
var value Uint256
|
||||
|
||||
97
api/client/client.go
Normal file
@@ -0,0 +1,97 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Client is a wrapper object around the HTTP client.
|
||||
type Client struct {
|
||||
hc *http.Client
|
||||
baseURL *url.URL
|
||||
token string
|
||||
}
|
||||
|
||||
// NewClient constructs a new client with the provided options (ex WithTimeout).
|
||||
// `host` is the base host + port used to construct request urls. This value can be
|
||||
// a URL string, or NewClient will assume an http endpoint if just `host:port` is used.
|
||||
func NewClient(host string, opts ...ClientOpt) (*Client, error) {
|
||||
u, err := urlForHost(host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c := &Client{
|
||||
hc: &http.Client{},
|
||||
baseURL: u,
|
||||
}
|
||||
for _, o := range opts {
|
||||
o(c)
|
||||
}
|
||||
return c, nil
|
||||
}
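
A hedged usage sketch of the constructor above: the host value and timeout are illustrative, and the import path matches the one used by the validator client later in this diff.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/prysmaticlabs/prysm/v4/api/client"
)

func main() {
	// A bare host:port is accepted; urlForHost falls back to an http scheme.
	cl, err := client.NewClient("127.0.0.1:3500", client.WithTimeout(10*time.Second))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cl.NodeURL()) // http://127.0.0.1:3500
}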
|
||||
|
||||
// Token returns the bearer token used for JWT authentication.
|
||||
func (c *Client) Token() string {
|
||||
return c.token
|
||||
}
|
||||
|
||||
// BaseURL returns the base URL of the client.
|
||||
func (c *Client) BaseURL() *url.URL {
|
||||
return c.baseURL
|
||||
}
|
||||
|
||||
// Do executes the request against the HTTP client.
|
||||
func (c *Client) Do(req *http.Request) (*http.Response, error) {
|
||||
return c.hc.Do(req)
|
||||
}
|
||||
|
||||
func urlForHost(h string) (*url.URL, error) {
|
||||
// try to parse as url (being permissive)
|
||||
u, err := url.Parse(h)
|
||||
if err == nil && u.Host != "" {
|
||||
return u, nil
|
||||
}
|
||||
// try to parse as host:port
|
||||
host, port, err := net.SplitHostPort(h)
|
||||
if err != nil {
|
||||
return nil, ErrMalformedHostname
|
||||
}
|
||||
return &url.URL{Host: net.JoinHostPort(host, port), Scheme: "http"}, nil
|
||||
}
|
||||
|
||||
// NodeURL returns a human-readable string representation of the beacon node base url.
|
||||
func (c *Client) NodeURL() string {
|
||||
return c.baseURL.String()
|
||||
}
|
||||
|
||||
// Get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
|
||||
func (c *Client) Get(ctx context.Context, path string, opts ...ReqOption) ([]byte, error) {
|
||||
u := c.baseURL.ResolveReference(&url.URL{Path: path})
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, o := range opts {
|
||||
o(req)
|
||||
}
|
||||
r, err := c.hc.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
err = r.Body.Close()
|
||||
}()
|
||||
if r.StatusCode != http.StatusOK {
|
||||
return nil, Non200Err(r)
|
||||
}
|
||||
b, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error reading http response body")
|
||||
}
|
||||
return b, nil
|
||||
}
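
For completeness, a sketch of calling Get with a request option. The path is the node-version endpoint referenced by ErrInvalidNodeVersion in errors.go below, and the option shown is only there to illustrate how ReqOption values are threaded through; both are illustrative, not prescriptive.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/prysmaticlabs/prysm/v4/api/client"
)

func main() {
	cl, err := client.NewClient("127.0.0.1:3500")
	if err != nil {
		log.Fatal(err)
	}
	// WithSSZEncoding (defined in options.go below) sets the Accept header on this one request.
	body, err := cl.Get(context.Background(), "/eth/v1/node/version", client.WithSSZEncoding())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("received %d bytes\n", len(body))
}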
|
||||
48
api/client/client_test.go
Normal file
@@ -0,0 +1,48 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
)
|
||||
|
||||
func TestValidHostname(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
hostArg string
|
||||
path string
|
||||
joined string
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "hostname without port",
|
||||
hostArg: "mydomain.org",
|
||||
err: ErrMalformedHostname,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
cl, err := NewClient(c.hostArg)
|
||||
if c.err != nil {
|
||||
require.ErrorIs(t, err, c.err)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, c.joined, cl.BaseURL().ResolveReference(&url.URL{Path: c.path}).String())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestWithAuthenticationToken(t *testing.T) {
|
||||
cl, err := NewClient("https://www.offchainlabs.com:3500", WithAuthenticationToken("my token"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, cl.Token(), "my token")
|
||||
}
|
||||
|
||||
func TestBaseURL(t *testing.T) {
|
||||
cl, err := NewClient("https://www.offchainlabs.com:3500")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "www.offchainlabs.com", cl.BaseURL().Hostname())
|
||||
require.Equal(t, "3500", cl.BaseURL().Port())
|
||||
}
|
||||
40
api/client/errors.go
Normal file
@@ -0,0 +1,40 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ErrMalformedHostname is used to indicate that a host name's format is incorrect.
|
||||
var ErrMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
|
||||
|
||||
// ErrNotOK is used to indicate when an HTTP request to the API failed with any non-2xx response code.
|
||||
// More specific errors may be returned, but an error in reaction to a non-2xx response will always wrap ErrNotOK.
|
||||
var ErrNotOK = errors.New("did not receive 2xx response from API")
|
||||
|
||||
// ErrNotFound specifically means that a '404 - NOT FOUND' response was received from the API.
|
||||
var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")
|
||||
|
||||
// ErrInvalidNodeVersion indicates that the /eth/v1/node/version API response format was not recognized.
|
||||
var ErrInvalidNodeVersion = errors.New("invalid node version response")
|
||||
|
||||
// Non200Err parses an HTTP response and returns a formatted error for any non-200 status code.
|
||||
func Non200Err(response *http.Response) error {
|
||||
bodyBytes, err := io.ReadAll(response.Body)
|
||||
var body string
|
||||
if err != nil {
|
||||
body = "(Unable to read response body.)"
|
||||
} else {
|
||||
body = "response body:\n" + string(bodyBytes)
|
||||
}
|
||||
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
|
||||
switch response.StatusCode {
|
||||
case 404:
|
||||
return errors.Wrap(ErrNotFound, msg)
|
||||
default:
|
||||
return errors.Wrap(ErrNotOK, msg)
|
||||
}
|
||||
}
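
Because Non200Err wraps ErrNotFound, which itself wraps ErrNotOK, callers can distinguish a missing endpoint from other failures with errors.Is. A sketch, with the endpoint and handling purely illustrative:

package main

import (
	"context"
	"errors"
	"log"

	"github.com/prysmaticlabs/prysm/v4/api/client"
)

func main() {
	cl, err := client.NewClient("127.0.0.1:3500")
	if err != nil {
		log.Fatal(err)
	}
	_, err = cl.Get(context.Background(), "/eth/v1/node/version")
	switch {
	case err == nil:
		// got a 2xx response
	case errors.Is(err, client.ErrNotFound):
		log.Println("endpoint not found on this node")
	case errors.Is(err, client.ErrNotOK):
		log.Println("non-2xx response:", err)
	default:
		log.Println("transport error:", err)
	}
}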
|
||||
48
api/client/options.go
Normal file
@@ -0,0 +1,48 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ReqOption is a request functional option.
|
||||
type ReqOption func(*http.Request)
|
||||
|
||||
// WithSSZEncoding is a request functional option that adds SSZ encoding header.
|
||||
func WithSSZEncoding() ReqOption {
|
||||
return func(req *http.Request) {
|
||||
req.Header.Set("Accept", "application/octet-stream")
|
||||
}
|
||||
}
|
||||
|
||||
// WithAuthorizationToken is a request functional option that adds header for authorization token.
|
||||
func WithAuthorizationToken(token string) ReqOption {
|
||||
return func(req *http.Request) {
|
||||
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
|
||||
}
|
||||
}
|
||||
|
||||
// ClientOpt is a functional option for the Client type (http.Client wrapper)
|
||||
type ClientOpt func(*Client)
|
||||
|
||||
// WithTimeout sets the .Timeout attribute of the wrapped http.Client.
|
||||
func WithTimeout(timeout time.Duration) ClientOpt {
|
||||
return func(c *Client) {
|
||||
c.hc.Timeout = timeout
|
||||
}
|
||||
}
|
||||
|
||||
// WithRoundTripper replaces the underlying HTTP client's transport with a custom one.
|
||||
func WithRoundTripper(t http.RoundTripper) ClientOpt {
|
||||
return func(c *Client) {
|
||||
c.hc.Transport = t
|
||||
}
|
||||
}
|
||||
|
||||
// WithAuthenticationToken sets an oauth token to be used.
|
||||
func WithAuthenticationToken(token string) ClientOpt {
|
||||
return func(c *Client) {
|
||||
c.token = token
|
||||
}
|
||||
}
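
The ClientOpt form composes with custom transports. Below is a sketch of a logging RoundTripper wired in through WithRoundTripper; the transport and token values are illustrative assumptions, not part of this change.

package main

import (
	"log"
	"net/http"

	"github.com/prysmaticlabs/prysm/v4/api/client"
)

// loggingTransport logs each outgoing request before delegating to the wrapped transport.
type loggingTransport struct{ next http.RoundTripper }

func (l loggingTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	log.Printf("%s %s", req.Method, req.URL)
	return l.next.RoundTrip(req)
}

func main() {
	_, err := client.NewClient("127.0.0.1:3500",
		client.WithRoundTripper(loggingTransport{next: http.DefaultTransport}),
		client.WithAuthenticationToken("my token"),
	)
	if err != nil {
		log.Fatal(err)
	}
}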
|
||||
13
api/client/validator/BUILD.bazel
Normal file
@@ -0,0 +1,13 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["client.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/validator",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api/client:go_default_library",
|
||||
"//validator/rpc/apimiddleware:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
121
api/client/validator/client.go
Normal file
@@ -0,0 +1,121 @@
|
||||
package validator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/api/client"
|
||||
"github.com/prysmaticlabs/prysm/v4/validator/rpc/apimiddleware"
|
||||
)
|
||||
|
||||
const (
|
||||
localKeysPath = "/eth/v1/keystores"
|
||||
remoteKeysPath = "/eth/v1/remotekeys"
|
||||
feeRecipientPath = "/eth/v1/validator/{pubkey}/feerecipient"
|
||||
)
|
||||
|
||||
// Client provides a collection of helper methods for calling the Keymanager API endpoints.
|
||||
type Client struct {
|
||||
*client.Client
|
||||
}
|
||||
|
||||
// NewClient returns a new Client that includes functions for REST calls to keymanager APIs.
|
||||
func NewClient(host string, opts ...client.ClientOpt) (*Client, error) {
|
||||
c, err := client.NewClient(host, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Client{c}, nil
|
||||
}
|
||||
|
||||
// GetValidatorPubKeys gets the current list of web3signer and local validator public keys in hex format.
|
||||
func (c *Client) GetValidatorPubKeys(ctx context.Context) ([]string, error) {
|
||||
jsonlocal, err := c.GetLocalValidatorKeys(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
jsonremote, err := c.GetRemoteValidatorKeys(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(jsonlocal.Keystores) == 0 && len(jsonremote.Keystores) == 0 {
|
||||
return nil, errors.New("there are no local keys or remote keys on the validator")
|
||||
}
|
||||
|
||||
hexKeys := make(map[string]bool)
|
||||
|
||||
for index := range jsonlocal.Keystores {
|
||||
hexKeys[jsonlocal.Keystores[index].ValidatingPubkey] = true
|
||||
}
|
||||
for index := range jsonremote.Keystores {
|
||||
hexKeys[jsonremote.Keystores[index].Pubkey] = true
|
||||
}
|
||||
keys := make([]string, 0)
|
||||
for k := range hexKeys {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
// GetLocalValidatorKeys calls the keymanager APIs for local validator keys
|
||||
func (c *Client) GetLocalValidatorKeys(ctx context.Context) (*apimiddleware.ListKeystoresResponseJson, error) {
|
||||
localBytes, err := c.Get(ctx, localKeysPath, client.WithAuthorizationToken(c.Token()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
jsonlocal := &apimiddleware.ListKeystoresResponseJson{}
|
||||
if err := json.Unmarshal(localBytes, jsonlocal); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse local keystore list")
|
||||
}
|
||||
return jsonlocal, nil
|
||||
}
|
||||
|
||||
// GetRemoteValidatorKeys calls the keymanager APIs for web3signer validator keys
|
||||
func (c *Client) GetRemoteValidatorKeys(ctx context.Context) (*apimiddleware.ListRemoteKeysResponseJson, error) {
|
||||
remoteBytes, err := c.Get(ctx, remoteKeysPath, client.WithAuthorizationToken(c.Token()))
|
||||
if err != nil {
|
||||
if !strings.Contains(err.Error(), "Prysm Wallet is not of type Web3Signer") {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
jsonremote := &apimiddleware.ListRemoteKeysResponseJson{}
|
||||
if len(remoteBytes) != 0 {
|
||||
if err := json.Unmarshal(remoteBytes, jsonremote); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse remote keystore list")
|
||||
}
|
||||
}
|
||||
return jsonremote, nil
|
||||
}
|
||||
|
||||
// GetFeeRecipientAddresses takes a list of validators in hex format and returns an equal length list of fee recipients in hex format.
|
||||
func (c *Client) GetFeeRecipientAddresses(ctx context.Context, validators []string) ([]string, error) {
|
||||
feeRecipients := make([]string, len(validators))
|
||||
for index, validator := range validators {
|
||||
feejson, err := c.GetFeeRecipientAddress(ctx, validator)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, fmt.Sprintf("keymanager API failed to retrieve fee recipient for validator %s", validators[index]))
|
||||
}
|
||||
if feejson.Data == nil {
|
||||
continue
|
||||
}
|
||||
feeRecipients[index] = feejson.Data.Ethaddress
|
||||
}
|
||||
return feeRecipients, nil
|
||||
}
|
||||
|
||||
// GetFeeRecipientAddress takes a public key and calls the keymanager API to return its fee recipient.
|
||||
func (c *Client) GetFeeRecipientAddress(ctx context.Context, pubkey string) (*apimiddleware.GetFeeRecipientByPubkeyResponseJson, error) {
|
||||
path := strings.Replace(feeRecipientPath, "{pubkey}", pubkey, 1)
|
||||
b, err := c.Get(ctx, path, client.WithAuthorizationToken(c.Token()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
feejson := &apimiddleware.GetFeeRecipientByPubkeyResponseJson{}
|
||||
if err := json.Unmarshal(b, feejson); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse fee recipient")
|
||||
}
|
||||
return feejson, nil
|
||||
}
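
A usage sketch for the keymanager client above; the host, auth token, and output are illustrative. The bearer token matters because every request above is sent with WithAuthorizationToken(c.Token()).

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/prysmaticlabs/prysm/v4/api/client"
	"github.com/prysmaticlabs/prysm/v4/api/client/validator"
)

func main() {
	cl, err := validator.NewClient("127.0.0.1:7500", client.WithAuthenticationToken("api-token"))
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	pubkeys, err := cl.GetValidatorPubKeys(ctx)
	if err != nil {
		log.Fatal(err)
	}
	recipients, err := cl.GetFeeRecipientAddresses(ctx, pubkeys)
	if err != nil {
		log.Fatal(err)
	}
	for i, pk := range pubkeys {
		fmt.Printf("%s -> %s\n", pk, recipients[i])
	}
}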
|
||||
@@ -144,6 +144,7 @@ func (f *Feed) Send(value interface{}) (nsent int) {
|
||||
|
||||
if !f.typecheck(rvalue.Type()) {
|
||||
f.sendLock <- struct{}{}
|
||||
f.mu.Unlock()
|
||||
panic(feedTypeError{op: "Send", got: rvalue.Type(), want: f.etype})
|
||||
}
|
||||
f.mu.Unlock()
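
The added `f.sendLock <- struct{}{}` returns the send token before panicking on a type mismatch; without it the next Send would block forever on the semaphore channel. The snippet below is a minimal sketch of that pattern (not the Feed implementation itself), which the test change below exercises by sending the wrong type twice.

package main

func main() {
	// sendLock acts as a one-slot semaphore: receive to acquire, send to release.
	sendLock := make(chan struct{}, 1)
	sendLock <- struct{}{}

	send := func(badType bool) {
		<-sendLock // acquire
		if badType {
			sendLock <- struct{}{} // release the token before bailing out
			panic("wrong element type")
		}
		// ... deliver the value to subscribers ...
		sendLock <- struct{}{} // release
	}

	for i := 0; i < 2; i++ {
		func() {
			defer func() { recover() }() // swallow the panic, as the test's checkPanic does
			send(true)
		}()
	}
	// Reaching this point shows the second send did not deadlock.
}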
|
||||
|
||||
@@ -32,6 +32,8 @@ func TestFeedPanics(t *testing.T) {
|
||||
f.Send(2)
|
||||
want := feedTypeError{op: "Send", got: reflect.TypeOf(uint64(0)), want: reflect.TypeOf(0)}
|
||||
assert.NoError(t, checkPanic(want, func() { f.Send(uint64(2)) }))
|
||||
// Validate it doesn't deadlock.
|
||||
assert.NoError(t, checkPanic(want, func() { f.Send(uint64(2)) }))
|
||||
}
|
||||
{
|
||||
var f Feed
|
||||
|
||||
@@ -340,7 +340,13 @@ func (s *Service) IsOptimistic(_ context.Context) (bool, error) {
|
||||
}
|
||||
s.headLock.RLock()
|
||||
headRoot := s.head.root
|
||||
headSlot := s.head.slot
|
||||
headOptimistic := s.head.optimistic
|
||||
s.headLock.RUnlock()
|
||||
// we trust the head package for recent head slots, otherwise fall back to forkchoice
|
||||
if headSlot+2 >= s.CurrentSlot() {
|
||||
return headOptimistic, nil
|
||||
}
|
||||
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
defer s.cfg.ForkChoiceStore.RUnlock()
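
The head-based fast path only applies while the cached head is at most two slots behind the wall clock; anything older falls through to the forkchoice lookup, which is what the test change below drives by moving genesis back four slots. A sketch of just the recency predicate, assuming the primitives import used elsewhere in this diff:

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)

// headIsRecent mirrors the check above: the saved head's optimistic flag is
// trusted only while the head lags the current slot by at most two slots.
func headIsRecent(headSlot, currentSlot primitives.Slot) bool {
	return headSlot+2 >= currentSlot
}

func main() {
	fmt.Println(headIsRecent(0, 2)) // true: use the cached head view
	fmt.Println(headIsRecent(0, 4)) // false: fall back to forkchoice
}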
|
||||
|
||||
@@ -422,6 +422,12 @@ func TestService_IsOptimistic(t *testing.T) {
|
||||
|
||||
opt, err := c.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.Slot(0), c.CurrentSlot())
|
||||
require.Equal(t, false, opt)
|
||||
|
||||
c.SetGenesisTime(time.Now().Add(-time.Second * time.Duration(4*params.BeaconConfig().SecondsPerSlot)))
|
||||
opt, err = c.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, opt)
|
||||
}
|
||||
|
||||
|
||||
@@ -71,7 +71,6 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
|
||||
|
||||
nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
|
||||
hasAttr, attr, proposerId := s.getPayloadAttribute(ctx, arg.headState, nextSlot, arg.headRoot[:])
|
||||
|
||||
payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, attr)
|
||||
if err != nil {
|
||||
switch err {
|
||||
@@ -154,7 +153,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
|
||||
var pId [8]byte
|
||||
copy(pId[:], payloadID[:])
|
||||
s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nextSlot, proposerId, pId, arg.headRoot)
|
||||
} else if hasAttr && payloadID == nil {
|
||||
} else if hasAttr && payloadID == nil && !features.Get().PrepareAllPayloads {
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockHash": fmt.Sprintf("%#x", headPayload.BlockHash()),
|
||||
"slot": headBlk.Slot(),
|
||||
|
||||
@@ -47,9 +47,11 @@ func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
|
||||
|
||||
// This defines the current chain service's view of head.
|
||||
type head struct {
|
||||
root [32]byte // current head root.
|
||||
block interfaces.ReadOnlySignedBeaconBlock // current head block.
|
||||
state state.BeaconState // current head state.
|
||||
root [32]byte // current head root.
|
||||
block interfaces.ReadOnlySignedBeaconBlock // current head block.
|
||||
state state.BeaconState // current head state.
|
||||
slot primitives.Slot // the head block slot number
|
||||
optimistic bool // optimistic status when saved head
|
||||
}
|
||||
|
||||
// This saves head info to the local service cache, it also saves the
|
||||
@@ -94,6 +96,10 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
|
||||
return errors.Wrap(err, "could not get old head root")
|
||||
}
|
||||
oldHeadRoot := bytesutil.ToBytes32(r)
|
||||
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(newHeadRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not check if node is optimistically synced")
|
||||
}
|
||||
if headBlock.Block().ParentRoot() != oldHeadRoot {
|
||||
// A chain re-org occurred, so we fire an event notifying the rest of the services.
|
||||
commonRoot, forkSlot, err := s.cfg.ForkChoiceStore.CommonAncestor(ctx, oldHeadRoot, newHeadRoot)
|
||||
@@ -125,10 +131,6 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
|
||||
reorgDistance.Observe(float64(dis))
|
||||
reorgDepth.Observe(float64(dep))
|
||||
|
||||
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(newHeadRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not check if node is optimistically synced")
|
||||
}
|
||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.Reorg,
|
||||
Data: ðpbv1.EventChainReorg{
|
||||
@@ -150,7 +152,14 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
|
||||
}
|
||||
|
||||
// Cache the new head info.
|
||||
if err := s.setHead(newHeadRoot, headBlock, headState); err != nil {
|
||||
newHead := &head{
|
||||
root: newHeadRoot,
|
||||
block: headBlock,
|
||||
state: headState,
|
||||
optimistic: isOptimistic,
|
||||
slot: headBlock.Block().Slot(),
|
||||
}
|
||||
if err := s.setHead(newHead); err != nil {
|
||||
return errors.Wrap(err, "could not set head")
|
||||
}
|
||||
|
||||
@@ -195,20 +204,22 @@ func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.ReadOnlySignedB
|
||||
return nil
|
||||
}
|
||||
|
||||
// This sets head view object which is used to track the head slot, root, block and state.
|
||||
func (s *Service) setHead(root [32]byte, block interfaces.ReadOnlySignedBeaconBlock, state state.BeaconState) error {
|
||||
// This sets the head view object, which is used to track the head slot, root, block, state and optimistic status.
|
||||
func (s *Service) setHead(newHead *head) error {
|
||||
s.headLock.Lock()
|
||||
defer s.headLock.Unlock()
|
||||
|
||||
// This does a full copy of the block and state.
|
||||
bCp, err := block.Copy()
|
||||
bCp, err := newHead.block.Copy()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.head = &head{
|
||||
root: root,
|
||||
block: bCp,
|
||||
state: state.Copy(),
|
||||
root: newHead.root,
|
||||
block: bCp,
|
||||
state: newHead.state.Copy(),
|
||||
optimistic: newHead.optimistic,
|
||||
slot: newHead.slot,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -157,7 +157,11 @@ func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot primitives
|
||||
if headState == nil || headState.IsNil() {
|
||||
return nil, errors.New("nil state")
|
||||
}
|
||||
headState, err = transition.ProcessSlotsIfPossible(ctx, headState, slot)
|
||||
headRoot, err := s.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
headState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
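
ProcessSlotsUsingNextSlotCache takes the head root in addition to the state so it can reuse a state that was already advanced to the target slot for that root, instead of replaying empty slots. The type below is a conceptual, single-entry sketch of such a lookup, not Prysm's transition package internals.

package main

import "bytes"

// nextSlotCache is a conceptual cache keyed by the head root.
type nextSlotCache struct {
	root  []byte
	slot  uint64
	state []byte // stand-in for a pre-advanced beacon state
}

// get returns the cached pre-advanced state only when both the root and the
// target slot match; otherwise the caller must process slots the slow way.
func (c *nextSlotCache) get(root []byte, slot uint64) ([]byte, bool) {
	if c.state != nil && c.slot == slot && bytes.Equal(c.root, root) {
		return c.state, true
	}
	return nil, false
}

func main() {
	c := &nextSlotCache{root: []byte{0xaa}, slot: 5, state: []byte("state@5")}
	if st, ok := c.get([]byte{0xaa}, 5); ok {
		_ = st // cache hit: skip slot processing
	}
}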
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
|
||||
dbTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
@@ -15,7 +16,7 @@ import (
|
||||
|
||||
func TestService_HeadSyncCommitteeIndices(t *testing.T) {
|
||||
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
|
||||
c := &Service{}
|
||||
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
|
||||
c.head = &head{state: s}
|
||||
|
||||
// Current period
|
||||
@@ -38,7 +39,7 @@ func TestService_HeadSyncCommitteeIndices(t *testing.T) {
|
||||
|
||||
func TestService_headCurrentSyncCommitteeIndices(t *testing.T) {
|
||||
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
|
||||
c := &Service{}
|
||||
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
|
||||
c.head = &head{state: s}
|
||||
|
||||
// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
|
||||
@@ -66,7 +67,7 @@ func TestService_headNextSyncCommitteeIndices(t *testing.T) {
|
||||
|
||||
func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
|
||||
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
|
||||
c := &Service{}
|
||||
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
|
||||
c.head = &head{state: s}
|
||||
|
||||
// Process slot up to 2 * `EpochsPerSyncCommitteePeriod` so it can run `ProcessSyncCommitteeUpdates` twice.
|
||||
@@ -81,7 +82,7 @@ func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
|
||||
|
||||
func TestService_HeadSyncCommitteeDomain(t *testing.T) {
|
||||
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
|
||||
c := &Service{}
|
||||
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
|
||||
c.head = &head{state: s}
|
||||
|
||||
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorsRoot())
|
||||
|
||||
@@ -53,7 +53,7 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
|
||||
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
|
||||
txs, err := p.Transactions()
|
||||
switch {
|
||||
case errors.Is(err, consensus_types.ErrUnsupportedGetter):
|
||||
case errors.Is(err, consensus_types.ErrUnsupportedField):
|
||||
case err != nil:
|
||||
return err
|
||||
default:
|
||||
@@ -120,7 +120,7 @@ func logPayload(block interfaces.ReadOnlyBeaconBlock) error {
|
||||
fields := logrus.Fields{
|
||||
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
|
||||
"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.ParentHash())),
|
||||
"blockNumber": payload.BlockNumber,
|
||||
"blockNumber": payload.BlockNumber(),
|
||||
"gasUtilized": fmt.Sprintf("%.2f", gasUtilized),
|
||||
}
|
||||
if block.Version() >= version.Capella {
|
||||
|
||||
@@ -172,3 +172,10 @@ func WithClockSynchronizer(gs *startup.ClockSynchronizer) Option {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithSyncComplete(c chan struct{}) Option {
|
||||
return func(s *Service) error {
|
||||
s.syncComplete = c
|
||||
return nil
|
||||
}
|
||||
}
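
The channel stored by WithSyncComplete gates the late-block loop: the service blocks in waitForSync (shown further down in this diff) until initial sync closes the channel. Below is a small, self-contained sketch of that gating pattern, with a timer standing in for initial sync finishing; everything outside the select shape is an assumption for illustration.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitForSync blocks until syncComplete is closed or the context is cancelled,
// mirroring the two-way select used by the service.
func waitForSync(ctx context.Context, syncComplete <-chan struct{}) error {
	select {
	case <-syncComplete:
		return nil
	case <-ctx.Done():
		return errors.New("context closed, exiting goroutine")
	}
}

func main() {
	syncComplete := make(chan struct{})
	go func() {
		time.Sleep(100 * time.Millisecond) // stand-in for initial sync finishing
		close(syncComplete)
	}()
	if err := waitForSync(context.Background(), syncComplete); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("sync complete; late block tasks may start")
}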
|
||||
|
||||
@@ -69,7 +69,7 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (stat
|
||||
return nil, errors.Wrap(err, "could not check checkpoint condition in forkchoice")
|
||||
}
|
||||
if !ok {
|
||||
return nil, ErrNotCheckpoint
|
||||
return nil, errors.Wrap(ErrNotCheckpoint, fmt.Sprintf("epoch %d root %#x", c.Epoch, c.Root))
|
||||
}
|
||||
|
||||
// Fallback to state regeneration.
|
||||
|
||||
@@ -180,7 +180,7 @@ func TestStore_SaveCheckpointState(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength)}))
|
||||
|
||||
s2, err := service.getAttPreState(ctx, cp2)
|
||||
require.ErrorIs(t, ErrNotCheckpoint, err)
|
||||
require.ErrorContains(t, "epoch 2 root 0x4200000000000000000000000000000000000000000000000000000000000000: not a checkpoint in forkchoice", err)
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 33, [32]byte(cp2.Root), [32]byte(cp1.Root), [32]byte{'R'}, cp2, cp2)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -136,7 +136,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not validate new payload")
|
||||
}
|
||||
if isValidPayload {
|
||||
if signed.Version() < version.Capella && isValidPayload {
|
||||
if err := s.validateMergeTransitionBlock(ctx, preStateVersion, preStateHeader, signed); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -285,7 +285,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
|
||||
}()
|
||||
}
|
||||
defer reportAttestationInclusion(b)
|
||||
if err := s.handleEpochBoundary(ctx, postState); err != nil {
|
||||
if err := s.handleEpochBoundary(ctx, postState, blockRoot[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
|
||||
@@ -483,14 +483,14 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
|
||||
}
|
||||
|
||||
// Epoch boundary bookkeeping such as logging epoch summaries.
|
||||
func (s *Service) handleEpochBoundary(ctx context.Context, postState state.BeaconState) error {
|
||||
func (s *Service) handleEpochBoundary(ctx context.Context, postState state.BeaconState, blockRoot []byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
|
||||
defer span.End()
|
||||
|
||||
var err error
|
||||
if postState.Slot()+1 == s.nextEpochBoundarySlot {
|
||||
copied := postState.Copy()
|
||||
copied, err := transition.ProcessSlots(ctx, copied, copied.Slot()+1)
|
||||
copied, err := transition.ProcessSlotsUsingNextSlotCache(ctx, copied, blockRoot, copied.Slot()+1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -501,7 +501,28 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
|
||||
if err := helpers.UpdateProposerIndicesInCache(ctx, copied); err != nil {
|
||||
return err
|
||||
}
|
||||
if s.nextEpochBoundarySlot != 0 {
|
||||
ep := slots.ToEpoch(s.nextEpochBoundarySlot)
|
||||
_, nextProposerIndexToSlots, err := helpers.CommitteeAssignments(ctx, copied, ep)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for k, v := range nextProposerIndexToSlots {
|
||||
s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(v[0], k, [8]byte{}, [32]byte{})
|
||||
}
|
||||
}
|
||||
} else if postState.Slot() >= s.nextEpochBoundarySlot {
|
||||
postState = postState.Copy()
|
||||
if s.nextEpochBoundarySlot != 0 {
|
||||
ep := slots.ToEpoch(s.nextEpochBoundarySlot)
|
||||
_, nextProposerIndexToSlots, err := helpers.CommitteeAssignments(ctx, postState, ep)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for k, v := range nextProposerIndexToSlots {
|
||||
s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(v[0], k, [8]byte{}, [32]byte{})
|
||||
}
|
||||
}
|
||||
s.nextEpochBoundarySlot, err = slots.EpochStart(coreTime.NextEpoch(postState))
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -651,26 +672,23 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
|
||||
|
||||
// This routine checks if there is a cached proposer payload ID available for the next slot proposer.
|
||||
// If there is not, it will call forkchoice updated with the correct payload attribute then cache the payload ID.
|
||||
func (s *Service) spawnLateBlockTasksLoop() {
|
||||
go func() {
|
||||
_, err := s.clockWaiter.WaitForClock(s.ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("spawnLateBlockTasksLoop encountered an error waiting for initialization")
|
||||
func (s *Service) runLateBlockTasks() {
|
||||
if err := s.waitForSync(); err != nil {
|
||||
log.WithError(err).Error("failed to wait for initial sync")
|
||||
return
|
||||
}
|
||||
|
||||
attThreshold := params.BeaconConfig().SecondsPerSlot / 3
|
||||
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot)
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C():
|
||||
s.lateBlockTasks(s.ctx)
|
||||
case <-s.ctx.Done():
|
||||
log.Debug("Context closed, exiting routine")
|
||||
return
|
||||
}
|
||||
attThreshold := params.BeaconConfig().SecondsPerSlot / 3
|
||||
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot)
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C():
|
||||
s.lateBlockTasks(s.ctx)
|
||||
|
||||
case <-s.ctx.Done():
|
||||
log.Debug("Context closed, exiting routine")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// lateBlockTasks is called 4 seconds into the slot and performs tasks
|
||||
@@ -685,12 +703,26 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
|
||||
Type: statefeed.MissedSlot,
|
||||
})
|
||||
|
||||
headRoot := s.headRoot()
|
||||
headState := s.headState(ctx)
|
||||
lastRoot, lastState := transition.LastCachedState()
|
||||
if lastState == nil {
|
||||
lastRoot, lastState = headRoot[:], headState
|
||||
}
|
||||
// Copy all the field tries in our cached state in the event of late
|
||||
// blocks.
|
||||
lastState.CopyAllTries()
|
||||
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
|
||||
log.WithError(err).Debug("could not update next slot state cache")
|
||||
}
|
||||
|
||||
// Head root should be empty when retrieving proposer index for the next slot.
|
||||
_, id, has := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(s.CurrentSlot()+1, [32]byte{} /* head root */)
|
||||
// There exists proposer for next slot, but we haven't called fcu w/ payload attribute yet.
|
||||
if (!has && !features.Get().PrepareAllPayloads) || id != [8]byte{} {
|
||||
return
|
||||
}
|
||||
|
||||
s.headLock.RLock()
|
||||
headBlock, err := s.headBlock()
|
||||
if err != nil {
|
||||
@@ -698,8 +730,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
|
||||
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve head block")
|
||||
return
|
||||
}
|
||||
headRoot := s.headRoot()
|
||||
headState := s.headState(ctx)
|
||||
s.headLock.RUnlock()
|
||||
_, err = s.notifyForkchoiceUpdate(ctx, ¬ifyForkchoiceUpdateArg{
|
||||
headState: headState,
|
||||
@@ -709,11 +739,14 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
|
||||
}
|
||||
lastRoot, lastState := transition.LastCachedState()
|
||||
if lastState == nil {
|
||||
lastRoot, lastState = headRoot[:], headState
|
||||
}
|
||||
if err = transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
|
||||
log.WithError(err).Debug("could not update next slot state cache")
|
||||
}
|
||||
|
||||
// waitForSync blocks until the node is synced to the head.
|
||||
func (s *Service) waitForSync() error {
|
||||
select {
|
||||
case <-s.syncComplete:
|
||||
return nil
|
||||
case <-s.ctx.Done():
|
||||
return errors.New("context closed, exiting goroutine")
|
||||
}
|
||||
}
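
Earlier in lateBlockTasks, the proposer cache is queried with an empty head root and the routine bails out unless a proposer is known (or PrepareAllPayloads is set) and the cached payload ID is still all zeros. That all-zero ID is the sentinel seeded by handleEpochBoundary above, meaning "proposer known, no payload prepared yet". A sketch of that decision, with the feature flag passed in as a plain bool:

package main

import "fmt"

var emptyPayloadID [8]byte

// shouldSendPayloadAttributes mirrors the guard in lateBlockTasks: only fire a
// forkchoice update with payload attributes when a proposer is expected for the
// next slot (or every payload is prepared) and no payload ID has been cached yet.
func shouldSendPayloadAttributes(hasProposer bool, id [8]byte, prepareAllPayloads bool) bool {
	if !hasProposer && !prepareAllPayloads {
		return false
	}
	return id == emptyPayloadID
}

func main() {
	fmt.Println(shouldSendPayloadAttributes(true, [8]byte{}, false))  // true: send fcu with attributes
	fmt.Println(shouldSendPayloadAttributes(true, [8]byte{1}, false)) // false: payload already prepared
	fmt.Println(shouldSendPayloadAttributes(false, [8]byte{}, false)) // false: no proposer registered
}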
|
||||
|
||||
@@ -636,7 +636,7 @@ func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
|
||||
s, _ := util.DeterministicGenesisState(t, 1024)
|
||||
service.head = &head{state: s}
|
||||
require.NoError(t, s.SetSlot(2*params.BeaconConfig().SlotsPerEpoch))
|
||||
require.NoError(t, service.handleEpochBoundary(ctx, s))
|
||||
require.NoError(t, service.handleEpochBoundary(ctx, s, []byte{}))
|
||||
require.Equal(t, 3*params.BeaconConfig().SlotsPerEpoch, service.nextEpochBoundarySlot)
|
||||
}
|
||||
|
||||
|
||||
@@ -94,8 +94,10 @@ func (s *Service) spawnProcessAttestationsRoutine() {
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
case <-pat.C():
|
||||
log.Infof("proposer_mocker: calling updated head via offset ticker")
|
||||
s.UpdateHead(s.ctx, s.CurrentSlot()+1)
|
||||
case <-st.C():
|
||||
log.Infof("proposer_mocker: calling updated head via normal slot ticker in spawn atts")
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, s.CurrentSlot()); err != nil {
|
||||
log.WithError(err).Error("could not process new slot")
|
||||
@@ -124,6 +126,8 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
|
||||
}
|
||||
s.processAttestations(ctx, disparity)
|
||||
|
||||
log.Infof("proposer_mocker: process attestations in fc took %s", time.Since(start).String())
|
||||
|
||||
processAttsElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
|
||||
|
||||
start = time.Now()
|
||||
@@ -136,11 +140,14 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
|
||||
s.headLock.RUnlock()
|
||||
}
|
||||
newAttHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
|
||||
log.Infof("proposer_mocker: head root in fc took %s", time.Since(start).String())
|
||||
|
||||
changed, err := s.forkchoiceUpdateWithExecution(s.ctx, newHeadRoot, proposingSlot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not update forkchoice")
|
||||
}
|
||||
log.Infof("proposer_mocker: fcu call in fc took %s", time.Since(start).String())
|
||||
|
||||
if changed {
|
||||
s.headLock.RLock()
|
||||
log.WithFields(logrus.Fields{
|
||||
|
||||
@@ -60,6 +60,7 @@ type Service struct {
|
||||
wsVerifier *WeakSubjectivityVerifier
|
||||
clockSetter startup.ClockSetter
|
||||
clockWaiter startup.ClockWaiter
|
||||
syncComplete chan struct{}
|
||||
}
|
||||
|
||||
// config options for the service.
|
||||
@@ -130,7 +131,7 @@ func (s *Service) Start() {
|
||||
}
|
||||
}
|
||||
s.spawnProcessAttestationsRoutine()
|
||||
s.spawnLateBlockTasksLoop()
|
||||
go s.runLateBlockTasks()
|
||||
}
|
||||
|
||||
// Stop the blockchain service's main event loop and associated goroutines.
|
||||
@@ -307,7 +308,13 @@ func (s *Service) initializeHeadFromDB(ctx context.Context) error {
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized block")
|
||||
}
|
||||
if err := s.setHead(finalizedRoot, finalizedBlock, finalizedState); err != nil {
|
||||
if err := s.setHead(&head{
|
||||
finalizedRoot,
|
||||
finalizedBlock,
|
||||
finalizedState,
|
||||
finalizedBlock.Block().Slot(),
|
||||
false,
|
||||
}); err != nil {
|
||||
return errors.Wrap(err, "could not set head")
|
||||
}
|
||||
|
||||
@@ -439,7 +446,13 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
|
||||
}
|
||||
s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix()))
|
||||
|
||||
if err := s.setHead(genesisBlkRoot, genesisBlk, genesisState); err != nil {
|
||||
if err := s.setHead(&head{
|
||||
genesisBlkRoot,
|
||||
genesisBlk,
|
||||
genesisState,
|
||||
genesisBlk.Block().Slot(),
|
||||
false,
|
||||
}); err != nil {
|
||||
log.WithError(err).Fatal("Could not set head")
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -3,6 +3,7 @@ package builder
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
buildertesting "github.com/prysmaticlabs/prysm/v4/api/client/builder/testing"
|
||||
blockchainTesting "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
@@ -38,6 +39,21 @@ func Test_RegisterValidator(t *testing.T) {
|
||||
assert.Equal(t, true, builder.RegisteredVals[pubkey])
|
||||
}
|
||||
|
||||
func Test_RegisterValidator_WithCache(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
headFetcher := &blockchainTesting.ChainService{}
|
||||
builder := buildertesting.NewClient()
|
||||
s, err := NewService(ctx, WithRegistrationCache(), WithHeadFetcher(headFetcher), WithBuilderClient(&builder))
|
||||
require.NoError(t, err)
|
||||
pubkey := bytesutil.ToBytes48([]byte("pubkey"))
|
||||
var feeRecipient [20]byte
|
||||
reg := ð.ValidatorRegistrationV1{Pubkey: pubkey[:], Timestamp: uint64(time.Now().UTC().Unix()), FeeRecipient: feeRecipient[:]}
|
||||
require.NoError(t, s.RegisterValidator(ctx, []*eth.SignedValidatorRegistrationV1{{Message: reg}}))
|
||||
registration, err := s.registrationCache.RegistrationByIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, reg, registration)
|
||||
}
|
||||
|
||||
func Test_BuilderMethodsWithouClient(t *testing.T) {
|
||||
s, err := NewService(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
2  beacon-chain/cache/BUILD.bazel  vendored
@@ -34,6 +34,7 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//cache/lru:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
@@ -87,7 +88,6 @@ go_test(
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_google_gofuzz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
33  beacon-chain/cache/payload_id.go  vendored
@@ -4,35 +4,41 @@ import (
|
||||
"bytes"
|
||||
"sync"
|
||||
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
)
|
||||
|
||||
const keyLength = 40
|
||||
const vIdLength = 8
|
||||
const pIdLength = 8
|
||||
const vpIdsLength = vIdLength + pIdLength
|
||||
|
||||
// ProposerPayloadIDsCache is a cache of proposer payload IDs.
// The key is the slot. The value is the concatenation of the proposer and payload IDs. 8 bytes each.
// The key is the concatenation of the slot and the block root.
// The value is the concatenation of the proposer and payload IDs, 8 bytes each.
type ProposerPayloadIDsCache struct {
	slotToProposerAndPayloadIDs map[[40]byte][vpIdsLength]byte
	slotToProposerAndPayloadIDs map[[keyLength]byte][vpIdsLength]byte
	sync.RWMutex
}

// NewProposerPayloadIDsCache creates a new proposer payload IDs cache.
func NewProposerPayloadIDsCache() *ProposerPayloadIDsCache {
	return &ProposerPayloadIDsCache{
		slotToProposerAndPayloadIDs: make(map[[40]byte][vpIdsLength]byte),
		slotToProposerAndPayloadIDs: make(map[[keyLength]byte][vpIdsLength]byte),
	}
}

// GetProposerPayloadIDs returns the proposer and payload IDs for the given slot.
func (f *ProposerPayloadIDsCache) GetProposerPayloadIDs(slot primitives.Slot, r [32]byte) (primitives.ValidatorIndex, [8]byte, bool) {
// GetProposerPayloadIDs returns the proposer and payload IDs for the given slot and head root to build the block.
func (f *ProposerPayloadIDsCache) GetProposerPayloadIDs(
	slot primitives.Slot,
	r [fieldparams.RootLength]byte,
) (primitives.ValidatorIndex, [pIdLength]byte, bool) {
	f.RLock()
	defer f.RUnlock()
	ids, ok := f.slotToProposerAndPayloadIDs[idKey(slot, r)]
	if !ok {
		return 0, [8]byte{}, false
		return 0, [pIdLength]byte{}, false
	}
	vId := ids[:vIdLength]

@@ -43,8 +49,13 @@ func (f *ProposerPayloadIDsCache) GetProposerPayloadIDs(slot primitives.Slot, r
	return primitives.ValidatorIndex(bytesutil.BytesToUint64BigEndian(vId)), pId, true
}
|
||||
|
||||
// SetProposerAndPayloadIDs sets the proposer and payload IDs for the given slot.
|
||||
func (f *ProposerPayloadIDsCache) SetProposerAndPayloadIDs(slot primitives.Slot, vId primitives.ValidatorIndex, pId [8]byte, r [32]byte) {
|
||||
// SetProposerAndPayloadIDs sets the proposer and payload IDs for the given slot and head root to build block.
|
||||
func (f *ProposerPayloadIDsCache) SetProposerAndPayloadIDs(
|
||||
slot primitives.Slot,
|
||||
vId primitives.ValidatorIndex,
|
||||
pId [pIdLength]byte,
|
||||
r [fieldparams.RootLength]byte,
|
||||
) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
var vIdBytes [vIdLength]byte
|
||||
@@ -63,7 +74,7 @@ func (f *ProposerPayloadIDsCache) SetProposerAndPayloadIDs(slot primitives.Slot,
|
||||
}
|
||||
}
|
||||
|
||||
// PrunePayloadIDs removes the payload id entries that's current than input slot.
|
||||
// PrunePayloadIDs removes the payload ID entries older than input slot.
|
||||
func (f *ProposerPayloadIDsCache) PrunePayloadIDs(slot primitives.Slot) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
@@ -76,8 +87,8 @@ func (f *ProposerPayloadIDsCache) PrunePayloadIDs(slot primitives.Slot) {
|
||||
}
|
||||
}
|
||||
|
||||
func idKey(slot primitives.Slot, r [32]byte) [40]byte {
|
||||
var k [40]byte
|
||||
func idKey(slot primitives.Slot, r [fieldparams.RootLength]byte) [keyLength]byte {
|
||||
var k [keyLength]byte
|
||||
copy(k[:], append(bytesutil.Uint64ToBytesBigEndian(uint64(slot)), r[:]...))
|
||||
return k
|
||||
}
|
||||
|
||||
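The change above widens the cache key from the slot alone to the slot plus the head block root, so a proposer can hold distinct payload IDs for competing heads in the same slot. A minimal, standalone sketch of that key scheme (plain maps and hypothetical values, not the Prysm package itself):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// idKey is 8 bytes of slot (big endian) followed by the 32-byte head root,
// mirroring the 40-byte key used by the updated cache.
func idKey(slot uint64, root [32]byte) [40]byte {
	var k [40]byte
	binary.BigEndian.PutUint64(k[:8], slot)
	copy(k[8:], root[:])
	return k
}

func main() {
	cache := map[[40]byte][16]byte{} // proposer index (8 bytes) + payload ID (8 bytes)

	var val [16]byte
	binary.BigEndian.PutUint64(val[:8], 42)       // proposer index
	copy(val[8:], []byte{1, 2, 3, 4, 5, 6, 7, 8}) // payload ID

	root := [32]byte{'a'}
	cache[idKey(100, root)] = val

	// The same slot with a different head root is a distinct entry.
	_, ok := cache[idKey(100, [32]byte{'b'})]
	fmt.Println(ok) // false
}
```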
27  beacon-chain/cache/registration.go  vendored
@@ -3,15 +3,11 @@ package cache
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/math"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
@@ -38,33 +34,10 @@ func (regCache *RegistrationCache) RegistrationByIndex(id primitives.ValidatorIn
|
||||
regCache.lock.RUnlock()
|
||||
return nil, errors.Wrapf(ErrNotFoundRegistration, "validator id %d", id)
|
||||
}
|
||||
isExpired, err := RegistrationTimeStampExpired(v.Timestamp)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to check registration expiration")
|
||||
}
|
||||
if isExpired {
|
||||
regCache.lock.RUnlock()
|
||||
regCache.lock.Lock()
|
||||
defer regCache.lock.Unlock()
|
||||
delete(regCache.indexToRegistration, id)
|
||||
log.Warnf("registration for validator index %d expired at unix time %d", id, v.Timestamp)
|
||||
return nil, errors.Wrapf(ErrNotFoundRegistration, "validator id %d", id)
|
||||
}
|
||||
regCache.lock.RUnlock()
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func RegistrationTimeStampExpired(ts uint64) (bool, error) {
	// safely convert uint64 to int64
	i, err := math.Int(ts)
	if err != nil {
		return false, err
	}
	expiryDuration := params.BeaconConfig().RegistrationDuration
	// registered time + expiration duration < current time = expired
	return time.Unix(int64(i), 0).Add(expiryDuration).Before(time.Now()), nil
}
|
||||
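For illustration, the expiry rule reduces to "registration timestamp plus the configured registration duration is already in the past". A self-contained sketch under that assumption (the three-epoch window and mainnet 12-second slots are taken from the surrounding test comments, not read from the real config):

```go
package main

import (
	"fmt"
	"time"
)

// registrationExpired reports whether a registration made at unix time ts
// has outlived the given expiry window.
func registrationExpired(ts int64, expiry time.Duration) bool {
	return time.Unix(ts, 0).Add(expiry).Before(time.Now())
}

func main() {
	// Assume 3 epochs * 32 slots * 12 seconds, as suggested by the tests above.
	expiry := 3 * 32 * 12 * time.Second

	fresh := time.Now().Unix()
	stale := time.Now().Add(-4 * 32 * 12 * time.Second).Unix()

	fmt.Println(registrationExpired(fresh, expiry)) // false
	fmt.Println(registrationExpired(stale, expiry)) // true
}
```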
|
||||
// UpdateIndexToRegisteredMap adds or updates values in the cache based on the argument.
|
||||
func (regCache *RegistrationCache) UpdateIndexToRegisteredMap(ctx context.Context, m map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1) {
|
||||
_, span := trace.StartSpan(ctx, "RegistrationCache.UpdateIndexToRegisteredMap")
|
||||
|
||||
40  beacon-chain/cache/registration_test.go  vendored
@@ -6,15 +6,12 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func TestRegistrationCache(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
pubkey, err := hexutil.Decode("0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
|
||||
require.NoError(t, err)
|
||||
validatorIndex := primitives.ValidatorIndex(1)
|
||||
@@ -31,29 +28,14 @@ func TestRegistrationCache(t *testing.T) {
|
||||
reg, err := cache.RegistrationByIndex(validatorIndex)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, string(reg.Pubkey), string(pubkey))
|
||||
t.Run("Registration expired", func(t *testing.T) {
|
||||
validatorIndex2 := primitives.ValidatorIndex(2)
|
||||
overExpirationPadTime := time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot*uint64(params.BeaconConfig().SlotsPerEpoch)*4) // 4 epochs
|
||||
m[validatorIndex2] = ðpb.ValidatorRegistrationV1{
|
||||
FeeRecipient: []byte{},
|
||||
GasLimit: 100,
|
||||
Timestamp: uint64(time.Now().Add(-1 * overExpirationPadTime).Unix()),
|
||||
Pubkey: pubkey,
|
||||
}
|
||||
cache.UpdateIndexToRegisteredMap(context.Background(), m)
|
||||
_, err := cache.RegistrationByIndex(validatorIndex2)
|
||||
require.ErrorContains(t, "no validator registered", err)
|
||||
require.LogsContain(t, hook, "expired")
|
||||
})
|
||||
t.Run("Registration close to expiration still passes", func(t *testing.T) {
|
||||
t.Run("successfully updates", func(t *testing.T) {
|
||||
pubkey, err := hexutil.Decode("0x88247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
|
||||
require.NoError(t, err)
|
||||
validatorIndex2 := primitives.ValidatorIndex(2)
|
||||
overExpirationPadTime := time.Second * time.Duration((params.BeaconConfig().SecondsPerSlot*uint64(params.BeaconConfig().SlotsPerEpoch)*3)-5) // 3 epochs - 5 seconds
|
||||
m[validatorIndex2] = ðpb.ValidatorRegistrationV1{
|
||||
FeeRecipient: []byte{},
|
||||
GasLimit: 100,
|
||||
Timestamp: uint64(time.Now().Add(-1 * overExpirationPadTime).Unix()),
|
||||
Timestamp: uint64(time.Now().Unix()),
|
||||
Pubkey: pubkey,
|
||||
}
|
||||
cache.UpdateIndexToRegisteredMap(context.Background(), m)
|
||||
@@ -62,21 +44,3 @@ func TestRegistrationCache(t *testing.T) {
|
||||
require.Equal(t, string(reg.Pubkey), string(pubkey))
|
||||
})
|
||||
}
|
||||
|
||||
func Test_RegistrationTimeStampExpired(t *testing.T) {
|
||||
// expiration set at 3 epochs
|
||||
t.Run("expired registration", func(t *testing.T) {
|
||||
overExpirationPadTime := time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot*uint64(params.BeaconConfig().SlotsPerEpoch)*4) // 4 epochs
|
||||
ts := uint64(time.Now().Add(-1 * overExpirationPadTime).Unix())
|
||||
isExpired, err := RegistrationTimeStampExpired(ts)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, isExpired)
|
||||
})
|
||||
t.Run("is not expired registration", func(t *testing.T) {
|
||||
overExpirationPadTime := time.Second * time.Duration((params.BeaconConfig().SecondsPerSlot*uint64(params.BeaconConfig().SlotsPerEpoch)*3)-5) // 3 epochs -5 seconds
|
||||
ts := uint64(time.Now().Add(-1 * overExpirationPadTime).Unix())
|
||||
isExpired, err := RegistrationTimeStampExpired(ts)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, isExpired)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package altair
|
||||
|
||||
import (
|
||||
"context"
|
||||
goErrors "errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
@@ -22,6 +23,10 @@ import (
|
||||
|
||||
const maxRandomByte = uint64(1<<8 - 1)
|
||||
|
||||
var (
|
||||
ErrTooLate = errors.New("sync message is too late")
|
||||
)
|
||||
|
||||
// ValidateNilSyncContribution validates the following fields are not nil:
|
||||
// -the contribution and proof itself
|
||||
// -the message within contribution and proof
|
||||
@@ -217,7 +222,7 @@ func ValidateSyncMessageTime(slot primitives.Slot, genesisTime time.Time, clockD
|
||||
upperBound := time.Now().Add(clockDisparity)
|
||||
// Verify sync message slot is within the time range.
|
||||
if messageTime.Before(lowerBound) || messageTime.After(upperBound) {
|
||||
return fmt.Errorf(
|
||||
syncErr := fmt.Errorf(
|
||||
"sync message time %v (slot %d) not within allowable range of %v (slot %d) to %v (slot %d)",
|
||||
messageTime,
|
||||
slot,
|
||||
@@ -226,6 +231,11 @@ func ValidateSyncMessageTime(slot primitives.Slot, genesisTime time.Time, clockD
|
||||
upperBound,
|
||||
uint64(upperBound.Unix()-genesisTime.Unix())/params.BeaconConfig().SecondsPerSlot,
|
||||
)
|
||||
// Wrap error message if sync message is too late.
|
||||
if messageTime.Before(lowerBound) {
|
||||
syncErr = goErrors.Join(ErrTooLate, syncErr)
|
||||
}
|
||||
return syncErr
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -61,7 +61,7 @@ func IsExecutionBlock(body interfaces.ReadOnlyBeaconBlockBody) (bool, error) {
|
||||
}
|
||||
payload, err := body.Execution()
|
||||
switch {
|
||||
case errors.Is(err, consensus_types.ErrUnsupportedGetter):
|
||||
case errors.Is(err, consensus_types.ErrUnsupportedField):
|
||||
return false, nil
|
||||
case err != nil:
|
||||
return false, err
|
||||
|
||||
@@ -38,6 +38,7 @@ go_library(
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -68,7 +69,6 @@ go_test(
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
|
||||
@@ -8,13 +8,16 @@ import (
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/hash"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
prysmTime "github.com/prysmaticlabs/prysm/v4/time"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrTooLate = errors.New("attestation is too late")
|
||||
)
|
||||
|
||||
// ValidateNilAttestation checks if any composite field of input attestation is nil.
|
||||
// Access to these nil fields will result in run time panic,
|
||||
// it is recommended to run these checks as first line of defense.
|
||||
@@ -66,25 +69,6 @@ func IsAggregator(committeeCount uint64, slotSig []byte) (bool, error) {
|
||||
return binary.LittleEndian.Uint64(b[:8])%modulo == 0, nil
|
||||
}
|
||||
|
||||
// AggregateSignature returns the aggregated signature of the input attestations.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
//
|
||||
// def get_aggregate_signature(attestations: Sequence[Attestation]) -> BLSSignature:
|
||||
// signatures = [attestation.signature for attestation in attestations]
|
||||
// return bls.Aggregate(signatures)
|
||||
func AggregateSignature(attestations []*ethpb.Attestation) (bls.Signature, error) {
|
||||
sigs := make([]bls.Signature, len(attestations))
|
||||
var err error
|
||||
for i := 0; i < len(sigs); i++ {
|
||||
sigs[i], err = bls.SignatureFromBytes(attestations[i].Signature)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return bls.AggregateSignatures(sigs), nil
|
||||
}
|
||||
|
||||
// IsAggregated returns true if the attestation is an aggregated attestation,
|
||||
// false otherwise.
|
||||
func IsAggregated(attestation *ethpb.Attestation) bool {
|
||||
@@ -184,7 +168,7 @@ func ValidateAttestationTime(attSlot primitives.Slot, genesisTime time.Time, clo
|
||||
)
|
||||
if attTime.Before(lowerBounds) {
|
||||
attReceivedTooLateCount.Inc()
|
||||
return attError
|
||||
return errors.Join(ErrTooLate, attError)
|
||||
}
|
||||
if attTime.After(upperBounds) {
|
||||
attReceivedTooEarlyCount.Inc()
|
||||
|
||||
@@ -10,8 +10,6 @@ import (
|
||||
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
@@ -45,44 +43,6 @@ func TestAttestation_IsAggregator(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestAttestation_AggregateSignature(t *testing.T) {
|
||||
t.Run("verified", func(t *testing.T) {
|
||||
pubkeys := make([]bls.PublicKey, 0, 100)
|
||||
atts := make([]*ethpb.Attestation, 0, 100)
|
||||
msg := bytesutil.ToBytes32([]byte("hello"))
|
||||
for i := 0; i < 100; i++ {
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
pub := priv.PublicKey()
|
||||
sig := priv.Sign(msg[:])
|
||||
pubkeys = append(pubkeys, pub)
|
||||
att := ðpb.Attestation{Signature: sig.Marshal()}
|
||||
atts = append(atts, att)
|
||||
}
|
||||
aggSig, err := helpers.AggregateSignature(atts)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, aggSig.FastAggregateVerify(pubkeys, msg), "Signature did not verify")
|
||||
})
|
||||
|
||||
t.Run("not verified", func(t *testing.T) {
|
||||
pubkeys := make([]bls.PublicKey, 0, 100)
|
||||
atts := make([]*ethpb.Attestation, 0, 100)
|
||||
msg := []byte("hello")
|
||||
for i := 0; i < 100; i++ {
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
pub := priv.PublicKey()
|
||||
sig := priv.Sign(msg)
|
||||
pubkeys = append(pubkeys, pub)
|
||||
att := ðpb.Attestation{Signature: sig.Marshal()}
|
||||
atts = append(atts, att)
|
||||
}
|
||||
aggSig, err := helpers.AggregateSignature(atts[0 : len(atts)-2])
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, aggSig.FastAggregateVerify(pubkeys, bytesutil.ToBytes32(msg)), "Signature not suppose to verify")
|
||||
})
|
||||
}
|
||||
|
||||
func TestAttestation_ComputeSubnetForAttestation(t *testing.T) {
|
||||
// Create 10 committees
|
||||
committeeCount := uint64(10)
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
var CommitteeCacheInProgressHit = promauto.NewCounter(prometheus.CounterOpts{
|
||||
@@ -396,3 +397,22 @@ func isEligibleForActivation(activationEligibilityEpoch, activationEpoch, finali
|
||||
return activationEligibilityEpoch <= finalizedEpoch &&
|
||||
activationEpoch == params.BeaconConfig().FarFutureEpoch
|
||||
}
|
||||
|
||||
// LastActivatedValidatorIndex provides the last activated validator given a state
func LastActivatedValidatorIndex(ctx context.Context, st state.ReadOnlyBeaconState) (primitives.ValidatorIndex, error) {
	_, span := trace.StartSpan(ctx, "helpers.LastActivatedValidatorIndex")
	defer span.End()
	var lastActivatedvalidatorIndex primitives.ValidatorIndex
	// linear search because statuses are not sorted
	for j := st.NumValidators() - 1; j >= 0; j-- {
		val, err := st.ValidatorAtIndexReadOnly(primitives.ValidatorIndex(j))
		if err != nil {
			return 0, err
		}
		if IsActiveValidatorUsingTrie(val, time.CurrentEpoch(st)) {
			lastActivatedvalidatorIndex = primitives.ValidatorIndex(j)
			break
		}
	}
	return lastActivatedvalidatorIndex, nil
}
|
||||
|
||||
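The helper above walks the registry from the highest index down and stops at the first active validator, since validator statuses are not ordered by index. A simplified, dependency-free sketch of that search (a plain bool slice stands in for the real state accessor):

```go
package main

import "fmt"

// lastActiveIndex returns the highest index whose entry is active,
// scanning from the end because statuses are not sorted.
func lastActiveIndex(active []bool) int {
	for j := len(active) - 1; j >= 0; j-- {
		if active[j] {
			return j
		}
	}
	return 0
}

func main() {
	fmt.Println(lastActiveIndex([]bool{true, true, false, true, false})) // 3
}
```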
@@ -727,3 +727,26 @@ func computeProposerIndexWithValidators(validators []*ethpb.Validator, activeInd
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLastActivatedValidatorIndex_OK(t *testing.T) {
|
||||
beaconState, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
|
||||
validators := make([]*ethpb.Validator, 4)
|
||||
balances := make([]uint64, len(validators))
|
||||
for i := uint64(0); i < 4; i++ {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, params.BeaconConfig().BLSPubkeyLength),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
EffectiveBalance: 32 * 1e9,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
balances[i] = validators[i].EffectiveBalance
|
||||
}
|
||||
require.NoError(t, beaconState.SetValidators(validators))
|
||||
require.NoError(t, beaconState.SetBalances(balances))
|
||||
|
||||
index, err := LastActivatedValidatorIndex(context.Background(), beaconState)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, index, primitives.ValidatorIndex(3))
|
||||
}
|
||||
|
||||
@@ -130,9 +130,15 @@ func performValidatorStateMigration(ctx context.Context, bar *progressbar.Progre
|
||||
return err
|
||||
}
|
||||
item := enc
|
||||
if hasAltairKey(item) {
|
||||
switch {
|
||||
case hasAltairKey(enc):
|
||||
item = item[len(altairKey):]
|
||||
case hasBellatrixKey(enc):
|
||||
item = item[len(bellatrixKey):]
|
||||
case hasCapellaKey(enc):
|
||||
item = item[len(capellaKey):]
|
||||
}
|
||||
|
||||
detector, err := detect.FromState(item)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -165,9 +171,14 @@ func performValidatorStateMigration(ctx context.Context, bar *progressbar.Progre
|
||||
return err
|
||||
}
|
||||
var stateBytes []byte
|
||||
if hasAltairKey(enc) {
|
||||
switch {
|
||||
case hasAltairKey(enc):
|
||||
stateBytes = snappy.Encode(nil, append(altairKey, rawObj...))
|
||||
} else {
|
||||
case hasBellatrixKey(enc):
|
||||
stateBytes = snappy.Encode(nil, append(bellatrixKey, rawObj...))
|
||||
case hasCapellaKey(enc):
|
||||
stateBytes = snappy.Encode(nil, append(capellaKey, rawObj...))
|
||||
default:
|
||||
stateBytes = snappy.Encode(nil, rawObj)
|
||||
}
|
||||
if stateErr := stateBkt.Put(keys[index], stateBytes); stateErr != nil {
|
||||
|
||||
@@ -313,3 +313,217 @@ func Test_migrateAltairStateValidators(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_migrateBellatrixStateValidators(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
setup func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator)
|
||||
eval func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator)
|
||||
}{
|
||||
{
|
||||
name: "migrates validators and adds them to new buckets",
|
||||
setup: func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator) {
|
||||
// create some new buckets that should be present for this migration
|
||||
err := dbStore.db.Update(func(tx *bbolt.Tx) error {
|
||||
_, err := tx.CreateBucketIfNotExists(stateValidatorsBucket)
|
||||
assert.NoError(t, err)
|
||||
_, err = tx.CreateBucketIfNotExists(blockRootValidatorHashesBucket)
|
||||
assert.NoError(t, err)
|
||||
return nil
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
},
|
||||
eval: func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator) {
|
||||
// check whether the new buckets are present
|
||||
err := dbStore.db.View(func(tx *bbolt.Tx) error {
|
||||
valBkt := tx.Bucket(stateValidatorsBucket)
|
||||
assert.NotNil(t, valBkt)
|
||||
idxBkt := tx.Bucket(blockRootValidatorHashesBucket)
|
||||
assert.NotNil(t, idxBkt)
|
||||
return nil
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// check if the migration worked
|
||||
blockRoot := [32]byte{'A'}
|
||||
rcvdState, err := dbStore.State(context.Background(), blockRoot)
|
||||
assert.NoError(t, err)
|
||||
require.DeepSSZEqual(t, rcvdState.ToProtoUnsafe(), state.ToProtoUnsafe(), "saved state with validators and retrieved state are not matching")
|
||||
|
||||
// find hashes of the validators that are set as part of the state
|
||||
var hashes []byte
|
||||
var individualHashes [][]byte
|
||||
for _, val := range vals {
|
||||
hash, hashErr := val.HashTreeRoot()
|
||||
assert.NoError(t, hashErr)
|
||||
hashes = append(hashes, hash[:]...)
|
||||
individualHashes = append(individualHashes, hash[:])
|
||||
}
|
||||
|
||||
// check if all the validators that were in the state, are stored properly in the validator bucket
|
||||
pbState, err := state_native.ProtobufBeaconStateBellatrix(rcvdState.ToProtoUnsafe())
|
||||
assert.NoError(t, err)
|
||||
validatorsFoundCount := 0
|
||||
for _, val := range pbState.Validators {
|
||||
hash, hashErr := val.HashTreeRoot()
|
||||
assert.NoError(t, hashErr)
|
||||
found := false
|
||||
for _, h := range individualHashes {
|
||||
if bytes.Equal(hash[:], h) {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
require.Equal(t, true, found)
|
||||
validatorsFoundCount++
|
||||
}
|
||||
require.Equal(t, len(vals), validatorsFoundCount)
|
||||
|
||||
// check if the state validator indexes are stored properly
|
||||
err = dbStore.db.View(func(tx *bbolt.Tx) error {
|
||||
rcvdValhashBytes := tx.Bucket(blockRootValidatorHashesBucket).Get(blockRoot[:])
|
||||
rcvdValHashes, sErr := snappy.Decode(nil, rcvdValhashBytes)
|
||||
assert.NoError(t, sErr)
|
||||
require.DeepEqual(t, hashes, rcvdValHashes)
|
||||
return nil
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
dbStore := setupDB(t)
|
||||
|
||||
// add a state with the given validators
|
||||
vals := validators(10)
|
||||
blockRoot := [32]byte{'A'}
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 20)
|
||||
err := st.SetFork(&v1alpha1.Fork{
|
||||
PreviousVersion: params.BeaconConfig().AltairForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().BellatrixForkVersion,
|
||||
Epoch: 0,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, st.SetSlot(100))
|
||||
assert.NoError(t, st.SetValidators(vals))
|
||||
assert.NoError(t, dbStore.SaveState(context.Background(), st, blockRoot))
|
||||
|
||||
// enable historical state representation flag to test this
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableHistoricalSpaceRepresentation: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
tt.setup(t, dbStore, st, vals)
|
||||
assert.NoError(t, migrateStateValidators(context.Background(), dbStore.db), "migrateArchivedIndex(tx) error")
|
||||
tt.eval(t, dbStore, st, vals)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_migrateCapellaStateValidators(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
setup func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator)
|
||||
eval func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator)
|
||||
}{
|
||||
{
|
||||
name: "migrates validators and adds them to new buckets",
|
||||
setup: func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator) {
|
||||
// create some new buckets that should be present for this migration
|
||||
err := dbStore.db.Update(func(tx *bbolt.Tx) error {
|
||||
_, err := tx.CreateBucketIfNotExists(stateValidatorsBucket)
|
||||
assert.NoError(t, err)
|
||||
_, err = tx.CreateBucketIfNotExists(blockRootValidatorHashesBucket)
|
||||
assert.NoError(t, err)
|
||||
return nil
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
},
|
||||
eval: func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator) {
|
||||
// check whether the new buckets are present
|
||||
err := dbStore.db.View(func(tx *bbolt.Tx) error {
|
||||
valBkt := tx.Bucket(stateValidatorsBucket)
|
||||
assert.NotNil(t, valBkt)
|
||||
idxBkt := tx.Bucket(blockRootValidatorHashesBucket)
|
||||
assert.NotNil(t, idxBkt)
|
||||
return nil
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// check if the migration worked
|
||||
blockRoot := [32]byte{'A'}
|
||||
rcvdState, err := dbStore.State(context.Background(), blockRoot)
|
||||
assert.NoError(t, err)
|
||||
require.DeepSSZEqual(t, rcvdState.ToProtoUnsafe(), state.ToProtoUnsafe(), "saved state with validators and retrieved state are not matching")
|
||||
|
||||
// find hashes of the validators that are set as part of the state
|
||||
var hashes []byte
|
||||
var individualHashes [][]byte
|
||||
for _, val := range vals {
|
||||
hash, hashErr := val.HashTreeRoot()
|
||||
assert.NoError(t, hashErr)
|
||||
hashes = append(hashes, hash[:]...)
|
||||
individualHashes = append(individualHashes, hash[:])
|
||||
}
|
||||
|
||||
// check if all the validators that were in the state, are stored properly in the validator bucket
|
||||
pbState, err := state_native.ProtobufBeaconStateCapella(rcvdState.ToProtoUnsafe())
|
||||
assert.NoError(t, err)
|
||||
validatorsFoundCount := 0
|
||||
for _, val := range pbState.Validators {
|
||||
hash, hashErr := val.HashTreeRoot()
|
||||
assert.NoError(t, hashErr)
|
||||
found := false
|
||||
for _, h := range individualHashes {
|
||||
if bytes.Equal(hash[:], h) {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
require.Equal(t, true, found)
|
||||
validatorsFoundCount++
|
||||
}
|
||||
require.Equal(t, len(vals), validatorsFoundCount)
|
||||
|
||||
// check if the state validator indexes are stored properly
|
||||
err = dbStore.db.View(func(tx *bbolt.Tx) error {
|
||||
rcvdValhashBytes := tx.Bucket(blockRootValidatorHashesBucket).Get(blockRoot[:])
|
||||
rcvdValHashes, sErr := snappy.Decode(nil, rcvdValhashBytes)
|
||||
assert.NoError(t, sErr)
|
||||
require.DeepEqual(t, hashes, rcvdValHashes)
|
||||
return nil
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
dbStore := setupDB(t)
|
||||
|
||||
// add a state with the given validators
|
||||
vals := validators(10)
|
||||
blockRoot := [32]byte{'A'}
|
||||
st, _ := util.DeterministicGenesisStateCapella(t, 20)
|
||||
err := st.SetFork(&v1alpha1.Fork{
|
||||
PreviousVersion: params.BeaconConfig().BellatrixForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
|
||||
Epoch: 0,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, st.SetSlot(100))
|
||||
assert.NoError(t, st.SetValidators(vals))
|
||||
assert.NoError(t, dbStore.SaveState(context.Background(), st, blockRoot))
|
||||
|
||||
// enable historical state representation flag to test this
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableHistoricalSpaceRepresentation: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
tt.setup(t, dbStore, st, vals)
|
||||
assert.NoError(t, migrateStateValidators(context.Background(), dbStore.db), "migrateArchivedIndex(tx) error")
|
||||
tt.eval(t, dbStore, st, vals)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -115,28 +115,32 @@ func FuzzExchangeTransitionConfiguration(f *testing.F) {
|
||||
|
||||
func FuzzExecutionPayload(f *testing.F) {
|
||||
logsBloom := [256]byte{'j', 'u', 'n', 'k'}
|
||||
execData := &engine.ExecutableData{
|
||||
ParentHash: common.Hash([32]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}),
|
||||
FeeRecipient: common.Address([20]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF}),
|
||||
StateRoot: common.Hash([32]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}),
|
||||
ReceiptsRoot: common.Hash([32]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}),
|
||||
LogsBloom: logsBloom[:],
|
||||
Random: common.Hash([32]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}),
|
||||
Number: math.MaxUint64,
|
||||
GasLimit: math.MaxUint64,
|
||||
GasUsed: math.MaxUint64,
|
||||
Timestamp: 100,
|
||||
ExtraData: nil,
|
||||
BaseFeePerGas: big.NewInt(math.MaxInt),
|
||||
BlockHash: common.Hash([32]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}),
|
||||
Transactions: [][]byte{{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}, {0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}, {0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}, {0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}},
|
||||
execData := &engine.ExecutionPayloadEnvelope{
|
||||
ExecutionPayload: &engine.ExecutableData{
|
||||
ParentHash: common.Hash([32]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}),
|
||||
FeeRecipient: common.Address([20]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF}),
|
||||
StateRoot: common.Hash([32]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}),
|
||||
ReceiptsRoot: common.Hash([32]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}),
|
||||
LogsBloom: logsBloom[:],
|
||||
Random: common.Hash([32]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}),
|
||||
Number: math.MaxUint64,
|
||||
GasLimit: math.MaxUint64,
|
||||
GasUsed: math.MaxUint64,
|
||||
Timestamp: 100,
|
||||
ExtraData: nil,
|
||||
BaseFeePerGas: big.NewInt(math.MaxInt),
|
||||
BlockHash: common.Hash([32]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}),
|
||||
Transactions: [][]byte{{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}, {0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}, {0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}, {0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}},
|
||||
Withdrawals: []*types.Withdrawal{},
|
||||
},
|
||||
BlockValue: nil,
|
||||
}
|
||||
output, err := json.Marshal(execData)
|
||||
assert.NoError(f, err)
|
||||
f.Add(output)
|
||||
f.Fuzz(func(t *testing.T, jsonBlob []byte) {
|
||||
gethResp := &engine.ExecutableData{}
|
||||
prysmResp := &pb.ExecutionPayload{}
|
||||
gethResp := &engine.ExecutionPayloadEnvelope{}
|
||||
prysmResp := &pb.ExecutionPayloadCapellaWithValue{}
|
||||
gethErr := json.Unmarshal(jsonBlob, gethResp)
|
||||
prysmErr := json.Unmarshal(jsonBlob, prysmResp)
|
||||
assert.Equal(t, gethErr != nil, prysmErr != nil, fmt.Sprintf("geth and prysm unmarshaller return inconsistent errors. %v and %v", gethErr, prysmErr))
|
||||
@@ -147,10 +151,10 @@ func FuzzExecutionPayload(f *testing.F) {
|
||||
gethBlob, gethErr := json.Marshal(gethResp)
|
||||
prysmBlob, prysmErr := json.Marshal(prysmResp)
|
||||
assert.Equal(t, gethErr != nil, prysmErr != nil, "geth and prysm unmarshaller return inconsistent errors")
|
||||
newGethResp := &engine.ExecutableData{}
|
||||
newGethResp := &engine.ExecutionPayloadEnvelope{}
|
||||
newGethErr := json.Unmarshal(prysmBlob, newGethResp)
|
||||
assert.NoError(t, newGethErr)
|
||||
newGethResp2 := &engine.ExecutableData{}
|
||||
newGethResp2 := &engine.ExecutionPayloadEnvelope{}
|
||||
newGethErr = json.Unmarshal(gethBlob, newGethResp2)
|
||||
assert.NoError(t, newGethErr)
|
||||
|
||||
|
||||
@@ -39,7 +39,7 @@ func New() *ForkChoice {
|
||||
|
||||
b := make([]uint64, 0)
|
||||
v := make([]Vote, 0)
|
||||
return &ForkChoice{store: s, balances: b, votes: v}
|
||||
return &ForkChoice{store: s, balances: b, votes: v, fcLock: new(fcLock)}
|
||||
}
|
||||
|
||||
// NodeCount returns the current number of nodes in the Store.
|
||||
@@ -265,6 +265,7 @@ func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool
|
||||
// validators' latest votes.
|
||||
func (f *ForkChoice) updateBalances() error {
|
||||
newBalances := f.justifiedBalances
|
||||
zHash := params.BeaconConfig().ZeroHash
|
||||
|
||||
for index, vote := range f.votes {
|
||||
// Skip if validator has been slashed
|
||||
@@ -273,7 +274,7 @@ func (f *ForkChoice) updateBalances() error {
|
||||
}
|
||||
// Skip if validator has never voted for current root and next root (i.e. if the
|
||||
// votes are zero hash aka genesis block), there's nothing to compute.
|
||||
if vote.currentRoot == params.BeaconConfig().ZeroHash && vote.nextRoot == params.BeaconConfig().ZeroHash {
|
||||
if vote.currentRoot == zHash && vote.nextRoot == zHash {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -293,7 +294,7 @@ func (f *ForkChoice) updateBalances() error {
|
||||
// Ignore the vote if the root is not in fork choice
|
||||
// store, that means we have not seen the block before.
|
||||
nextNode, ok := f.store.nodeByRoot[vote.nextRoot]
|
||||
if ok && vote.nextRoot != params.BeaconConfig().ZeroHash {
|
||||
if ok && vote.nextRoot != zHash {
|
||||
// Protection against nil node
|
||||
if nextNode == nil {
|
||||
return errors.Wrap(ErrNilNode, "could not update balances")
|
||||
@@ -302,7 +303,7 @@ func (f *ForkChoice) updateBalances() error {
|
||||
}
|
||||
|
||||
currentNode, ok := f.store.nodeByRoot[vote.currentRoot]
|
||||
if ok && vote.currentRoot != params.BeaconConfig().ZeroHash {
|
||||
if ok && vote.currentRoot != zHash {
|
||||
// Protection against nil node
|
||||
if currentNode == nil {
|
||||
return errors.Wrap(ErrNilNode, "could not update balances")
|
||||
|
||||
@@ -31,9 +31,7 @@ import (
|
||||
// store.justified_checkpoint = store.best_justified_checkpoint
|
||||
func (f *ForkChoice) NewSlot(ctx context.Context, slot primitives.Slot) error {
|
||||
// Reset proposer boost root
|
||||
if err := f.resetBoostedProposerRoot(ctx); err != nil {
|
||||
return errors.Wrap(err, "could not reset boosted proposer root in fork choice")
|
||||
}
|
||||
f.store.proposerBoostRoot = [32]byte{}
|
||||
|
||||
// Return if it's not a new epoch.
|
||||
if !slots.IsEpochStart(slot) {
|
||||
|
||||
@@ -1,19 +1,12 @@
|
||||
package doublylinkedtree
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
)
|
||||
|
||||
// resetBoostedProposerRoot sets the value of the proposer boosted root to zeros.
|
||||
func (f *ForkChoice) resetBoostedProposerRoot(_ context.Context) error {
|
||||
f.store.proposerBoostRoot = [32]byte{}
|
||||
return nil
|
||||
}
|
||||
|
||||
// applyProposerBoostScore applies the current proposer boost scores to the
|
||||
// relevant nodes.
|
||||
func (f *ForkChoice) applyProposerBoostScore() error {
|
||||
|
||||
@@ -82,6 +82,11 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
|
||||
if head.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgWeightThreshold {
|
||||
return
|
||||
}
|
||||
|
||||
// Only orphan a block if the parent LMD vote is strong
|
||||
if parent.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
|
||||
return
|
||||
}
|
||||
return true
|
||||
}
|
||||
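The two guards added above compare the late head's and its parent's accumulated LMD weight against fractions of the total committee weight: a reorg is only considered when the head is weak and its parent is strong. A small arithmetic sketch of that rule (the 20% and 160% thresholds are assumed placeholder values for illustration, not necessarily the configured constants):

```go
package main

import "fmt"

// shouldConsiderReorg mirrors the two weight checks: the incoming head must be
// weak (below headThresholdPct of committee weight) and its parent must be
// strong (at least parentThresholdPct of committee weight).
func shouldConsiderReorg(headWeight, parentWeight, committeeWeight, headThresholdPct, parentThresholdPct uint64) bool {
	if headWeight*100 > committeeWeight*headThresholdPct {
		return false // head already has substantial support
	}
	if parentWeight*100 < committeeWeight*parentThresholdPct {
		return false // parent vote is not strong enough
	}
	return true
}

func main() {
	// Assumed example thresholds: head below 20%, parent at or above 160%
	// (accumulated subtree weight can exceed a single slot's committee weight).
	fmt.Println(shouldConsiderReorg(10, 170, 100, 20, 160)) // true
	fmt.Println(shouldConsiderReorg(30, 170, 100, 20, 160)) // false: head too strong
	fmt.Println(shouldConsiderReorg(10, 120, 100, 20, 160)) // false: parent too weak
}
```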
|
||||
@@ -137,6 +142,11 @@ func (f *ForkChoice) GetProposerHead() [32]byte {
|
||||
return head.root
|
||||
}
|
||||
|
||||
// Only orphan a block if the parent LMD vote is strong
|
||||
if parent.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
|
||||
return head.root
|
||||
}
|
||||
|
||||
// Only reorg if we are proposing early
|
||||
secs, err := slots.SecondsSinceSlotStart(head.slot+1, f.store.genesisTime, uint64(time.Now().Unix()))
|
||||
if err != nil {
|
||||
|
||||
@@ -22,7 +22,11 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
|
||||
st, root, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, [32]byte{}, [32]byte{'A'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
f.ProcessAttestation(ctx, []uint64{0, 1, 2}, root, 0)
|
||||
attesters := make([]uint64, f.numActiveValidators-64)
|
||||
for i := range attesters {
|
||||
attesters[i] = uint64(i + 64)
|
||||
}
|
||||
f.ProcessAttestation(ctx, attesters, root, 0)
|
||||
|
||||
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+1)
|
||||
st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
|
||||
@@ -80,6 +84,12 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
|
||||
require.Equal(t, false, f.ShouldOverrideFCU())
|
||||
f.store.headNode.parent = saved
|
||||
})
|
||||
t.Run("parent is weak", func(t *testing.T) {
|
||||
saved := f.store.headNode.parent.weight
|
||||
f.store.headNode.parent.weight = 0
|
||||
require.Equal(t, false, f.ShouldOverrideFCU())
|
||||
f.store.headNode.parent.weight = saved
|
||||
})
|
||||
t.Run("Head is strong", func(t *testing.T) {
|
||||
f.store.headNode.weight = f.store.committeeWeight
|
||||
require.Equal(t, false, f.ShouldOverrideFCU())
|
||||
@@ -101,7 +111,11 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
|
||||
st, root, err := prepareForkchoiceState(ctx, 1, parentRoot, [32]byte{}, [32]byte{'A'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
f.ProcessAttestation(ctx, []uint64{0, 1, 2}, root, 0)
|
||||
attesters := make([]uint64, f.numActiveValidators-64)
|
||||
for i := range attesters {
|
||||
attesters[i] = uint64(i + 64)
|
||||
}
|
||||
f.ProcessAttestation(ctx, attesters, root, 0)
|
||||
|
||||
driftGenesisTime(f, 3, 1)
|
||||
childRoot := [32]byte{'b'}
|
||||
@@ -161,6 +175,12 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
|
||||
require.Equal(t, childRoot, f.GetProposerHead())
|
||||
f.store.headNode.parent = saved
|
||||
})
|
||||
t.Run("parent is weak", func(t *testing.T) {
|
||||
saved := f.store.headNode.parent.weight
|
||||
f.store.headNode.parent.weight = 0
|
||||
require.Equal(t, false, f.ShouldOverrideFCU())
|
||||
f.store.headNode.parent.weight = saved
|
||||
})
|
||||
t.Run("Head is strong", func(t *testing.T) {
|
||||
f.store.headNode.weight = f.store.committeeWeight
|
||||
require.Equal(t, childRoot, f.GetProposerHead())
|
||||
|
||||
@@ -1,7 +1,11 @@
|
||||
package doublylinkedtree
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"runtime/debug"
|
||||
"runtime/pprof"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
|
||||
@@ -11,7 +15,7 @@ import (
|
||||
|
||||
// ForkChoice defines the overall fork choice store which includes all block nodes, validator's latest votes and balances.
|
||||
type ForkChoice struct {
|
||||
sync.RWMutex
|
||||
*fcLock
|
||||
store *Store
|
||||
votes []Vote // tracks individual validator's last vote.
|
||||
balances []uint64 // tracks individual validator's balances last accounted in votes.
|
||||
@@ -68,3 +72,52 @@ type Vote struct {
|
||||
nextRoot [fieldparams.RootLength]byte // next voting root.
|
||||
nextEpoch primitives.Epoch // epoch of next voting period.
|
||||
}
|
||||
|
||||
type fcLock struct {
	lk       sync.RWMutex
	t        time.Time
	currChan chan int
}

func (f *fcLock) Lock() {
	f.lk.Lock()
	f.t = time.Now()
	f.currChan = make(chan int)
	go func(t time.Time, c chan int) {
		tim := time.NewTimer(3 * time.Second)
		select {
		case <-c:
			tim.Stop()
		case <-tim.C:
			tim.Stop()
			pfile := pprof.Lookup("goroutine")
			bf := bytes.NewBuffer([]byte{})
			err := pfile.WriteTo(bf, 1)
			_ = err
			log.Warnf("FC lock is taking longer than 3 seconds with the complete stack of %s", bf.String())
		}
	}(time.Now(), f.currChan)
}

func (f *fcLock) Unlock() {
	t := time.Since(f.t)
	f.t = time.Time{}
	close(f.currChan)
	f.lk.Unlock()
	if t > time.Second {
		log.Warnf("FC lock is taking longer than 1 second: %s with the complete stack of %s", t.String(), string(debug.Stack()))
	}
}

func (f *fcLock) RLock() {
	t := time.Now()
	f.lk.RLock()
	dt := time.Since(t)
	if dt > time.Second {
		log.Warnf("FC Rlock is taking longer than 1 second: %s with stack %s", dt.String(), string(debug.Stack()))
	}
}

func (f *fcLock) RUnlock() {
	f.lk.RUnlock()
}
|
||||
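The fcLock above wraps the fork-choice RWMutex so that holding the write lock for more than a few seconds dumps all goroutine stacks to the log, which makes lock-contention debugging much easier on a live node. A minimal standalone version of the same idea (arbitrary 1-second threshold, standard library only, not the Prysm type itself):

```go
package main

import (
	"bytes"
	"log"
	"runtime/pprof"
	"sync"
	"time"
)

// slowLock logs a goroutine dump if the write lock is held longer than warnAfter.
type slowLock struct {
	mu        sync.Mutex
	warnAfter time.Duration
	done      chan struct{}
}

func (l *slowLock) Lock() {
	l.mu.Lock()
	l.done = make(chan struct{})
	go func(done chan struct{}) {
		select {
		case <-done:
			// Released in time, nothing to report.
		case <-time.After(l.warnAfter):
			var buf bytes.Buffer
			_ = pprof.Lookup("goroutine").WriteTo(&buf, 1)
			log.Printf("lock held longer than %s:\n%s", l.warnAfter, buf.String())
		}
	}(l.done)
}

func (l *slowLock) Unlock() {
	close(l.done)
	l.mu.Unlock()
}

func main() {
	l := &slowLock{warnAfter: time.Second}
	l.Lock()
	time.Sleep(2 * time.Second) // triggers the warning with a stack dump
	l.Unlock()
}
```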
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
@@ -29,6 +30,9 @@ func (s *Service) processSyncAggregate(state state.BeaconState, blk interfaces.R
|
||||
if blk == nil || blk.Body() == nil {
|
||||
return
|
||||
}
|
||||
if blk.Version() == version.Phase0 {
|
||||
return
|
||||
}
|
||||
bits, err := blk.Body().SyncAggregate()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get SyncAggregate")
|
||||
|
||||
@@ -114,19 +114,11 @@ func (s *Service) Start() {
|
||||
"ValidatorIndices": tracked,
|
||||
}).Info("Starting service")
|
||||
|
||||
stateChannel := make(chan *feed.Event, 1)
|
||||
stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
|
||||
|
||||
go s.run(stateChannel, stateSub)
|
||||
go s.run()
|
||||
}
|
||||
|
||||
// run waits until the beacon is synced and starts the monitoring system.
|
||||
func (s *Service) run(stateChannel chan *feed.Event, stateSub event.Subscription) {
|
||||
if stateChannel == nil {
|
||||
log.Error("State state is nil")
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Service) run() {
|
||||
if err := s.waitForSync(s.config.InitialSyncComplete); err != nil {
|
||||
log.WithError(err)
|
||||
return
|
||||
@@ -154,6 +146,8 @@ func (s *Service) run(stateChannel chan *feed.Event, stateSub event.Subscription
|
||||
s.isLogging = true
|
||||
s.Unlock()
|
||||
|
||||
stateChannel := make(chan *feed.Event, 1)
|
||||
stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
|
||||
s.monitorRoutine(stateChannel, stateSub)
|
||||
}
|
||||
|
||||
|
||||
@@ -271,11 +271,9 @@ func TestWaitForSyncCanceled(t *testing.T) {
|
||||
func TestRun(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
s := setupService(t)
|
||||
stateChannel := make(chan *feed.Event, 1)
|
||||
stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
|
||||
|
||||
go func() {
|
||||
s.run(stateChannel, stateSub)
|
||||
s.run()
|
||||
}()
|
||||
close(s.config.InitialSyncComplete)
|
||||
|
||||
|
||||
@@ -230,13 +230,13 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debugln("Registering Determinstic Genesis Service")
|
||||
if err := beacon.registerDeterminsticGenesisService(); err != nil {
|
||||
log.Debugln("Registering Deterministic Genesis Service")
|
||||
if err := beacon.registerDeterministicGenesisService(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debugln("Registering Blockchain Service")
|
||||
if err := beacon.registerBlockchainService(beacon.forkChoicer, synchronizer); err != nil {
|
||||
if err := beacon.registerBlockchainService(beacon.forkChoicer, synchronizer, beacon.initialSyncComplete); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -581,7 +581,8 @@ func (b *BeaconNode) fetchBuilderService() *builder.Service {
|
||||
|
||||
func (b *BeaconNode) registerAttestationPool() error {
|
||||
s, err := attestations.NewService(b.ctx, &attestations.Config{
|
||||
Pool: b.attestationPool,
|
||||
Pool: b.attestationPool,
|
||||
InitialSyncComplete: b.initialSyncComplete,
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not register atts pool service")
|
||||
@@ -589,7 +590,7 @@ func (b *BeaconNode) registerAttestationPool() error {
|
||||
return b.services.RegisterService(s)
|
||||
}
|
||||
|
||||
func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *startup.ClockSynchronizer) error {
|
||||
func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *startup.ClockSynchronizer, syncComplete chan struct{}) error {
|
||||
var web3Service *execution.Service
|
||||
if err := b.services.FetchService(&web3Service); err != nil {
|
||||
return err
|
||||
@@ -620,6 +621,7 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
|
||||
blockchain.WithFinalizedStateAtStartUp(b.finalizedStateAtStartUp),
|
||||
blockchain.WithProposerIdsCache(b.proposerIdsCache),
|
||||
blockchain.WithClockSynchronizer(gs),
|
||||
blockchain.WithSyncComplete(syncComplete),
|
||||
)
|
||||
|
||||
blockchainService, err := blockchain.NewService(b.ctx, opts...)
|
||||
@@ -742,6 +744,7 @@ func (b *BeaconNode) registerSlasherService() error {
|
||||
SlashingPoolInserter: b.slashingsPool,
|
||||
SyncChecker: syncService,
|
||||
HeadStateFetcher: chainService,
|
||||
ClockWaiter: b.clockWaiter,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -921,7 +924,7 @@ func (b *BeaconNode) registerGRPCGateway(router *mux.Router) error {
|
||||
return b.services.RegisterService(g)
|
||||
}
|
||||
|
||||
func (b *BeaconNode) registerDeterminsticGenesisService() error {
|
||||
func (b *BeaconNode) registerDeterministicGenesisService() error {
|
||||
genesisTime := b.cliCtx.Uint64(flags.InteropGenesisTimeFlag.Name)
|
||||
genesisValidators := b.cliCtx.Uint64(flags.InteropNumValidatorsFlag.Name)
|
||||
|
||||
|
||||
@@ -18,6 +18,7 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/operations/attestations/kv:go_default_library",
|
||||
"//cache/lru:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
|
||||
@@ -53,27 +53,25 @@ func (c *AttCaches) aggregateUnaggregatedAttestations(ctx context.Context, unagg
|
||||
// Track the unaggregated attestations that aren't able to aggregate.
|
||||
leftOverUnaggregatedAtt := make(map[[32]byte]bool)
|
||||
for _, atts := range attsByDataRoot {
|
||||
aggregatedAtts := make([]*ethpb.Attestation, 0, len(atts))
|
||||
processedAtts, err := attaggregation.Aggregate(atts)
|
||||
aggregated, err := attaggregation.AggregateDisjointOneBitAtts(atts)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "could not aggregate unaggregated attestations")
|
||||
}
|
||||
for _, att := range processedAtts {
|
||||
if helpers.IsAggregated(att) {
|
||||
aggregatedAtts = append(aggregatedAtts, att)
|
||||
} else {
|
||||
h, err := hashFn(att)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
leftOverUnaggregatedAtt[h] = true
|
||||
if aggregated == nil {
|
||||
return errors.New("could not aggregate unaggregated attestations")
|
||||
}
|
||||
if helpers.IsAggregated(aggregated) {
|
||||
if err := c.SaveAggregatedAttestations([]*ethpb.Attestation{aggregated}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := c.SaveAggregatedAttestations(aggregatedAtts); err != nil {
|
||||
return err
|
||||
} else {
|
||||
h, err := hashFn(aggregated)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
leftOverUnaggregatedAtt[h] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Remove the unaggregated attestations from the pool that were successfully aggregated.
|
||||
for _, att := range unaggregatedAtts {
|
||||
h, err := hashFn(att)
|
||||
@@ -87,7 +85,6 @@ func (c *AttCaches) aggregateUnaggregatedAttestations(ctx context.Context, unagg
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -30,6 +30,20 @@ var (
|
||||
Name: "expired_block_atts_total",
|
||||
Help: "The number of expired and deleted block attestations in the pool.",
|
||||
})
|
||||
batchForkChoiceAttsT1 = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "aggregate_attestations_t1",
|
||||
Help: "Captures times of attestation aggregation in milliseconds during the first interval per slot",
|
||||
Buckets: []float64{100, 200, 500, 1000, 1500, 2000, 2500, 3500},
|
||||
},
|
||||
)
|
||||
batchForkChoiceAttsT2 = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "aggregate_attestations_t2",
|
||||
Help: "Captures times of attestation aggregation in milliseconds during the second interval per slot",
|
||||
Buckets: []float64{10, 40, 100, 200, 600},
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
func (s *Service) updateMetrics() {
|
||||
|
||||
@@ -7,6 +7,8 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/crypto/hash"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
attaggregation "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation/aggregation/attestations"
|
||||
@@ -14,20 +16,34 @@ import (
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// Prepare attestations for fork choice three times per slot.
var prepareForkChoiceAttsPeriod = slots.DivideSlotBy(3 /* times-per-slot */)

// This prepares fork choice attestations by running batchForkChoiceAtts
// every prepareForkChoiceAttsPeriod.
func (s *Service) prepareForkChoiceAtts() {
	ticker := time.NewTicker(prepareForkChoiceAttsPeriod)
	defer ticker.Stop()
	intervals := features.Get().AggregateIntervals
	slotDuration := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
	// Adjust intervals for networks with a lower slot duration (Hive, e2e, etc)
	for {
		if intervals[len(intervals)-1] >= slotDuration {
			for i, offset := range intervals {
				intervals[i] = offset / 2
			}
		} else {
			break
		}
	}
	ticker := slots.NewSlotTickerWithIntervals(time.Unix(int64(s.genesisTime), 0), intervals[:])
	for {
		select {
		case <-ticker.C:
		case <-ticker.C():
			t := time.Now()
			if err := s.batchForkChoiceAtts(s.ctx); err != nil {
				log.WithError(err).Error("Could not prepare attestations for fork choice")
			}
			if slots.TimeIntoSlot(s.genesisTime) < intervals[1] {
				batchForkChoiceAttsT1.Observe(float64(time.Since(t).Milliseconds()))
			} else if slots.TimeIntoSlot(s.genesisTime) < intervals[2] {
				batchForkChoiceAttsT2.Observe(float64(time.Since(t).Milliseconds()))
			}
		case <-s.ctx.Done():
			log.Debug("Context closed, exiting routine")
			return
|
||||
|
||||
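The adjustment loop above simply halves every AggregateIntervals offset until the last one fits inside the slot duration, so short-slot test networks (Hive, e2e) still get their ticks within each slot. A small sketch with assumed example offsets (not the actual config values):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed offsets into the slot at which aggregation should run.
	intervals := []time.Duration{7 * time.Second, 9 * time.Second, 11 * time.Second}
	slotDuration := 6 * time.Second // e.g. a test network with shorter slots

	// Halve all offsets until the last one is below the slot duration.
	for intervals[len(intervals)-1] >= slotDuration {
		for i, offset := range intervals {
			intervals[i] = offset / 2
		}
	}
	fmt.Println(intervals) // [3.5s 4.5s 5.5s]
}
```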
@@ -5,6 +5,7 @@ package attestations

import (
	"context"
	"errors"
	"time"

	lru "github.com/hashicorp/golang-lru"
@@ -26,8 +27,9 @@ type Service struct {

// Config options for the service.
type Config struct {
	Pool Pool
	pruneInterval time.Duration
	Pool Pool
	pruneInterval time.Duration
	InitialSyncComplete chan struct{}
}

// NewService instantiates a new attestation pool service instance that will
@@ -51,10 +53,24 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {

// Start an attestation pool service's main event loop.
func (s *Service) Start() {
	if err := s.waitForSync(s.cfg.InitialSyncComplete); err != nil {
		log.WithError(err).Error("failed to wait for initial sync")
		return
	}
	go s.prepareForkChoiceAtts()
	go s.pruneAttsPool()
}

// waitForSync waits until the beacon node is synced to the latest head.
func (s *Service) waitForSync(syncChan chan struct{}) error {
	select {
	case <-syncChan:
		return nil
	case <-s.ctx.Done():
		return errors.New("context closed, exiting goroutine")
	}
}

// Stop the beacon block attestation pool service's main event loop
// and associated goroutines.
func (s *Service) Stop() error {
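The Start/waitForSync change above gates the pool's goroutines on initial sync completing. A minimal sketch of the same select-on-channel-or-context pattern, with illustrative names:

package main

import (
	"context"
	"errors"
)

// waitForSignal blocks until ch is closed (or receives) or ctx is cancelled,
// mirroring the waitForSync gating above.
func waitForSignal(ctx context.Context, ch <-chan struct{}) error {
	select {
	case <-ch:
		return nil
	case <-ctx.Done():
		return errors.New("context closed before signal")
	}
}

func main() {
	ready := make(chan struct{})
	close(ready) // simulate initial sync completing
	if err := waitForSignal(context.Background(), ready); err != nil {
		panic(err)
	}
}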
@@ -30,8 +30,22 @@ func (s *Store) SaveSyncCommitteeMessage(msg *ethpb.SyncCommitteeMessage) error
		return errors.New("not typed []ethpb.SyncCommitteeMessage")
	}

	messages = append(messages, copied)
	savedSyncCommitteeMessageTotal.Inc()
	idx := -1
	for i, msg := range messages {
		if msg.ValidatorIndex == copied.ValidatorIndex {
			idx = i
			break
		}
	}
	if idx >= 0 {
		// Override the existing messages with a new one
		messages[idx] = copied
	} else {
		// Append the new message
		messages = append(messages, copied)
		savedSyncCommitteeMessageTotal.Inc()
	}

	return s.messageCache.Push(&queue.Item{
		Key: syncCommitteeKey(msg.Slot),
		Value: messages,
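The SaveSyncCommitteeMessage change above replaces an existing message from the same validator instead of appending a duplicate. A small sketch of that override-or-append step, using a stand-in type rather than the real ethpb message:

package main

import "fmt"

type message struct {
	ValidatorIndex uint64
	Data           string
}

// upsertByValidator replaces an existing entry with the same ValidatorIndex,
// or appends a new one, mirroring the logic above.
func upsertByValidator(msgs []message, m message) []message {
	for i := range msgs {
		if msgs[i].ValidatorIndex == m.ValidatorIndex {
			msgs[i] = m
			return msgs
		}
	}
	return append(msgs, m)
}

func main() {
	msgs := []message{{ValidatorIndex: 1, Data: "old"}}
	msgs = upsertByValidator(msgs, message{ValidatorIndex: 1, Data: "new"})
	fmt.Println(msgs) // [{1 new}]
}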
@@ -55,6 +55,7 @@ go_library(
        "//beacon-chain/p2p/types:go_default_library",
        "//beacon-chain/startup:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/features:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//consensus-types/wrapper:go_default_library",

@@ -460,6 +460,19 @@ func convertToUdpMultiAddr(node *enode.Node) ([]ma.Multiaddr, error) {
	return addresses, nil
}

func peerIdsFromMultiAddrs(addrs []ma.Multiaddr) []peer.ID {
	peers := []peer.ID{}
	for _, a := range addrs {
		info, err := peer.AddrInfoFromP2pAddr(a)
		if err != nil {
			log.WithError(err).Errorf("Could not derive peer info from multiaddress %s", a.String())
			continue
		}
		peers = append(peers, info.ID)
	}
	return peers
}

func multiAddrFromString(address string) (ma.Multiaddr, error) {
	return ma.NewMultiaddr(address)
}
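peerIdsFromMultiAddrs above extracts the /p2p (peer ID) component from each multiaddress and skips entries it cannot parse. A rough usage sketch, assuming only the public go-multiaddr and libp2p peer packages; the address literal is illustrative:

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"
	ma "github.com/multiformats/go-multiaddr"
)

// idsFromAddrs mirrors the helper above: addresses that do not carry a /p2p
// component are skipped rather than failing the whole batch.
func idsFromAddrs(addrs []ma.Multiaddr) []peer.ID {
	ids := make([]peer.ID, 0, len(addrs))
	for _, a := range addrs {
		info, err := peer.AddrInfoFromP2pAddr(a)
		if err != nil {
			fmt.Printf("skipping %s: %v\n", a, err)
			continue
		}
		ids = append(ids, info.ID)
	}
	return ids
}

func main() {
	a, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/13000") // no /p2p part, so it is skipped
	if err != nil {
		panic(err)
	}
	fmt.Println(len(idsFromAddrs([]ma.Multiaddr{a}))) // 0
}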
@@ -6,12 +6,14 @@ import (
	"net"

	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/p2p/muxer/mplex"
	"github.com/libp2p/go-libp2p/p2p/security/noise"
	"github.com/libp2p/go-libp2p/p2p/transport/tcp"
	ma "github.com/multiformats/go-multiaddr"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v4/config/features"
	ecdsaprysm "github.com/prysmaticlabs/prysm/v4/crypto/ecdsa"
	"github.com/prysmaticlabs/prysm/v4/runtime/version"
)
@@ -60,8 +62,8 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Opt
		libp2p.UserAgent(version.BuildData()),
		libp2p.ConnectionGater(s),
		libp2p.Transport(tcp.NewTCPTransport),
		libp2p.Muxer("/mplex/6.7.0", mplex.DefaultTransport),
		libp2p.DefaultMuxers,
		libp2p.Muxer("/mplex/6.7.0", mplex.DefaultTransport),
	}

	options = append(options, libp2p.Security(noise.ID, noise.New))
@@ -99,6 +101,9 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Opt
	}
	// Disable Ping Service.
	options = append(options, libp2p.Ping(false))
	if features.Get().DisableResourceManager {
		options = append(options, libp2p.ResourceManager(&network.NullResourceManager{}))
	}
	return options
}
@@ -123,7 +123,7 @@ func TestDefaultMultiplexers(t *testing.T) {
	err = cfg.Apply(append(opts, libp2p.FallbackDefaults)...)
	assert.NoError(t, err)

	assert.Equal(t, protocol.ID("/mplex/6.7.0"), cfg.Muxers[0].ID)
	assert.Equal(t, protocol.ID("/yamux/1.0.0"), cfg.Muxers[1].ID)
	assert.Equal(t, protocol.ID("/yamux/1.0.0"), cfg.Muxers[0].ID)
	assert.Equal(t, protocol.ID("/mplex/6.7.0"), cfg.Muxers[1].ID)

}
@@ -14,6 +14,7 @@ go_library(
    deps = [
        "//beacon-chain/p2p/peers/peerdata:go_default_library",
        "//beacon-chain/p2p/peers/scorers:go_default_library",
        "//config/features:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//crypto/rand:go_default_library",
@@ -44,6 +45,7 @@ go_test(
        "//beacon-chain/p2p/peers/peerdata:go_default_library",
        "//beacon-chain/p2p/peers/scorers:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/features:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//consensus-types/wrapper:go_default_library",
@@ -38,9 +38,10 @@ type StoreConfig struct {
// the mutex when accessing data.
type Store struct {
	sync.RWMutex
	ctx context.Context
	config *StoreConfig
	peers map[peer.ID]*PeerData
	ctx context.Context
	config *StoreConfig
	peers map[peer.ID]*PeerData
	trustedPeers map[peer.ID]bool
}

// PeerData aggregates protocol and application level info about a single peer.
@@ -69,9 +70,10 @@ type PeerData struct {
// NewStore creates new peer data store.
func NewStore(ctx context.Context, config *StoreConfig) *Store {
	return &Store{
		ctx: ctx,
		config: config,
		peers: make(map[peer.ID]*PeerData),
		ctx: ctx,
		config: config,
		peers: make(map[peer.ID]*PeerData),
		trustedPeers: make(map[peer.ID]bool),
	}
}

@@ -105,12 +107,25 @@ func (s *Store) DeletePeerData(pid peer.ID) {
	delete(s.peers, pid)
}

// SetTrustedPeers sets our desired trusted peer set.
func (s *Store) SetTrustedPeers(peers []peer.ID) {
	for _, p := range peers {
		s.trustedPeers[p] = true
	}
}

// Peers returns map of peer data objects.
// Important: it is assumed that store mutex is locked when calling this method.
func (s *Store) Peers() map[peer.ID]*PeerData {
	return s.peers
}

// IsTrustedPeer checks that the provided peer
// is in our trusted peer set.
func (s *Store) IsTrustedPeer(p peer.ID) bool {
	return s.trustedPeers[p]
}

// Config exposes store configuration params.
func (s *Store) Config() *StoreConfig {
	return s.config
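The trustedPeers field above is a simple membership set: unknown peers default to false. A tiny sketch of the same idea, with string keys standing in for peer.ID:

package main

import "fmt"

// trustedSet mirrors the map-backed trusted peer set above; membership of
// unknown keys is simply false.
type trustedSet map[string]bool

func (s trustedSet) add(ids ...string) {
	for _, id := range ids {
		s[id] = true
	}
}

func main() {
	s := trustedSet{}
	s.add("peerA", "peerB")
	fmt.Println(s["peerA"], s["peerC"]) // true false
}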
@@ -80,3 +80,20 @@ func TestStore_PeerDataGetOrCreate(t *testing.T) {
	assert.Equal(t, uint64(0), peerData.ProcessedBlocks)
	require.Equal(t, 1, len(store.Peers()))
}

func TestStore_TrustedPeers(t *testing.T) {
	store := peerdata.NewStore(context.Background(), &peerdata.StoreConfig{
		MaxPeers: 12,
	})

	pid1 := peer.ID("00001")
	pid2 := peer.ID("00002")
	pid3 := peer.ID("00003")

	tPeers := []peer.ID{pid1, pid2, pid3}
	store.SetTrustedPeers(tPeers)

	assert.Equal(t, true, store.IsTrustedPeer(pid1))
	assert.Equal(t, true, store.IsTrustedPeer(pid2))
	assert.Equal(t, true, store.IsTrustedPeer(pid3))
}
@@ -5,6 +5,7 @@ import (
	"testing"

	"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
	"github.com/prysmaticlabs/prysm/v4/config/features"
	"github.com/sirupsen/logrus"
)

@@ -12,6 +13,11 @@ func TestMain(m *testing.M) {
	logrus.SetLevel(logrus.DebugLevel)
	logrus.SetOutput(io.Discard)

	resetCfg := features.InitWithReset(&features.Flags{
		EnablePeerScorer: true,
	})
	defer resetCfg()

	resetFlags := flags.Get()
	flags.Init(&flags.GlobalFlags{
		BlockBatchLimit: 64,
@@ -15,6 +15,7 @@ go_library(
        "//beacon-chain/p2p/peers/peerdata:go_default_library",
        "//beacon-chain/p2p/types:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/features:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//crypto/rand:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
@@ -39,6 +40,7 @@ go_test(
        "//beacon-chain/p2p/peers/peerdata:go_default_library",
        "//beacon-chain/p2p/types:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/features:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//crypto/rand:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
@@ -9,6 +9,7 @@ import (
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/peerdata"
	"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
	"github.com/prysmaticlabs/prysm/v4/config/features"
	"github.com/prysmaticlabs/prysm/v4/crypto/rand"
	prysmTime "github.com/prysmaticlabs/prysm/v4/time"
)
@@ -290,6 +291,9 @@ func (s *BlockProviderScorer) mapScoresAndPeers(
func (s *BlockProviderScorer) FormatScorePretty(pid peer.ID) string {
	s.store.RLock()
	defer s.store.RUnlock()
	if !features.Get().EnablePeerScorer {
		return "disabled"
	}
	score := s.score(pid)
	return fmt.Sprintf("[%0.1f%%, raw: %0.2f, blocks: %d/%d]",
		(score/s.MaxScore())*100, score, s.processedBlocks(pid), s.config.ProcessedBlocksCap)
@@ -11,6 +11,7 @@ import (
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/scorers"
	"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
	"github.com/prysmaticlabs/prysm/v4/config/features"
	"github.com/prysmaticlabs/prysm/v4/crypto/rand"
	"github.com/prysmaticlabs/prysm/v4/testing/assert"
	"github.com/prysmaticlabs/prysm/v4/time"
@@ -459,6 +460,16 @@ func TestScorers_BlockProvider_FormatScorePretty(t *testing.T) {
			tt.check(scorer)
		})
	}

	t.Run("peer scorer disabled", func(t *testing.T) {
		resetCfg := features.InitWithReset(&features.Flags{
			EnablePeerScorer: false,
		})
		defer resetCfg()
		peerStatuses := peerStatusGen()
		scorer := peerStatuses.Scorers().BlockProviderScorer()
		assert.Equal(t, "disabled", scorer.FormatScorePretty("peer1"))
	})
}

func TestScorers_BlockProvider_BadPeerMarking(t *testing.T) {
@@ -7,6 +7,7 @@ import (

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/scorers"
	"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
	"github.com/prysmaticlabs/prysm/v4/config/features"
	"github.com/sirupsen/logrus"
)

@@ -14,6 +15,11 @@ func TestMain(m *testing.M) {
	logrus.SetLevel(logrus.DebugLevel)
	logrus.SetOutput(io.Discard)

	resetCfg := features.InitWithReset(&features.Flags{
		EnablePeerScorer: true,
	})
	defer resetCfg()

	resetFlags := flags.Get()
	flags.Init(&flags.GlobalFlags{
		BlockBatchLimit: 64,
@@ -7,6 +7,7 @@ import (

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/peerdata"
	"github.com/prysmaticlabs/prysm/v4/config/features"
)

var _ Scorer = (*Service)(nil)
@@ -137,8 +138,10 @@ func (s *Service) IsBadPeerNoLock(pid peer.ID) bool {
	if s.scorers.peerStatusScorer.isBadPeer(pid) {
		return true
	}
	if s.scorers.gossipScorer.isBadPeer(pid) {
		return true
	if features.Get().EnablePeerScorer {
		if s.scorers.gossipScorer.isBadPeer(pid) {
			return true
		}
	}
	return false
}
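The IsBadPeerNoLock change above only consults the gossip scorer when the peer-scorer feature flag is enabled. A compact sketch of that flag-gated check, with stand-in types rather than the real features package:

package main

import "fmt"

// flags is a stand-in for the feature flags: a check only contributes to the
// verdict when the corresponding flag is enabled.
type flags struct{ EnablePeerScorer bool }

func isBad(f flags, statusBad, gossipBad bool) bool {
	if statusBad {
		return true
	}
	if f.EnablePeerScorer && gossipBad {
		return true
	}
	return false
}

func main() {
	fmt.Println(isBad(flags{EnablePeerScorer: false}, false, true)) // false: gossip score ignored
	fmt.Println(isBad(flags{EnablePeerScorer: true}, false, true))  // true
}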
@@ -36,6 +36,7 @@ import (
	"github.com/prysmaticlabs/go-bitfield"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/peerdata"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/scorers"
	"github.com/prysmaticlabs/prysm/v4/config/features"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/crypto/rand"
@@ -334,6 +335,10 @@ func (p *Status) IsBad(pid peer.ID) bool {

// isBad is the lock-free version of IsBad.
func (p *Status) isBad(pid peer.ID) bool {
	// Do not disconnect from trusted peers.
	if p.store.IsTrustedPeer(pid) {
		return false
	}
	return p.isfromBadIP(pid) || p.scorers.IsBadPeerNoLock(pid)
}
@@ -543,6 +548,11 @@ func (p *Status) Prune() {
|
||||
p.store.Lock()
|
||||
defer p.store.Unlock()
|
||||
|
||||
// Default to old method if flag isnt enabled.
|
||||
if !features.Get().EnablePeerScorer {
|
||||
p.deprecatedPrune()
|
||||
return
|
||||
}
|
||||
// Exit early if there is nothing to prune.
|
||||
if len(p.store.Peers()) <= p.store.Config().MaxPeers {
|
||||
return
|
||||
@@ -587,6 +597,52 @@ func (p *Status) Prune() {
|
||||
p.tallyIPTracker()
|
||||
}
|
||||
|
||||
// Deprecated: This is the old peer pruning method based on
|
||||
// bad response counts.
|
||||
func (p *Status) deprecatedPrune() {
|
||||
// Exit early if there is nothing to prune.
|
||||
if len(p.store.Peers()) <= p.store.Config().MaxPeers {
|
||||
return
|
||||
}
|
||||
|
||||
notBadPeer := func(peerData *peerdata.PeerData) bool {
|
||||
return peerData.BadResponses < p.scorers.BadResponsesScorer().Params().Threshold
|
||||
}
|
||||
type peerResp struct {
|
||||
pid peer.ID
|
||||
badResp int
|
||||
}
|
||||
peersToPrune := make([]*peerResp, 0)
|
||||
// Select disconnected peers with a smaller bad response count.
|
||||
for pid, peerData := range p.store.Peers() {
|
||||
if peerData.ConnState == PeerDisconnected && notBadPeer(peerData) {
|
||||
peersToPrune = append(peersToPrune, &peerResp{
|
||||
pid: pid,
|
||||
badResp: peerData.BadResponses,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Sort peers in ascending order, so the peers with the
|
||||
// least amount of bad responses are pruned first. This
|
||||
// is to protect the node from malicious/lousy peers so
|
||||
// that their memory is still kept.
|
||||
sort.Slice(peersToPrune, func(i, j int) bool {
|
||||
return peersToPrune[i].badResp < peersToPrune[j].badResp
|
||||
})
|
||||
|
||||
limitDiff := len(p.store.Peers()) - p.store.Config().MaxPeers
|
||||
if limitDiff > len(peersToPrune) {
|
||||
limitDiff = len(peersToPrune)
|
||||
}
|
||||
peersToPrune = peersToPrune[:limitDiff]
|
||||
// Delete peers from map.
|
||||
for _, peerData := range peersToPrune {
|
||||
p.store.DeletePeerData(peerData.pid)
|
||||
}
|
||||
p.tallyIPTracker()
|
||||
}
|
||||
|
||||
// BestFinalized returns the highest finalized epoch equal to or higher than ours that is agreed
|
||||
// upon by the majority of peers. This method may not return the absolute highest finalized, but
|
||||
// the finalized epoch in which most peers can serve blocks (plurality voting).
|
||||
@@ -694,6 +750,9 @@ func (p *Status) BestNonFinalized(minPeers int, ourHeadEpoch primitives.Epoch) (
|
||||
// bad response count. In the future scoring will be used
|
||||
// to determine the most suitable peers to take out.
|
||||
func (p *Status) PeersToPrune() []peer.ID {
|
||||
if !features.Get().EnablePeerScorer {
|
||||
return p.deprecatedPeersToPrune()
|
||||
}
|
||||
connLimit := p.ConnectedPeerLimit()
|
||||
inBoundLimit := uint64(p.InboundLimit())
|
||||
activePeers := p.Active()
|
||||
@@ -714,7 +773,7 @@ func (p *Status) PeersToPrune() []peer.ID {
|
||||
// Select connected and inbound peers to prune.
|
||||
for pid, peerData := range p.store.Peers() {
|
||||
if peerData.ConnState == PeerConnected &&
|
||||
peerData.Direction == network.DirInbound {
|
||||
peerData.Direction == network.DirInbound && !p.store.IsTrustedPeer(pid) {
|
||||
peersToPrune = append(peersToPrune, &peerResp{
|
||||
pid: pid,
|
||||
score: p.scorers.ScoreNoLock(pid),
|
||||
@@ -757,6 +816,71 @@ func (p *Status) PeersToPrune() []peer.ID {
|
||||
return ids
|
||||
}
|
||||
|
||||
// Deprecated: Is used to represent the older method
|
||||
// of pruning which utilized bad response counts.
|
||||
func (p *Status) deprecatedPeersToPrune() []peer.ID {
|
||||
connLimit := p.ConnectedPeerLimit()
|
||||
inBoundLimit := p.InboundLimit()
|
||||
activePeers := p.Active()
|
||||
numInboundPeers := len(p.InboundConnected())
|
||||
// Exit early if we are still below our max
|
||||
// limit.
|
||||
if uint64(len(activePeers)) <= connLimit {
|
||||
return []peer.ID{}
|
||||
}
|
||||
p.store.Lock()
|
||||
defer p.store.Unlock()
|
||||
|
||||
type peerResp struct {
|
||||
pid peer.ID
|
||||
badResp int
|
||||
}
|
||||
peersToPrune := make([]*peerResp, 0)
|
||||
// Select connected and inbound peers to prune.
|
||||
for pid, peerData := range p.store.Peers() {
|
||||
if peerData.ConnState == PeerConnected &&
|
||||
peerData.Direction == network.DirInbound && !p.store.IsTrustedPeer(pid) {
|
||||
peersToPrune = append(peersToPrune, &peerResp{
|
||||
pid: pid,
|
||||
badResp: peerData.BadResponses,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Sort in descending order to favour pruning peers with a
|
||||
// higher bad response count.
|
||||
sort.Slice(peersToPrune, func(i, j int) bool {
|
||||
return peersToPrune[i].badResp > peersToPrune[j].badResp
|
||||
})
|
||||
|
||||
// Determine amount of peers to prune using our
|
||||
// max connection limit.
|
||||
amountToPrune, err := pmath.Sub64(uint64(len(activePeers)), connLimit)
|
||||
if err != nil {
|
||||
// This should never happen
|
||||
log.WithError(err).Error("Failed to determine amount of peers to prune")
|
||||
return []peer.ID{}
|
||||
}
|
||||
// Also check for inbound peers above our limit.
|
||||
excessInbound := uint64(0)
|
||||
if numInboundPeers > inBoundLimit {
|
||||
excessInbound = uint64(numInboundPeers - inBoundLimit)
|
||||
}
|
||||
// Prune the largest amount between excess peers and
|
||||
// excess inbound peers.
|
||||
if excessInbound > amountToPrune {
|
||||
amountToPrune = excessInbound
|
||||
}
|
||||
if amountToPrune < uint64(len(peersToPrune)) {
|
||||
peersToPrune = peersToPrune[:amountToPrune]
|
||||
}
|
||||
ids := make([]peer.ID, 0, len(peersToPrune))
|
||||
for _, pr := range peersToPrune {
|
||||
ids = append(ids, pr.pid)
|
||||
}
|
||||
return ids
|
||||
}
|
||||
|
||||
// HighestEpoch returns the highest epoch reported epoch amongst peers.
|
||||
func (p *Status) HighestEpoch() primitives.Epoch {
|
||||
p.store.RLock()
|
||||
@@ -780,6 +904,14 @@ func (p *Status) ConnectedPeerLimit() uint64 {
	return uint64(maxLim) - maxLimitBuffer
}

// SetTrustedPeers sets our trusted peer set into
// our peerstore.
func (p *Status) SetTrustedPeers(peers []peer.ID) {
	p.store.Lock()
	defer p.store.Unlock()
	p.store.SetTrustedPeers(peers)
}

// this method assumes the store lock is acquired before
// executing the method.
func (p *Status) isfromBadIP(pid peer.ID) bool {
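Status.SetTrustedPeers above is a locking wrapper: the outer type takes the store mutex and then calls the lock-free store method. A small sketch of the pattern, with illustrative names:

package main

import (
	"fmt"
	"sync"
)

// statusLike shows the locking wrapper pattern above: the outer type owns the
// mutex and takes it before mutating the inner, lock-free data.
type statusLike struct {
	mu      sync.Mutex
	trusted map[string]bool
}

func (s *statusLike) SetTrustedPeers(ids []string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	for _, id := range ids {
		s.trusted[id] = true
	}
}

func main() {
	s := &statusLike{trusted: map[string]bool{}}
	s.SetTrustedPeers([]string{"peerA"})
	fmt.Println(s.trusted["peerA"]) // true
}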
@@ -15,6 +15,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/peerdata"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/scorers"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/wrapper"
|
||||
@@ -548,6 +549,10 @@ func TestPrune(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPeerIPTracker(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnablePeerScorer: false,
|
||||
})
|
||||
defer resetCfg()
|
||||
maxBadResponses := 2
|
||||
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
@@ -582,7 +587,7 @@ func TestPeerIPTracker(t *testing.T) {
|
||||
p.Prune()
|
||||
|
||||
for _, pr := range badPeers {
|
||||
assert.Equal(t, true, p.IsBad(pr), "peer with good ip is regarded as bad")
|
||||
assert.Equal(t, false, p.IsBad(pr), "peer with good ip is regarded as bad")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -686,6 +691,10 @@ func TestAtInboundPeerLimit(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPrunePeers(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnablePeerScorer: false,
|
||||
})
|
||||
defer resetCfg()
|
||||
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
@@ -735,6 +744,80 @@ func TestPrunePeers(t *testing.T) {
|
||||
assert.Equal(t, network.DirInbound, dir)
|
||||
}
|
||||
|
||||
// Ensure it is in the descending order.
|
||||
currCount, err := p.Scorers().BadResponsesScorer().Count(peersToPrune[0])
|
||||
require.NoError(t, err)
|
||||
for _, pid := range peersToPrune {
|
||||
count, err := p.Scorers().BadResponsesScorer().Count(pid)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, currCount >= count)
|
||||
currCount = count
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrunePeers_TrustedPeers(t *testing.T) {
|
||||
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 1,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
for i := 0; i < 15; i++ {
|
||||
// Peer added to peer handler.
|
||||
createPeer(t, p, nil, network.DirOutbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
|
||||
}
|
||||
// Assert there are no prunable peers.
|
||||
peersToPrune := p.PeersToPrune()
|
||||
assert.Equal(t, 0, len(peersToPrune))
|
||||
|
||||
for i := 0; i < 18; i++ {
|
||||
// Peer added to peer handler.
|
||||
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
|
||||
}
|
||||
|
||||
// Assert there are the correct prunable peers.
|
||||
peersToPrune = p.PeersToPrune()
|
||||
assert.Equal(t, 3, len(peersToPrune))
|
||||
|
||||
// Add in more peers.
|
||||
for i := 0; i < 13; i++ {
|
||||
// Peer added to peer handler.
|
||||
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
|
||||
}
|
||||
|
||||
trustedPeers := []peer.ID{}
|
||||
// Set up bad scores for inbound peers.
|
||||
inboundPeers := p.InboundConnected()
|
||||
for i, pid := range inboundPeers {
|
||||
modulo := i % 5
|
||||
// Increment bad scores for peers.
|
||||
for j := 0; j < modulo; j++ {
|
||||
p.Scorers().BadResponsesScorer().Increment(pid)
|
||||
}
|
||||
if modulo == 4 {
|
||||
trustedPeers = append(trustedPeers, pid)
|
||||
}
|
||||
}
|
||||
p.SetTrustedPeers(trustedPeers)
|
||||
// Assert all peers more than max are prunable.
|
||||
peersToPrune = p.PeersToPrune()
|
||||
assert.Equal(t, 16, len(peersToPrune))
|
||||
|
||||
// Check that trusted peers are not pruned.
|
||||
for _, pid := range peersToPrune {
|
||||
for _, tPid := range trustedPeers {
|
||||
assert.NotEqual(t, pid.String(), tPid.String())
|
||||
}
|
||||
}
|
||||
for _, pid := range peersToPrune {
|
||||
dir, err := p.Direction(pid)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, network.DirInbound, dir)
|
||||
}
|
||||
|
||||
// Ensure it is in the descending order.
|
||||
currScore := p.Scorers().Score(peersToPrune[0])
|
||||
for _, pid := range peersToPrune {
|
||||
|
||||
@@ -210,6 +210,10 @@ func (s *Service) Start() {
		if err != nil {
			log.WithError(err).Error("Could not connect to static peer")
		}
		// Set trusted peers for those that are provided as static addresses.
		pids := peerIdsFromMultiAddrs(addrs)
		s.peers.SetTrustedPeers(pids)
		peersToWatch = append(peersToWatch, s.cfg.StaticPeers...)
		s.connectWithAllPeers(addrs)
	}
	// Initialize metadata according to the
@@ -62,13 +62,13 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
	currNum := len(s.pubsub.ListPeers(topic))
	wg := new(sync.WaitGroup)
	for {
		if currNum >= threshold {
			break
		}
		if err := ctx.Err(); err != nil {
			return false, errors.Errorf("unable to find requisite number of peers for topic %s - "+
				"only %d out of %d peers were able to be found", topic, currNum, threshold)
		}
		if currNum >= threshold {
			break
		}
		nodes := enode.ReadNodes(iterator, int(params.BeaconNetworkConfig().MinimumPeersInSubnetSearch))
		for _, node := range nodes {
			info, _, err := convertToAddrInfo(node)
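The reordering in FindPeersWithSubnet above checks the peer-count threshold before the context error, so a deadline that expires after enough peers were found no longer surfaces as a failure. A hedged sketch of that ordering, with hypothetical helper names:

package main

import (
	"context"
	"fmt"
)

// waitForEnough polls countFn until threshold is met. Checking the threshold
// before ctx.Err() mirrors the reordering above: if enough peers are already
// known, an expired context no longer turns success into an error.
func waitForEnough(ctx context.Context, threshold int, countFn func() int, discover func()) error {
	for {
		if countFn() >= threshold {
			return nil
		}
		if err := ctx.Err(); err != nil {
			return fmt.Errorf("context done with only %d of %d peers", countFn(), threshold)
		}
		discover()
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // the context has already expired
	noop := func() {}
	fmt.Println(waitForEnough(ctx, 1, func() int { return 1 }, noop)) // <nil>: target already met
	fmt.Println(waitForEnough(ctx, 1, func() int { return 0 }, noop)) // error: context done first
}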
@@ -3,6 +3,7 @@ package apimiddleware
import (
	"encoding/base64"
	"strconv"
	"strings"

	"github.com/pkg/errors"
)
@@ -17,9 +18,14 @@ func (p *EpochParticipation) UnmarshalJSON(b []byte) error {
	if len(b) < 2 {
		return errors.New("epoch participation length must be at least 2")
	}
	if b[0] != '"' || b[len(b)-1] != '"' {
		return errors.Errorf("provided epoch participation json string is malformed: %s", string(b))
	}

	// Remove leading and trailing quotation marks.
	decoded, err := base64.StdEncoding.DecodeString(string(b[1 : len(b)-1]))
	jsonString := string(b)
	jsonString = strings.Trim(jsonString, "\"")
	decoded, err := base64.StdEncoding.DecodeString(jsonString)
	if err != nil {
		return errors.Wrapf(err, "could not decode epoch participation base64 value")
	}
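The stricter UnmarshalJSON above insists the raw bytes are a quoted JSON string before base64-decoding the inner value, instead of silently trimming quotes. A standalone sketch of that validation, with an illustrative helper name:

package main

import (
	"encoding/base64"
	"errors"
	"fmt"
)

// decodeQuotedBase64 mirrors the validation above: the raw JSON bytes must be
// a quoted string before the inner value is base64-decoded.
func decodeQuotedBase64(b []byte) ([]byte, error) {
	if len(b) < 2 {
		return nil, errors.New("value too short")
	}
	if b[0] != '"' || b[len(b)-1] != '"' {
		return nil, fmt.Errorf("malformed JSON string: %s", string(b))
	}
	return base64.StdEncoding.DecodeString(string(b[1 : len(b)-1]))
}

func main() {
	out, err := decodeQuotedBase64([]byte(`"aGVsbG8="`))
	fmt.Println(string(out), err) // hello <nil>
	_, err = decodeQuotedBase64([]byte(`XdHJ1ZQ==X`)) // not quoted, like the new test case
	fmt.Println(err)
}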
@@ -23,7 +23,7 @@ func TestUnmarshalEpochParticipation(t *testing.T) {
		ep := EpochParticipation{}
		err := ep.UnmarshalJSON([]byte(":illegal:"))
		require.NotNil(t, err)
		assert.ErrorContains(t, "could not decode epoch participation base64 value", err)
		assert.ErrorContains(t, "provided epoch participation json string is malformed", err)
	})
	t.Run("length too small", func(t *testing.T) {
		ep := EpochParticipation{}
@@ -36,4 +36,8 @@ func TestUnmarshalEpochParticipation(t *testing.T) {
		require.NoError(t, ep.UnmarshalJSON([]byte("null")))
		assert.DeepEqual(t, EpochParticipation([]string{}), ep)
	})
	t.Run("invalid value", func(t *testing.T) {
		ep := EpochParticipation{}
		require.ErrorContains(t, "provided epoch participation json string is malformed", ep.UnmarshalJSON([]byte("XdHJ1ZQ==X")))
	})
}
@@ -6,10 +6,12 @@ go_library(
|
||||
"blinded_blocks.go",
|
||||
"blocks.go",
|
||||
"config.go",
|
||||
"handlers.go",
|
||||
"log.go",
|
||||
"pool.go",
|
||||
"server.go",
|
||||
"state.go",
|
||||
"structs.go",
|
||||
"sync_committee.go",
|
||||
"validator.go",
|
||||
],
|
||||
@@ -49,7 +51,9 @@ go_library(
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz/detect:go_default_library",
|
||||
"//network:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/eth/v2:go_default_library",
|
||||
"//proto/migration:go_default_library",
|
||||
@@ -57,8 +61,10 @@ go_library(
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_go_playground_validator_v10//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_wealdtech_go_bytesutil//:go_default_library",
|
||||
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
"@org_golang_google_grpc//codes:go_default_library",
|
||||
@@ -75,6 +81,7 @@ go_test(
|
||||
"blinded_blocks_test.go",
|
||||
"blocks_test.go",
|
||||
"config_test.go",
|
||||
"handlers_test.go",
|
||||
"init_test.go",
|
||||
"pool_test.go",
|
||||
"server_test.go",
|
||||
@@ -129,6 +136,7 @@ go_test(
|
||||
"@com_github_golang_mock//gomock:go_default_library",
|
||||
"@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_stretchr_testify//mock:go_default_library",
|
||||
"@com_github_wealdtech_go_bytesutil//:go_default_library",
|
||||
"@org_golang_google_grpc//:go_default_library",
|
||||
"@org_golang_google_grpc//codes:go_default_library",
|
||||
|
||||
@@ -44,8 +44,8 @@ func (bs *Server) GetBlindedBlock(ctx context.Context, req *ethpbv1.BlockRequest
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedGetter means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
|
||||
}
|
||||
result, err = getBlindedBlockAltair(blk)
|
||||
@@ -53,8 +53,8 @@ func (bs *Server) GetBlindedBlock(ctx context.Context, req *ethpbv1.BlockRequest
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedGetter means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
|
||||
}
|
||||
result, err = bs.getBlindedBlockBellatrix(ctx, blk)
|
||||
@@ -62,8 +62,8 @@ func (bs *Server) GetBlindedBlock(ctx context.Context, req *ethpbv1.BlockRequest
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedGetter means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
|
||||
}
|
||||
result, err = bs.getBlindedBlockCapella(ctx, blk)
|
||||
@@ -71,8 +71,8 @@ func (bs *Server) GetBlindedBlock(ctx context.Context, req *ethpbv1.BlockRequest
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedGetter means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
|
||||
}
|
||||
|
||||
@@ -99,8 +99,8 @@ func (bs *Server) GetBlindedBlockSSZ(ctx context.Context, req *ethpbv1.BlockRequ
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedGetter means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
result, err = getSSZBlockAltair(blk)
|
||||
@@ -108,8 +108,8 @@ func (bs *Server) GetBlindedBlockSSZ(ctx context.Context, req *ethpbv1.BlockRequ
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedGetter means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
result, err = bs.getBlindedSSZBlockBellatrix(ctx, blk)
|
||||
@@ -117,8 +117,8 @@ func (bs *Server) GetBlindedBlockSSZ(ctx context.Context, req *ethpbv1.BlockRequ
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedGetter means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
result, err = bs.getBlindedSSZBlockCapella(ctx, blk)
|
||||
@@ -126,8 +126,8 @@ func (bs *Server) GetBlindedBlockSSZ(ctx context.Context, req *ethpbv1.BlockRequ
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedGetter means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
|
||||
@@ -146,7 +146,7 @@ func (bs *Server) SubmitBlindedBlock(ctx context.Context, req *ethpbv2.SignedBli
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlindedBlock")
|
||||
defer span.End()
|
||||
|
||||
if err := rpchelpers.ValidateSync(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher); err != nil {
|
||||
if err := rpchelpers.ValidateSyncGRPC(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher); err != nil {
|
||||
// We simply return the error because it's already a gRPC error.
|
||||
return nil, err
|
||||
}
|
||||
@@ -189,7 +189,7 @@ func (bs *Server) SubmitBlindedBlockSSZ(ctx context.Context, req *ethpbv2.SSZCon
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlindedBlockSSZ")
|
||||
defer span.End()
|
||||
|
||||
if err := rpchelpers.ValidateSync(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher); err != nil {
|
||||
if err := rpchelpers.ValidateSyncGRPC(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher); err != nil {
|
||||
// We simply return the error because it's already a gRPC error.
|
||||
return nil, err
|
||||
}
|
||||
@@ -344,8 +344,8 @@ func getBlindedBlockAltair(blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.B
|
||||
func (bs *Server) getBlindedBlockBellatrix(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlindedBlockResponse, error) {
|
||||
bellatrixBlk, err := blk.PbBellatrixBlock()
|
||||
if err != nil {
|
||||
// ErrUnsupportedGetter means that we have another block type
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedGetter) {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
if blindedBellatrixBlk, err := blk.PbBlindedBellatrixBlock(); err == nil {
|
||||
if blindedBellatrixBlk == nil {
|
||||
return nil, errNilBlock
|
||||
@@ -414,8 +414,8 @@ func (bs *Server) getBlindedBlockBellatrix(ctx context.Context, blk interfaces.R
|
||||
func (bs *Server) getBlindedBlockCapella(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlindedBlockResponse, error) {
|
||||
capellaBlk, err := blk.PbCapellaBlock()
|
||||
if err != nil {
|
||||
// ErrUnsupportedGetter means that we have another block type
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedGetter) {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
if blindedCapellaBlk, err := blk.PbBlindedCapellaBlock(); err == nil {
|
||||
if blindedCapellaBlk == nil {
|
||||
return nil, errNilBlock
|
||||
@@ -484,8 +484,8 @@ func (bs *Server) getBlindedBlockCapella(ctx context.Context, blk interfaces.Rea
|
||||
func (bs *Server) getBlindedSSZBlockBellatrix(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
|
||||
bellatrixBlk, err := blk.PbBellatrixBlock()
|
||||
if err != nil {
|
||||
// ErrUnsupportedGetter means that we have another block type
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedGetter) {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
if blindedBellatrixBlk, err := blk.PbBlindedBellatrixBlock(); err == nil {
|
||||
if blindedBellatrixBlk == nil {
|
||||
return nil, errNilBlock
|
||||
@@ -559,8 +559,8 @@ func (bs *Server) getBlindedSSZBlockBellatrix(ctx context.Context, blk interface
|
||||
func (bs *Server) getBlindedSSZBlockCapella(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
|
||||
capellaBlk, err := blk.PbCapellaBlock()
|
||||
if err != nil {
|
||||
// ErrUnsupportedGetter means that we have another block type
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedGetter) {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
if blindedCapellaBlk, err := blk.PbBlindedCapellaBlock(); err == nil {
|
||||
if blindedCapellaBlk == nil {
|
||||
return nil, errNilBlock
|
||||
|
||||
@@ -43,7 +43,7 @@ var (
|
||||
// determines the best block root and state root to use for a Checkpoint Sync starting from that point.
|
||||
// DEPRECATED: GetWeakSubjectivity endpoint will no longer be supported
|
||||
func (bs *Server) GetWeakSubjectivity(ctx context.Context, _ *empty.Empty) (*ethpbv1.WeakSubjectivityResponse, error) {
|
||||
if err := rpchelpers.ValidateSync(ctx, bs.SyncChecker, bs.HeadFetcher, bs.GenesisTimeFetcher, bs.OptimisticModeFetcher); err != nil {
|
||||
if err := rpchelpers.ValidateSyncGRPC(ctx, bs.SyncChecker, bs.HeadFetcher, bs.GenesisTimeFetcher, bs.OptimisticModeFetcher); err != nil {
|
||||
// This is already a grpc error, so we can't wrap it any further
|
||||
return nil, err
|
||||
}
|
||||
@@ -206,7 +206,7 @@ func (bs *Server) SubmitBlock(ctx context.Context, req *ethpbv2.SignedBeaconBloc
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlock")
|
||||
defer span.End()
|
||||
|
||||
if err := rpchelpers.ValidateSync(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher); err != nil {
|
||||
if err := rpchelpers.ValidateSyncGRPC(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher); err != nil {
|
||||
// We simply return the error because it's already a gRPC error.
|
||||
return nil, err
|
||||
}
|
||||
@@ -246,7 +246,7 @@ func (bs *Server) SubmitBlockSSZ(ctx context.Context, req *ethpbv2.SSZContainer)
|
||||
ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlockSSZ")
|
||||
defer span.End()
|
||||
|
||||
if err := rpchelpers.ValidateSync(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher); err != nil {
|
||||
if err := rpchelpers.ValidateSyncGRPC(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher); err != nil {
|
||||
// We simply return the error because it's already a gRPC error.
|
||||
return nil, err
|
||||
}
|
||||
@@ -420,8 +420,8 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedGetter means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
result, err = getBlockAltair(blk)
|
||||
@@ -429,8 +429,8 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedGetter means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
result, err = bs.getBlockBellatrix(ctx, blk)
|
||||
@@ -438,8 +438,8 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedGetter means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
result, err = bs.getBlockCapella(ctx, blk)
|
||||
@@ -447,8 +447,8 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedGetter means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
|
||||
@@ -474,8 +474,8 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedGetter means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
result, err = getSSZBlockAltair(blk)
|
||||
@@ -483,8 +483,8 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedGetter means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
result, err = bs.getSSZBlockBellatrix(ctx, blk)
|
||||
@@ -492,8 +492,8 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedGetter means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
result, err = bs.getSSZBlockCapella(ctx, blk)
|
||||
@@ -501,8 +501,8 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
|
||||
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedGetter means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if !errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
|
||||
@@ -689,8 +689,8 @@ func getBlockAltair(blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlockRes
|
||||
func (bs *Server) getBlockBellatrix(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlockResponseV2, error) {
|
||||
bellatrixBlk, err := blk.PbBellatrixBlock()
|
||||
if err != nil {
|
||||
// ErrUnsupportedGetter means that we have another block type
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedGetter) {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
if blindedBellatrixBlk, err := blk.PbBlindedBellatrixBlock(); err == nil {
|
||||
if blindedBellatrixBlk == nil {
|
||||
return nil, errNilBlock
|
||||
@@ -759,8 +759,8 @@ func (bs *Server) getBlockBellatrix(ctx context.Context, blk interfaces.ReadOnly
|
||||
func (bs *Server) getBlockCapella(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlockResponseV2, error) {
|
||||
capellaBlk, err := blk.PbCapellaBlock()
|
||||
if err != nil {
|
||||
// ErrUnsupportedGetter means that we have another block type
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedGetter) {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
if blindedCapellaBlk, err := blk.PbBlindedCapellaBlock(); err == nil {
|
||||
if blindedCapellaBlk == nil {
|
||||
return nil, errNilBlock
|
||||
@@ -872,8 +872,8 @@ func getSSZBlockAltair(blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZCo
|
||||
func (bs *Server) getSSZBlockBellatrix(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
|
||||
bellatrixBlk, err := blk.PbBellatrixBlock()
|
||||
if err != nil {
|
||||
// ErrUnsupportedGetter means that we have another block type
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedGetter) {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
if blindedBellatrixBlk, err := blk.PbBlindedBellatrixBlock(); err == nil {
|
||||
if blindedBellatrixBlk == nil {
|
||||
return nil, errNilBlock
|
||||
@@ -948,8 +948,8 @@ func (bs *Server) getSSZBlockBellatrix(ctx context.Context, blk interfaces.ReadO
|
||||
func (bs *Server) getSSZBlockCapella(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
|
||||
capellaBlk, err := blk.PbCapellaBlock()
|
||||
if err != nil {
|
||||
// ErrUnsupportedGetter means that we have another block type
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedGetter) {
|
||||
// ErrUnsupportedField means that we have another block type
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
if blindedCapellaBlk, err := blk.PbBlindedCapellaBlock(); err == nil {
|
||||
if blindedCapellaBlk == nil {
|
||||
return nil, errNilBlock
|
||||
|
||||
@@ -136,7 +136,7 @@ func TestGetSpec(t *testing.T) {
|
||||
resp, err := server.GetSpec(context.Background(), &emptypb.Empty{})
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 105, len(resp.Data))
|
||||
assert.Equal(t, 108, len(resp.Data))
|
||||
for k, v := range resp.Data {
|
||||
switch k {
|
||||
case "CONFIG_NAME":
|
||||
@@ -331,6 +331,10 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "0x08000000", v)
|
||||
case "DOMAIN_CONTRIBUTION_AND_PROOF":
|
||||
assert.Equal(t, "0x09000000", v)
|
||||
case "DOMAIN_BLS_TO_EXECUTION_CHANGE":
|
||||
assert.Equal(t, "0x0a000000", v)
|
||||
case "DOMAIN_APPLICATION_BUILDER":
|
||||
assert.Equal(t, "0x00000001", v)
|
||||
case "TRANSITION_TOTAL_DIFFICULTY":
|
||||
assert.Equal(t, "0", v)
|
||||
case "TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH":
|
||||
@@ -361,6 +365,8 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "2", v)
|
||||
case "REORG_WEIGHT_THRESHOLD":
|
||||
assert.Equal(t, "20", v)
|
||||
case "REORG_PARENT_WEIGHT_THRESHOLD":
|
||||
assert.Equal(t, "160", v)
|
||||
case "SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY":
|
||||
default:
|
||||
t.Errorf("Incorrect key: %s", k)
|
||||
|
||||
beacon-chain/rpc/eth/beacon/handlers.go (new file, 375 lines)
@@ -0,0 +1,375 @@
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/go-playground/validator/v10"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/network"
|
||||
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
const (
|
||||
broadcastValidationQueryParam = "broadcast_validation"
|
||||
broadcastValidationConsensus = "consensus"
|
||||
broadcastValidationConsensusAndEquivocation = "consensus_and_equivocation"
|
||||
)
|
||||
|
||||
// PublishBlindedBlockV2 instructs the beacon node to use the components of the `SignedBlindedBeaconBlock` to construct and publish a
|
||||
// `SignedBeaconBlock` by swapping out the `transactions_root` for the corresponding full list of `transactions`.
|
||||
// The beacon node should broadcast a newly constructed `SignedBeaconBlock` to the beacon network,
|
||||
// to be included in the beacon chain. The beacon node is not required to validate the signed
|
||||
// `BeaconBlock`, and a successful response (20X) only indicates that the broadcast has been
|
||||
// successful. The beacon node is expected to integrate the new block into its state, and
|
||||
// therefore validate the block internally, however blocks which fail the validation are still
|
||||
// broadcast but a different status code is returned (202). Pre-Bellatrix, this endpoint will accept
|
||||
// a `SignedBeaconBlock`. The broadcast behaviour may be adjusted via the `broadcast_validation`
|
||||
// query parameter.
|
||||
func (bs *Server) PublishBlindedBlockV2(w http.ResponseWriter, r *http.Request) {
|
||||
if ok := bs.checkSync(r.Context(), w); !ok {
|
||||
return
|
||||
}
|
||||
|
||||
validate := validator.New()
|
||||
body, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Could not read request body",
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
|
||||
var capellaBlock *SignedBlindedBeaconBlockCapella
|
||||
if err = unmarshalStrict(body, &capellaBlock); err == nil {
|
||||
if err = validate.Struct(capellaBlock); err == nil {
|
||||
consensusBlock, err := capellaBlock.ToGeneric()
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Could not decode request body into consensus block: " + err.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: err.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
bs.proposeBlock(r.Context(), w, consensusBlock)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
var bellatrixBlock *SignedBlindedBeaconBlockBellatrix
|
||||
if err = unmarshalStrict(body, &bellatrixBlock); err == nil {
|
||||
if err = validate.Struct(bellatrixBlock); err == nil {
|
||||
consensusBlock, err := bellatrixBlock.ToGeneric()
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Could not decode request body into consensus block: " + err.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: err.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
bs.proposeBlock(r.Context(), w, consensusBlock)
|
||||
return
|
||||
}
|
||||
}
|
||||
var altairBlock *SignedBeaconBlockAltair
|
||||
if err = unmarshalStrict(body, &altairBlock); err == nil {
|
||||
if err = validate.Struct(altairBlock); err == nil {
|
||||
consensusBlock, err := altairBlock.ToGeneric()
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Could not decode request body into consensus block: " + err.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: err.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
bs.proposeBlock(r.Context(), w, consensusBlock)
|
||||
return
|
||||
}
|
||||
}
|
||||
var phase0Block *SignedBeaconBlock
|
||||
if err = unmarshalStrict(body, &phase0Block); err == nil {
|
||||
if err = validate.Struct(phase0Block); err == nil {
|
||||
consensusBlock, err := phase0Block.ToGeneric()
|
||||
if err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Could not decode request body into consensus block: " + err.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: err.Error(),
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
return
|
||||
}
|
||||
bs.proposeBlock(r.Context(), w, consensusBlock)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
errJson := &network.DefaultErrorJson{
|
||||
Message: "Body does not represent a valid block type",
|
||||
Code: http.StatusBadRequest,
|
||||
}
|
||||
network.WriteError(w, errJson)
|
||||
}

// PublishBlockV2 instructs the beacon node to broadcast a newly signed beacon block to the beacon network,
// to be included in the beacon chain. A success response (20x) indicates that the block
// passed gossip validation and was successfully broadcast onto the network.
// The beacon node is also expected to integrate the block into the state, but may broadcast it
// before doing so, so as to aid timely delivery of the block. Should the block fail full
// validation, a separate success response code (202) is used to indicate that the block was
// successfully broadcast but failed integration. The broadcast behaviour may be adjusted via the
// `broadcast_validation` query parameter.
func (bs *Server) PublishBlockV2(w http.ResponseWriter, r *http.Request) {
	if ok := bs.checkSync(r.Context(), w); !ok {
		return
	}

	validate := validator.New()
	body, err := io.ReadAll(r.Body)
	if err != nil {
		errJson := &network.DefaultErrorJson{
			Message: "Could not read request body",
			Code:    http.StatusInternalServerError,
		}
		network.WriteError(w, errJson)
		return
	}

	var capellaBlock *SignedBeaconBlockCapella
	if err = unmarshalStrict(body, &capellaBlock); err == nil {
		if err = validate.Struct(capellaBlock); err == nil {
			consensusBlock, err := capellaBlock.ToGeneric()
			if err != nil {
				errJson := &network.DefaultErrorJson{
					Message: "Could not decode request body into consensus block: " + err.Error(),
					Code:    http.StatusBadRequest,
				}
				network.WriteError(w, errJson)
				return
			}
			if err = bs.validateBroadcast(r, consensusBlock); err != nil {
				errJson := &network.DefaultErrorJson{
					Message: err.Error(),
					Code:    http.StatusBadRequest,
				}
				network.WriteError(w, errJson)
				return
			}
			bs.proposeBlock(r.Context(), w, consensusBlock)
			return
		}
	}
	var bellatrixBlock *SignedBeaconBlockBellatrix
	if err = unmarshalStrict(body, &bellatrixBlock); err == nil {
		if err = validate.Struct(bellatrixBlock); err == nil {
			consensusBlock, err := bellatrixBlock.ToGeneric()
			if err != nil {
				errJson := &network.DefaultErrorJson{
					Message: "Could not decode request body into consensus block: " + err.Error(),
					Code:    http.StatusBadRequest,
				}
				network.WriteError(w, errJson)
				return
			}
			if err = bs.validateBroadcast(r, consensusBlock); err != nil {
				errJson := &network.DefaultErrorJson{
					Message: err.Error(),
					Code:    http.StatusBadRequest,
				}
				network.WriteError(w, errJson)
				return
			}
			bs.proposeBlock(r.Context(), w, consensusBlock)
			return
		}
	}
	var altairBlock *SignedBeaconBlockAltair
	if err = unmarshalStrict(body, &altairBlock); err == nil {
		if err = validate.Struct(altairBlock); err == nil {
			consensusBlock, err := altairBlock.ToGeneric()
			if err != nil {
				errJson := &network.DefaultErrorJson{
					Message: "Could not decode request body into consensus block: " + err.Error(),
					Code:    http.StatusBadRequest,
				}
				network.WriteError(w, errJson)
				return
			}
			if err = bs.validateBroadcast(r, consensusBlock); err != nil {
				errJson := &network.DefaultErrorJson{
					Message: err.Error(),
					Code:    http.StatusBadRequest,
				}
				network.WriteError(w, errJson)
				return
			}
			bs.proposeBlock(r.Context(), w, consensusBlock)
			return
		}
	}
	var phase0Block *SignedBeaconBlock
	if err = unmarshalStrict(body, &phase0Block); err == nil {
		if err = validate.Struct(phase0Block); err == nil {
			consensusBlock, err := phase0Block.ToGeneric()
			if err != nil {
				errJson := &network.DefaultErrorJson{
					Message: "Could not decode request body into consensus block: " + err.Error(),
					Code:    http.StatusBadRequest,
				}
				network.WriteError(w, errJson)
				return
			}
			if err = bs.validateBroadcast(r, consensusBlock); err != nil {
				errJson := &network.DefaultErrorJson{
					Message: err.Error(),
					Code:    http.StatusBadRequest,
				}
				network.WriteError(w, errJson)
				return
			}
			bs.proposeBlock(r.Context(), w, consensusBlock)
			return
		}
	}

	errJson := &network.DefaultErrorJson{
		Message: "Body does not represent a valid block type",
		Code:    http.StatusBadRequest,
	}
	network.WriteError(w, errJson)
}
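For orientation, here is a minimal client-side sketch of how this handler might be exercised. The endpoint path, port, and payload handling are assumptions (the route registration is not part of this excerpt); the sketch only shows a JSON-encoded signed block being posted with the `broadcast_validation` query parameter that the handler inspects.

// Hypothetical client-side sketch; the URL, port, and error handling are assumptions,
// not taken from this changeset.
package client

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func publishBlockV2(signedBlockJSON []byte) error {
	// broadcast_validation=consensus_and_equivocation asks the node to run the
	// extra checks implemented by validateBroadcast before broadcasting.
	url := "http://localhost:3500/eth/v2/beacon/blocks?broadcast_validation=consensus_and_equivocation"
	resp, err := http.Post(url, "application/json", bytes.NewReader(signedBlockJSON))
	if err != nil {
		return err
	}
	defer func() { _ = resp.Body.Close() }()
	// Any 20x response means the block was broadcast; 202 specifically signals
	// a broadcast that failed full integration, per the handler's doc comment.
	if resp.StatusCode >= http.StatusMultipleChoices {
		body, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("publish failed: %s: %s", resp.Status, body)
	}
	return nil
}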

func (bs *Server) proposeBlock(ctx context.Context, w http.ResponseWriter, blk *eth.GenericSignedBeaconBlock) {
	_, err := bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, blk)
	if err != nil {
		errJson := &network.DefaultErrorJson{
			Message: err.Error(),
			Code:    http.StatusInternalServerError,
		}
		network.WriteError(w, errJson)
		return
	}
}

func unmarshalStrict(data []byte, v interface{}) error {
	dec := json.NewDecoder(bytes.NewReader(data))
	dec.DisallowUnknownFields()
	return dec.Decode(v)
}
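Because unmarshalStrict disallows unknown fields, a body whose JSON carries fields that a given fork's struct does not declare fails to decode rather than being silently accepted, which is what lets the publish handlers above probe each block type in turn. A standalone sketch of that behaviour (the struct here is illustrative, not one of the real SignedBeaconBlock* types):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// phase0Like is an illustrative stand-in for an older-fork block struct.
type phase0Like struct {
	Slot string `json:"slot"`
}

func unmarshalStrict(data []byte, v interface{}) error {
	dec := json.NewDecoder(bytes.NewReader(data))
	dec.DisallowUnknownFields()
	return dec.Decode(v)
}

func main() {
	// The extra field makes strict decoding fail, so a caller would move on to
	// the next candidate block type instead of mis-parsing the body.
	body := []byte(`{"slot":"1","execution_payload":{}}`)
	var b phase0Like
	fmt.Println(unmarshalStrict(body, &b)) // json: unknown field "execution_payload"
}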

func (bs *Server) validateBroadcast(r *http.Request, blk *eth.GenericSignedBeaconBlock) error {
	switch r.URL.Query().Get(broadcastValidationQueryParam) {
	case broadcastValidationConsensus:
		b, err := blocks.NewSignedBeaconBlock(blk.Block)
		if err != nil {
			return errors.Wrapf(err, "could not create signed beacon block")
		}
		if err = bs.validateConsensus(r.Context(), b); err != nil {
			return errors.Wrap(err, "consensus validation failed")
		}
	case broadcastValidationConsensusAndEquivocation:
		b, err := blocks.NewSignedBeaconBlock(blk.Block)
		if err != nil {
			return errors.Wrapf(err, "could not create signed beacon block")
		}
		if err = bs.validateConsensus(r.Context(), b); err != nil {
			return errors.Wrap(err, "consensus validation failed")
		}
		if err = bs.validateEquivocation(b.Block()); err != nil {
			return errors.Wrap(err, "equivocation validation failed")
		}
	default:
		return nil
	}
	return nil
}
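The broadcastValidation* constants used in the switch above are defined elsewhere in this package and are not part of the excerpt. A plausible sketch, assuming the values mirror the beacon API's `broadcast_validation` parameter (the exact strings are an assumption here):

// Assumed values only; the constant names match validateBroadcast above, but the
// strings are inferred from the beacon API's broadcast_validation parameter.
const (
	broadcastValidationQueryParam               = "broadcast_validation"
	broadcastValidationConsensus                = "consensus"
	broadcastValidationConsensusAndEquivocation = "consensus_and_equivocation"
)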

func (bs *Server) validateConsensus(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) error {
	parentRoot := blk.Block().ParentRoot()
	parentState, err := bs.Stater.State(ctx, parentRoot[:])
	if err != nil {
		return errors.Wrap(err, "could not get parent state")
	}
	_, err = transition.ExecuteStateTransition(ctx, parentState, blk)
	if err != nil {
		return errors.Wrap(err, "could not execute state transition")
	}
	return nil
}

func (bs *Server) validateEquivocation(blk interfaces.ReadOnlyBeaconBlock) error {
	if bs.ForkchoiceFetcher.HighestReceivedBlockSlot() == blk.Slot() {
		return fmt.Errorf("block for slot %d already exists in fork choice", blk.Slot())
	}
	return nil
}

func (bs *Server) checkSync(ctx context.Context, w http.ResponseWriter) bool {
	isSyncing, syncDetails, err := helpers.ValidateSyncHTTP(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher)
	if err != nil {
		errJson := &network.DefaultErrorJson{
			Message: "Could not check if node is syncing: " + err.Error(),
			Code:    http.StatusInternalServerError,
		}
		network.WriteError(w, errJson)
		return false
	}
	if isSyncing {
		msg := "Beacon node is currently syncing and not serving request on that endpoint"
		details, err := json.Marshal(syncDetails)
		if err == nil {
			msg += " Details: " + string(details)
		}
		errJson := &network.DefaultErrorJson{
			Message: msg,
			Code:    http.StatusServiceUnavailable,
		}
		network.WriteError(w, errJson)
		return false
	}
	return true
}
beacon-chain/rpc/eth/beacon/handlers_test.go: 1275 lines (normal file); file diff suppressed because it is too large.
@@ -1183,7 +1183,7 @@ func TestServer_SubmitAttestations_InvalidAttestationGRPCHeader(t *testing.T) {
 	require.Equal(t, true, ok, "could not retrieve custom error metadata value")
 	assert.DeepEqual(
 		t,
-		[]string{"{\"failures\":[{\"index\":0,\"message\":\"Incorrect attestation signature: signature must be 96 bytes\"}]}"},
+		[]string{"{\"failures\":[{\"index\":0,\"message\":\"Incorrect attestation signature: could not create signature from byte slice: signature must be 96 bytes\"}]}"},
 		v,
 	)
 }
@@ -45,4 +45,5 @@ type Server struct {
 	ExecutionPayloadReconstructor execution.ExecutionPayloadReconstructor
 	FinalizationFetcher blockchain.FinalizationFetcher
 	BLSChangesPool blstoexec.PoolManager
+	ForkchoiceFetcher blockchain.ForkchoiceFetcher
 }
beacon-chain/rpc/eth/beacon/structs.go: 1650 lines (normal file); file diff suppressed because it is too large.
@@ -20,9 +20,9 @@ import (
 	"google.golang.org/grpc/status"
 )
 
-// ValidateSync checks whether the node is currently syncing and returns an error if it is.
+// ValidateSyncGRPC checks whether the node is currently syncing and returns an error if it is.
 // It also appends syncing info to gRPC headers.
-func ValidateSync(
+func ValidateSyncGRPC(
 	ctx context.Context,
 	syncChecker sync.Checker,
 	headFetcher blockchain.HeadFetcher,
@@ -38,8 +38,8 @@ func ValidateSync(
 		return status.Errorf(codes.Internal, "Could not check optimistic status: %v", err)
 	}
 
-	syncDetailsContainer := &syncDetailsContainer{
-		SyncDetails: &SyncDetailsJson{
+	syncDetailsContainer := &SyncDetailsContainer{
+		Data: &SyncDetailsJson{
 			HeadSlot: strconv.FormatUint(uint64(headSlot), 10),
 			SyncDistance: strconv.FormatUint(uint64(timeFetcher.CurrentSlot()-headSlot), 10),
 			IsSyncing: true,
@@ -58,6 +58,35 @@ func ValidateSync(
 		return status.Error(codes.Unavailable, "Syncing to latest head, not ready to respond")
 	}
 
+// ValidateSyncHTTP checks whether the node is currently syncing and returns sync information.
+// It returns information whether the node is currently syncing along with sync details.
+func ValidateSyncHTTP(
+	ctx context.Context,
+	syncChecker sync.Checker,
+	headFetcher blockchain.HeadFetcher,
+	timeFetcher blockchain.TimeFetcher,
+	optimisticModeFetcher blockchain.OptimisticModeFetcher,
+) (bool, *SyncDetailsContainer, error) {
+	if !syncChecker.Syncing() {
+		return false, nil, nil
+	}
+
+	headSlot := headFetcher.HeadSlot()
+	isOptimistic, err := optimisticModeFetcher.IsOptimistic(ctx)
+	if err != nil {
+		return true, nil, errors.Wrap(err, "could not check optimistic status")
+	}
+	syncDetails := &SyncDetailsContainer{
+		Data: &SyncDetailsJson{
+			HeadSlot: strconv.FormatUint(uint64(headSlot), 10),
+			SyncDistance: strconv.FormatUint(uint64(timeFetcher.CurrentSlot()-headSlot), 10),
+			IsSyncing: true,
+			IsOptimistic: isOptimistic,
+		},
+	}
+	return true, syncDetails, nil
+}
+
 // IsOptimistic checks whether the beacon state's block is optimistic.
 func IsOptimistic(
 	ctx context.Context,
@@ -197,7 +226,7 @@ type SyncDetailsJson struct {
 	ElOffline bool `json:"el_offline"`
 }
 
-// SyncDetailsContainer is a wrapper for SyncDetails.
-type syncDetailsContainer struct {
-	SyncDetails *SyncDetailsJson `json:"sync_details"`
+// SyncDetailsContainer is a wrapper for Data.
+type SyncDetailsContainer struct {
+	Data *SyncDetailsJson `json:"data"`
 }

@@ -42,7 +42,7 @@ func TestValidateSync(t *testing.T) {
 			Slot: &headSlot,
 			State: st,
 		}
-		err = ValidateSync(ctx, syncChecker, chainService, chainService, chainService)
+		err = ValidateSyncGRPC(ctx, syncChecker, chainService, chainService, chainService)
 		require.NotNil(t, err)
 		sts, ok := grpc.ServerTransportStreamFromContext(ctx).(*runtime.ServerTransportStream)
 		require.Equal(t, true, ok, "type assertion failed")
@@ -51,7 +51,7 @@ func TestValidateSync(t *testing.T) {
 		require.Equal(t, true, ok, "could not retrieve custom error metadata value")
 		assert.DeepEqual(
 			t,
-			[]string{`{"sync_details":{"head_slot":"50","sync_distance":"50","is_syncing":true,"is_optimistic":false,"el_offline":false}}`},
+			[]string{`{"data":{"head_slot":"50","sync_distance":"50","is_syncing":true,"is_optimistic":false,"el_offline":false}}`},
 			v,
 		)
 	})
@@ -67,7 +67,7 @@ func TestValidateSync(t *testing.T) {
 			Slot: &headSlot,
 			State: st,
 		}
-		err = ValidateSync(ctx, syncChecker, nil, nil, chainService)
+		err = ValidateSyncGRPC(ctx, syncChecker, nil, nil, chainService)
 		require.NoError(t, err)
 	})
 }
@@ -337,6 +337,9 @@ func handleEmptyFilters(req *ethpb.PeersRequest) (emptyState, emptyDirection boo
 func peerInfo(peerStatus *peers.Status, id peer.ID) (*ethpb.Peer, error) {
 	enr, err := peerStatus.ENR(id)
 	if err != nil {
+		if errors.Is(err, peerdata.ErrPeerUnknown) {
+			return nil, nil
+		}
 		return nil, errors.Wrap(err, "could not obtain ENR")
 	}
 	var serializedEnr string
@@ -348,14 +351,23 @@ func peerInfo(peerStatus *peers.Status, id peer.ID) (*ethpb.Peer, error) {
 	}
 	address, err := peerStatus.Address(id)
 	if err != nil {
+		if errors.Is(err, peerdata.ErrPeerUnknown) {
+			return nil, nil
+		}
 		return nil, errors.Wrap(err, "could not obtain address")
 	}
 	connectionState, err := peerStatus.ConnectionState(id)
 	if err != nil {
+		if errors.Is(err, peerdata.ErrPeerUnknown) {
+			return nil, nil
+		}
 		return nil, errors.Wrap(err, "could not obtain connection state")
 	}
 	direction, err := peerStatus.Direction(id)
 	if err != nil {
+		if errors.Is(err, peerdata.ErrPeerUnknown) {
+			return nil, nil
+		}
 		return nil, errors.Wrap(err, "could not obtain direction")
 	}
 	if eth.PeerDirection(direction) == eth.PeerDirection_UNKNOWN {
@@ -44,7 +44,7 @@ func (vs *Server) GetAttesterDuties(ctx context.Context, req *ethpbv1.AttesterDu
 	ctx, span := trace.StartSpan(ctx, "validator.GetAttesterDuties")
 	defer span.End()
 
-	if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
+	if err := rpchelpers.ValidateSyncGRPC(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
 		// We simply return the error because it's already a gRPC error.
 		return nil, err
 	}
@@ -133,7 +133,7 @@ func (vs *Server) GetProposerDuties(ctx context.Context, req *ethpbv1.ProposerDu
 	ctx, span := trace.StartSpan(ctx, "validator.GetProposerDuties")
 	defer span.End()
 
-	if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
+	if err := rpchelpers.ValidateSyncGRPC(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
 		// We simply return the error because it's already a gRPC error.
 		return nil, err
 	}
@@ -224,7 +224,7 @@ func (vs *Server) GetSyncCommitteeDuties(ctx context.Context, req *ethpbv2.SyncC
 	ctx, span := trace.StartSpan(ctx, "validator.GetSyncCommitteeDuties")
 	defer span.End()
 
-	if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
+	if err := rpchelpers.ValidateSyncGRPC(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
 		// We simply return the error because it's already a gRPC error.
 		return nil, err
 	}
@@ -307,7 +307,7 @@ func (vs *Server) ProduceBlockV2(ctx context.Context, req *ethpbv1.ProduceBlockR
 	ctx, span := trace.StartSpan(ctx, "validator.ProduceBlockV2")
 	defer span.End()
 
-	if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
+	if err := rpchelpers.ValidateSyncGRPC(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
 		// We simply return the error because it's already a gRPC error.
 		return nil, err
 	}
@@ -406,7 +406,7 @@ func (vs *Server) ProduceBlockV2SSZ(ctx context.Context, req *ethpbv1.ProduceBlo
 	ctx, span := trace.StartSpan(ctx, "validator.ProduceBlockV2SSZ")
 	defer span.End()
 
-	if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
+	if err := rpchelpers.ValidateSyncGRPC(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
 		// We simply return the error because it's already a gRPC error.
 		return nil, err
 	}
@@ -515,7 +515,7 @@ func (vs *Server) ProduceBlindedBlock(ctx context.Context, req *ethpbv1.ProduceB
 	if !vs.BlockBuilder.Configured() {
 		return nil, status.Error(codes.Internal, "Block builder not configured")
 	}
-	if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
+	if err := rpchelpers.ValidateSyncGRPC(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
 		// We simply return the error because it's already a gRPC error.
 		return nil, err
 	}
@@ -614,7 +614,7 @@ func (vs *Server) ProduceBlindedBlockSSZ(ctx context.Context, req *ethpbv1.Produ
 	if !vs.BlockBuilder.Configured() {
 		return nil, status.Error(codes.Internal, "Block builder not configured")
 	}
-	if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
+	if err := rpchelpers.ValidateSyncGRPC(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
 		// We simply return the error because it's already a gRPC error.
 		return nil, err
 	}
@@ -891,7 +891,7 @@ func (vs *Server) SubmitBeaconCommitteeSubscription(ctx context.Context, req *et
 	ctx, span := trace.StartSpan(ctx, "validator.SubmitBeaconCommitteeSubscription")
 	defer span.End()
 
-	if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
+	if err := rpchelpers.ValidateSyncGRPC(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
 		// We simply return the error because it's already a gRPC error.
 		return nil, err
 	}
@@ -972,7 +972,7 @@ func (vs *Server) SubmitSyncCommitteeSubscription(ctx context.Context, req *ethp
 	ctx, span := trace.StartSpan(ctx, "validator.SubmitSyncCommitteeSubscription")
 	defer span.End()
 
-	if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
+	if err := rpchelpers.ValidateSyncGRPC(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
 		// We simply return the error because it's already a gRPC error.
 		return nil, err
 	}
@@ -25,6 +25,7 @@ go_library(
        "server.go",
        "status.go",
        "sync_committee.go",
        "unblinder.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/validator",
    visibility = ["//beacon-chain:__subpackages__"],
@@ -55,13 +56,14 @@ go_library(
        "//beacon-chain/operations/synccommittee:go_default_library",
        "//beacon-chain/operations/voluntaryexits:go_default_library",
        "//beacon-chain/p2p:go_default_library",
        "//beacon-chain/startup:go_default_library",
        "//beacon-chain/startup:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",
        "//beacon-chain/sync:go_default_library",
        "//config/features:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/payload-attribute:go_default_library",
@@ -96,6 +98,7 @@ go_library(
        "@io_opencensus_go//trace:go_default_library",
        "@org_golang_google_grpc//codes:go_default_library",
        "@org_golang_google_grpc//status:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
        "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
        "@org_golang_x_sync//errgroup:go_default_library",
    ],
@@ -181,7 +184,6 @@ go_test(
        "proposer_attestations_test.go",
        "proposer_bellatrix_test.go",
        "proposer_builder_test.go",
        "proposer_capella_test.go",
        "proposer_deposits_test.go",
        "proposer_empty_block_test.go",
        "proposer_execution_payload_test.go",
@@ -194,6 +196,7 @@ go_test(
        "status_mainnet_test.go",
        "status_test.go",
        "sync_committee_test.go",
        "unblinder_test.go",
        "validator_test.go",
    ],
    embed = [":go_default_library"],
Some files were not shown because too many files have changed in this diff.