Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 05:47:59 -05:00)

Compare commits: fuzz-conve...builder-te (89 commits)
a18e04b3b6, ff92ea17e1, 650bbe3437, b57bdb6525, 559d9851ab, 370cf1a6c8, 50e4d3df11, 76f6d74b83,
1750c08479, 3fd0802db8, addb3cd665, 5a7c260c82, b7c33888d2, c603d120d7, 5c1d4dade1, 39893bbe30,
e14d3c9c6d, 9d40adb669, c89401f452, a984605064, f28b47bd87, 588dea83b7, d6e832d7a3, e3f9f87da6,
88a700c3df, 1012ec1915, b74947aa75, e3f69e4fad, 092e9e1d19, 1c51f6d1be, ee6aa4d4ec, 0d2696ed4e,
f145468b05, 5449250142, 03d44d7bfe, b38e4ddc3e, 142711f20d, 06ca73946e, e9e1c26ff0, 9fab9df61e,
2e1e9bfa4c, 8340c013f2, eedafac822, e9ad7aeff8, 3cfef20938, 7309758dc6, e90284bc00, e2aa4b16d2,
01e15a033f, f09b06d6f6, 16e66ee1b8, ca71dc03e8, d1fc8166c6, 3d3890205f, a90335b15e, 81a6c3f3cb,
7f718f90a7, e4dcbc4297, ffb3ef2feb, 8cd43d216f, d25c0ec1a5, eeb4e576ad, c8a0ad66f8, e1c4427ea5,
7042791e31, 8a725ac454, e771585b77, 98622a052f, 61033ebea1, e808025b17, 7db0435ee0, 1f086e4333,
0a834efd46, 184e5be9de, e33850bf51, cc643ac4cc, abefe1e9d5, b4e89fb28b, 0973e08056, 4ad1c4df01,
6a197b47d9, d102421a25, 7b1490429c, fabd6f26d3, bbdf19cfd0, 5d94030b4f, 16273a2040, 97663548a1,
7d9d8454b1
INTEROP.md (17 changed lines)

@@ -26,15 +26,18 @@ You can use `bazel run //tools/genesis-state-gen` to create a deterministic genesis state
 ### Usage

 - **--genesis-time** uint: Unix timestamp used as the genesis time in the generated genesis state (defaults to now)
-- **--mainnet-config** bool: Select whether genesis state should be generated with mainnet or minimal (default) params
 - **--num-validators** int: Number of validators to deterministically include in the generated genesis state
 - **--output-ssz** string: Output filename of the SSZ marshaling of the generated genesis state
+- **--config-name=interop** string: name of the beacon chain config to use when generating the state. ex mainnet|minimal|interop
+
+**deprecated flag: use --config-name instead**
+- **--mainnet-config** bool: Select whether genesis state should be generated with mainnet or minimal (default) params

 The example below creates 64 validator keys, instantiates a genesis state with those 64 validators and with genesis unix timestamp 1567542540,
-and finally writes a ssz encoded output to ~/Desktop/genesis.ssz. This file can be used to kickstart the beacon chain in the next section.
+and finally writes a ssz encoded output to ~/Desktop/genesis.ssz. This file can be used to kickstart the beacon chain in the next section. When using the `--interop-*` flags, the beacon node will assume the `interop` config should be used, unless a different config is specified on the command line.

 ```
-bazel run //tools/genesis-state-gen -- --output-ssz ~/Desktop/genesis.ssz --num-validators 64 --genesis-time 1567542540
+bazel run //tools/genesis-state-gen -- --config-name interop --output-ssz ~/Desktop/genesis.ssz --num-validators 64 --genesis-time 1567542540
 ```

 ## Launching a Beacon Node + Validator Client
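As an aside, the flags documented in this hunk compose for other configs as well; a hypothetical invocation targeting the minimal config (the validator count, timestamp, and output path below are illustrative and not part of this change):

```sh
# Hypothetical example using only the flags documented above:
# generate a 256-validator minimal-config genesis state.
bazel run //tools/genesis-state-gen -- \
  --config-name minimal \
  --num-validators 256 \
  --genesis-time 1600000000 \
  --output-ssz /tmp/genesis-minimal.ssz
```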
@@ -46,8 +49,10 @@ Open up two terminal windows, run:
 ```
 bazel run //beacon-chain -- \
 --bootstrap-node= \
---deposit-contract $(curl -s https://prylabs.net/contract) \
+--deposit-contract 0x8A04d14125D0FDCDc742F4A05C051De07232EDa4 \
 --datadir=/tmp/beacon-chain-interop \
+--force-clear-db \
+--min-sync-peers=0 \
 --interop-num-validators 64 \
 --interop-eth1data-votes
 ```

@@ -69,8 +74,10 @@ Assuming you generated a `genesis.ssz` file with 64 validators, open up two terminal windows, run:
 ```
 bazel run //beacon-chain -- \
 --bootstrap-node= \
---deposit-contract $(curl -s https://prylabs.net/contract) \
+--deposit-contract 0x8A04d14125D0FDCDc742F4A05C051De07232EDa4 \
 --datadir=/tmp/beacon-chain-interop \
+--force-clear-db \
+--min-sync-peers=0 \
 --interop-genesis-state /path/to/genesis.ssz \
 --interop-eth1data-votes
 ```

@@ -46,10 +46,8 @@
 type StateOrBlockId string

 const (
-	IdFinalized StateOrBlockId = "finalized"
-	IdGenesis   StateOrBlockId = "genesis"
-	IdHead      StateOrBlockId = "head"
-	IdJustified StateOrBlockId = "justified"
+	IdGenesis StateOrBlockId = "genesis"
+	IdHead    StateOrBlockId = "head"
 )

 var ErrMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")

@@ -60,7 +58,7 @@ func IdFromRoot(r [32]byte) StateOrBlockId {
 	return StateOrBlockId(fmt.Sprintf("%#x", r))
 }

-// IdFromRoot encodes a Slot in the format expected by the API in places where a slot can be used to identify
+// IdFromSlot encodes a Slot in the format expected by the API in places where a slot can be used to identify
 // a BeaconState or SignedBeaconBlock.
 func IdFromSlot(s types.Slot) StateOrBlockId {
 	return StateOrBlockId(strconv.FormatUint(uint64(s), 10))
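The two helpers in this hunk just render a slot or a root in the form the standard API accepts as a state/block identifier. A small self-contained sketch of what they produce (the helpers are copied locally so the snippet compiles on its own; in prysm they take a types.Slot rather than a plain uint64):

```go
package main

import (
	"fmt"
	"strconv"
)

// Local copies of the helpers shown in the hunk above.
type StateOrBlockId string

func IdFromRoot(r [32]byte) StateOrBlockId {
	return StateOrBlockId(fmt.Sprintf("%#x", r))
}

func IdFromSlot(s uint64) StateOrBlockId {
	return StateOrBlockId(strconv.FormatUint(s, 10))
}

func main() {
	var root [32]byte
	root[31] = 0xff
	fmt.Println(IdFromSlot(12345)) // a slot becomes its decimal string: "12345"
	fmt.Println(IdFromRoot(root))  // a root becomes 0x-prefixed hex
	// The named identifiers ("genesis", "head", ...) are the const values above.
}
```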
api/client/builder/BUILD.bazel (new file, 41 lines)

```
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "client.go",
        "errors.go",
        "types.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/api/client/builder",
    visibility = ["//visibility:public"],
    deps = [
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "client_test.go",
        "types_test.go",
    ],
    data = glob(["testdata/**"]),
    embed = [":go_default_library"],
    deps = [
        "//config/fieldparams:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/require:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
    ],
)
```
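With the go_test target above in place, the new package's tests can be run in isolation; for example (standard Bazel/Go usage, not part of the diff):

```sh
bazel test //api/client/builder:go_default_test
# or, outside Bazel, with the plain Go toolchain:
go test ./api/client/builder/...
```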
api/client/builder/client.go (new file, 206 lines)

```go
package builder

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"text/template"
	"time"

	v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"

	"github.com/pkg/errors"
	types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
	log "github.com/sirupsen/logrus"
)

const (
	getExecHeaderPath          = "/eth/v1/builder/header/{{.Slot}}/{{.ParentHash}}/{{.Pubkey}}"
	getStatus                  = "/eth/v1/builder/status"
	postBlindedBeaconBlockPath = "/eth/v1/builder/blinded_blocks"
	postRegisterValidatorPath  = "/eth/v1/builder/validators"
)

var errMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")

// ClientOpt is a functional option for the Client type (http.Client wrapper)
type ClientOpt func(*Client)

// WithTimeout sets the .Timeout attribute of the wrapped http.Client.
func WithTimeout(timeout time.Duration) ClientOpt {
	return func(c *Client) {
		c.hc.Timeout = timeout
	}
}

// Client provides a collection of helper methods for calling Builder API endpoints.
type Client struct {
	hc      *http.Client
	baseURL *url.URL
}

// NewClient constructs a new client with the provided options (ex WithTimeout).
// `host` is the base host + port used to construct request urls. This value can be
// a URL string, or NewClient will assume an http endpoint if just `host:port` is used.
func NewClient(host string, opts ...ClientOpt) (*Client, error) {
	u, err := urlForHost(host)
	if err != nil {
		return nil, err
	}
	c := &Client{
		hc:      &http.Client{},
		baseURL: u,
	}
	for _, o := range opts {
		o(c)
	}
	return c, nil
}

func urlForHost(h string) (*url.URL, error) {
	// try to parse as url (being permissive)
	u, err := url.Parse(h)
	if err == nil && u.Host != "" {
		return u, nil
	}
	// try to parse as host:port
	host, port, err := net.SplitHostPort(h)
	if err != nil {
		return nil, errMalformedHostname
	}
	return &url.URL{Host: net.JoinHostPort(host, port), Scheme: "http"}, nil
}

// NodeURL returns a human-readable string representation of the beacon node base url.
func (c *Client) NodeURL() string {
	return c.baseURL.String()
}

type reqOption func(*http.Request)

// do is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, opts ...reqOption) ([]byte, error) {
	u := c.baseURL.ResolveReference(&url.URL{Path: path})
	log.Printf("requesting %s", u.String())
	req, err := http.NewRequestWithContext(ctx, method, u.String(), body)
	if err != nil {
		return nil, err
	}
	for _, o := range opts {
		o(req)
	}
	r, err := c.hc.Do(req)
	if err != nil {
		return nil, err
	}
	defer func() {
		err = r.Body.Close()
	}()
	if r.StatusCode != http.StatusOK {
		return nil, non200Err(r)
	}
	b, err := io.ReadAll(r.Body)
	if err != nil {
		return nil, errors.Wrap(err, "error reading http response body from GetBlock")
	}
	return b, nil
}

var execHeaderTemplate = template.Must(template.New("").Parse(getExecHeaderPath))

func execHeaderPath(slot types.Slot, parentHash [32]byte, pubkey [48]byte) (string, error) {
	v := struct {
		Slot       types.Slot
		ParentHash string
		Pubkey     string
	}{
		Slot:       slot,
		ParentHash: fmt.Sprintf("%#x", parentHash),
		Pubkey:     fmt.Sprintf("%#x", pubkey),
	}
	b := bytes.NewBuffer(nil)
	err := execHeaderTemplate.Execute(b, v)
	if err != nil {
		return "", errors.Wrapf(err, "error rendering exec header template with slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
	}
	return b.String(), nil
}

// GetHeader is used by a proposing validator to request an ExecutionPayloadHeader from the Builder node.
func (c *Client) GetHeader(ctx context.Context, slot types.Slot, parentHash [32]byte, pubkey [48]byte) (*ethpb.SignedBuilderBid, error) {
	path, err := execHeaderPath(slot, parentHash, pubkey)
	if err != nil {
		return nil, err
	}
	hb, err := c.do(ctx, http.MethodGet, path, nil)
	if err != nil {
		return nil, err
	}
	hr := &ExecHeaderResponse{}
	if err := json.Unmarshal(hb, hr); err != nil {
		return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
	}
	return hr.ToProto()
}

// RegisterValidator encodes the SignedValidatorRegistrationV1 message to json (including hex-encoding the byte
// fields with 0x prefixes) and posts to the builder validator registration endpoint.
func (c *Client) RegisterValidator(ctx context.Context, svr *ethpb.SignedValidatorRegistrationV1) error {
	v := &SignedValidatorRegistration{SignedValidatorRegistrationV1: svr}
	body, err := json.Marshal(v)
	if err != nil {
		return errors.Wrap(err, "error encoding the SignedValidatorRegistration value body in RegisterValidator")
	}
	_, err = c.do(ctx, http.MethodPost, postRegisterValidatorPath, bytes.NewBuffer(body))
	return err
}

// SubmitBlindedBlock calls the builder API endpoint that binds the validator to the builder and submits the block.
// The response is the full ExecutionPayload used to create the blinded block.
func (c *Client) SubmitBlindedBlock(ctx context.Context, sb *ethpb.SignedBlindedBeaconBlockBellatrix) (*v1.ExecutionPayload, error) {
	v := &SignedBlindedBeaconBlockBellatrix{SignedBlindedBeaconBlockBellatrix: sb}
	body, err := json.Marshal(v)
	if err != nil {
		return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockBellatrix value body in SubmitBlindedBlock")
	}
	rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body))
	if err != nil {
		return nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockBellatrix to the builder api")
	}
	ep := &ExecPayloadResponse{}
	if err := json.Unmarshal(rb, ep); err != nil {
		return nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlock response")
	}
	return ep.ToProto()
}

// Status asks the remote builder server for a health check. A response of 200 with an empty body is the success/healthy
// response, and an error response may have an error message. This method will return a nil value for error in the
// happy path, and an error with information about the server response body for a non-200 response.
func (c *Client) Status(ctx context.Context) error {
	_, err := c.do(ctx, http.MethodGet, getStatus, nil)
	return err
}

func non200Err(response *http.Response) error {
	bodyBytes, err := io.ReadAll(response.Body)
	var body string
	if err != nil {
		body = "(Unable to read response body.)"
	} else {
		body = "response body:\n" + string(bodyBytes)
	}
	msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
	switch response.StatusCode {
	case 404:
		return errors.Wrap(ErrNotFound, msg)
	default:
		return errors.Wrap(ErrNotOK, msg)
	}
}
```
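A minimal sketch of how a caller might wire this client up, assuming a builder or relay listening on localhost:18550 (the endpoint, slot, and key values below are illustrative and not part of the PR):

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/prysmaticlabs/prysm/api/client/builder"
	types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
)

func main() {
	// Hypothetical endpoint; any "host:port" string is accepted and defaults to http.
	c, err := builder.NewClient("localhost:18550", builder.WithTimeout(2*time.Second))
	if err != nil {
		log.Fatal(err)
	}

	// Request a header bid for an illustrative slot / parent hash / proposer pubkey.
	var parentHash [32]byte
	var pubkey [48]byte
	bid, err := c.GetHeader(context.Background(), types.Slot(23), parentHash, pubkey)
	if err != nil {
		log.Fatalf("GetHeader failed: %v", err)
	}
	// The bid value comes back as SSZ-style (little-endian) bytes on the proto message.
	log.Printf("bid value (SSZ bytes): %#x", bid.Message.Value)
}
```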
api/client/builder/client_test.go (new file, 323 lines)

```go
package builder

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strconv"
	"testing"

	"github.com/prysmaticlabs/go-bitfield"
	fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
	types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/testing/require"
)

type roundtrip func(*http.Request) (*http.Response, error)

func (fn roundtrip) RoundTrip(r *http.Request) (*http.Response, error) {
	return fn(r)
}
```
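The `roundtrip` function type above lets the tests that follow stub the wrapped http.Client without any network I/O. A condensed, standalone sketch of the same pattern, independent of the prysm test helpers (the URL and body below are illustrative):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

// fakeTransport adapts a plain function to http.RoundTripper so an *http.Client
// can be pointed at canned responses in tests.
type fakeTransport func(*http.Request) (*http.Response, error)

func (fn fakeTransport) RoundTrip(r *http.Request) (*http.Response, error) { return fn(r) }

func main() {
	hc := &http.Client{
		Transport: fakeTransport(func(r *http.Request) (*http.Response, error) {
			// Every request gets a fixed 200; a real test would assert on
			// r.URL.Path and r.Method here, as the prysm tests do.
			return &http.Response{
				StatusCode: http.StatusOK,
				Body:       io.NopCloser(bytes.NewBufferString(`{"ok":true}`)),
				Request:    r,
			}, nil
		}),
	}
	resp, err := hc.Get("http://example.invalid/eth/v1/builder/status")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	b, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(b))
}
```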
func TestClient_Status(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
statusPath := "/eth/v1/builder/status"
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
defer func() {
|
||||
if r.Body == nil {
|
||||
return
|
||||
}
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
require.Equal(t, statusPath, r.URL.Path)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(bytes.NewBuffer(nil)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
require.NoError(t, c.Status(ctx))
|
||||
hc = &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
defer func() {
|
||||
if r.Body == nil {
|
||||
return
|
||||
}
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
require.Equal(t, statusPath, r.URL.Path)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusInternalServerError,
|
||||
Body: io.NopCloser(bytes.NewBuffer(nil)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c = &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
require.ErrorIs(t, c.Status(ctx), ErrNotOK)
|
||||
}
|
||||
|
||||
func TestClient_RegisterValidator(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
expectedBody := `{"message":{"fee_recipient":"0x0000000000000000000000000000000000000000","gas_limit":"23","timestamp":"42","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"}}`
|
||||
expectedPath := "/eth/v1/builder/validators"
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
body, err := io.ReadAll(r.Body)
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedBody, string(body))
|
||||
require.Equal(t, expectedPath, r.URL.Path)
|
||||
require.Equal(t, http.MethodPost, r.Method)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(bytes.NewBuffer(nil)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
reg := ð.SignedValidatorRegistrationV1{
|
||||
Message: ð.ValidatorRegistrationV1{
|
||||
FeeRecipient: ezDecode(t, fieldparams.EthBurnAddressHex),
|
||||
GasLimit: 23,
|
||||
Timestamp: 42,
|
||||
Pubkey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
|
||||
},
|
||||
}
|
||||
require.NoError(t, c.RegisterValidator(ctx, reg))
|
||||
}
|
||||
|
||||
func TestClient_GetHeader(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
expectedPath := "/eth/v1/builder/header/23/0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2/0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, expectedPath, r.URL.Path)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusInternalServerError,
|
||||
Body: io.NopCloser(bytes.NewBuffer(nil)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
var slot types.Slot = 23
|
||||
parentHash := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
pubkey := ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
|
||||
_, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
|
||||
require.ErrorIs(t, err, ErrNotOK)
|
||||
|
||||
hc = &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, expectedPath, r.URL.Path)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponse)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c = &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
h, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
|
||||
require.NoError(t, err)
|
||||
expectedSig := ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505")
|
||||
require.Equal(t, true, bytes.Equal(expectedSig, h.Signature))
|
||||
expectedTxRoot := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.Equal(t, true, bytes.Equal(expectedTxRoot, h.Message.Header.TransactionsRoot))
|
||||
require.Equal(t, uint64(1), h.Message.Header.GasUsed)
|
||||
value := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", h.Message.Value))
|
||||
}
|
||||
|
||||
func TestSubmitBlindedBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayload)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
sbbb := testSignedBlindedBeaconBlockBellatrix(t)
|
||||
ep, err := c.SubmitBlindedBlock(ctx, sbbb)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"), ep.ParentHash))
|
||||
bfpg := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||
require.Equal(t, fmt.Sprintf("%#x", bfpg.SSZBytes()), fmt.Sprintf("%#x", ep.BaseFeePerGas))
|
||||
require.Equal(t, uint64(1), ep.GasLimit)
|
||||
}
|
||||
|
||||
func testSignedBlindedBeaconBlockBellatrix(t *testing.T) *eth.SignedBlindedBeaconBlockBellatrix {
|
||||
return ð.SignedBlindedBeaconBlockBellatrix{
|
||||
Block: ð.BlindedBeaconBlockBellatrix{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Body: ð.BlindedBeaconBlockBodyBellatrix{
|
||||
RandaoReveal: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
Eth1Data: ð.Eth1Data{
|
||||
DepositRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
DepositCount: 1,
|
||||
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Graffiti: ezDecode(t, "0xdeadbeefc0ffee"),
|
||||
ProposerSlashings: []*eth.ProposerSlashing{
|
||||
{
|
||||
Header_1: ð.SignedBeaconBlockHeader{
|
||||
Header: ð.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
Header_2: ð.SignedBeaconBlockHeader{
|
||||
Header: ð.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
},
|
||||
},
|
||||
AttesterSlashings: []*eth.AttesterSlashing{
|
||||
{
|
||||
Attestation_1: ð.IndexedAttestation{
|
||||
AttestingIndices: []uint64{1},
|
||||
Data: ð.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Source: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Target: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
Attestation_2: ð.IndexedAttestation{
|
||||
AttestingIndices: []uint64{1},
|
||||
Data: ð.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Source: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Target: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
},
|
||||
},
|
||||
Attestations: []*eth.Attestation{
|
||||
{
|
||||
AggregationBits: bitfield.Bitlist{0x01},
|
||||
Data: ð.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Source: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Target: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
},
|
||||
Deposits: []*eth.Deposit{
|
||||
{
|
||||
Proof: [][]byte{ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")},
|
||||
Data: ð.Deposit_Data{
|
||||
PublicKey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
|
||||
WithdrawalCredentials: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Amount: 1,
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
},
|
||||
},
|
||||
VoluntaryExits: []*eth.SignedVoluntaryExit{
|
||||
{
|
||||
Exit: ð.VoluntaryExit{
|
||||
Epoch: 1,
|
||||
ValidatorIndex: 1,
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
},
|
||||
SyncAggregate: ð.SyncAggregate{
|
||||
SyncCommitteeSignature: make([]byte, 48),
|
||||
SyncCommitteeBits: bitfield.Bitvector512{0x01},
|
||||
},
|
||||
ExecutionPayloadHeader: ð.ExecutionPayloadHeader{
|
||||
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
ReceiptsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
LogsBloom: ezDecode(t, "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
|
||||
PrevRandao: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BlockNumber: 1,
|
||||
GasLimit: 1,
|
||||
GasUsed: 1,
|
||||
Timestamp: 1,
|
||||
ExtraData: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BaseFeePerGas: []byte(strconv.FormatUint(1, 10)),
|
||||
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
},
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
}
|
||||
}
|
||||
api/client/builder/errors.go (new file, 10 lines)

```go
package builder

import "github.com/pkg/errors"

// ErrNotOK is used to indicate when an HTTP request to the Beacon Node API failed with any non-2xx response code.
// More specific errors may be returned, but an error in reaction to a non-2xx response will always wrap ErrNotOK.
var ErrNotOK = errors.New("did not receive 2xx response from API")

// ErrNotFound specifically means that a '404 - NOT FOUND' response was received from the API.
var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")
```
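Because ErrNotFound wraps ErrNotOK, callers can branch on either sentinel with errors.Is (the package's own tests use require.ErrorIs the same way). A small illustrative sketch; the endpoint string is hypothetical:

```go
package main

import (
	"context"
	"errors"
	"log"

	"github.com/prysmaticlabs/prysm/api/client/builder"
)

// checkBuilder reports how a Status probe failed, branching on the package's sentinels.
func checkBuilder(ctx context.Context, c *builder.Client) {
	err := c.Status(ctx)
	switch {
	case err == nil:
		log.Println("builder healthy")
	case errors.Is(err, builder.ErrNotFound):
		log.Println("status endpoint not found (404)")
	case errors.Is(err, builder.ErrNotOK):
		log.Printf("builder returned a non-2xx response: %v", err)
	default:
		log.Printf("transport error: %v", err)
	}
}

func main() {
	c, err := builder.NewClient("localhost:18550") // hypothetical builder endpoint
	if err != nil {
		log.Fatal(err)
	}
	checkBuilder(context.Background(), c)
}
```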
api/client/builder/testdata/blinded-block.json (new vendored file, 1 line; file diff suppressed because one or more lines are too long)
api/client/builder/types.go (new file, 572 lines)

```go
package builder

import (
	"encoding/json"
	"fmt"
	"math/big"
	"strconv"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
	eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)

type SignedValidatorRegistration struct {
	*eth.SignedValidatorRegistrationV1
}

type ValidatorRegistration struct {
	*eth.ValidatorRegistrationV1
}

func (r *SignedValidatorRegistration) MarshalJSON() ([]byte, error) {
	return json.Marshal(struct {
		Message   *ValidatorRegistration `json:"message,omitempty"`
		Signature hexutil.Bytes          `json:"signature,omitempty"`
	}{
		Message:   &ValidatorRegistration{r.Message},
		Signature: r.SignedValidatorRegistrationV1.Signature,
	})
}

func (r *ValidatorRegistration) MarshalJSON() ([]byte, error) {
	return json.Marshal(struct {
		FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`
		GasLimit     string        `json:"gas_limit,omitempty"`
		Timestamp    string        `json:"timestamp,omitempty"`
		Pubkey       hexutil.Bytes `json:"pubkey,omitempty"`
	}{
		FeeRecipient: r.FeeRecipient,
		GasLimit:     fmt.Sprintf("%d", r.GasLimit),
		Timestamp:    fmt.Sprintf("%d", r.Timestamp),
		Pubkey:       r.Pubkey,
	})
}

type Uint256 struct {
	*big.Int
}

func stringToUint256(s string) Uint256 {
	bi := new(big.Int)
	bi.SetString(s, 10)
	return Uint256{Int: bi}
}

// sszBytesToUint256 creates a Uint256 from a ssz-style (little-endian byte slice) representation.
func sszBytesToUint256(b []byte) Uint256 {
	bi := new(big.Int)
	return Uint256{Int: bi.SetBytes(b)}
}

// SSZBytes creates an ssz-style (little-endian byte slice) representation of the Uint256
func (s Uint256) SSZBytes() []byte {
	return bytesutil.ReverseByteOrder(s.Int.Bytes())
}

var errUnmarshalUint256Failed = errors.New("unable to UnmarshalText into a Uint256 value")

func (s *Uint256) UnmarshalJSON(t []byte) error {
	start := 0
	end := len(t)
	if t[0] == '"' {
		start += 1
	}
	if t[end-1] == '"' {
		end -= 1
	}
	return s.UnmarshalText(t[start:end])
}

func (s *Uint256) UnmarshalText(t []byte) error {
	if s.Int == nil {
		s.Int = big.NewInt(0)
	}
	z, ok := s.SetString(string(t), 10)
	if !ok {
		return errors.Wrapf(errUnmarshalUint256Failed, "value=%s", string(t))
	}
	s.Int = z
	return nil
}

func (s Uint256) MarshalJSON() ([]byte, error) {
	t, err := s.MarshalText()
	if err != nil {
		return nil, err
	}
	t = append([]byte{'"'}, t...)
	t = append(t, '"')
	return t, nil
}

func (s Uint256) MarshalText() ([]byte, error) {
	return []byte(s.String()), nil
}

type Uint64String uint64

func (s *Uint64String) UnmarshalText(t []byte) error {
	u, err := strconv.ParseUint(string(t), 10, 64)
	*s = Uint64String(u)
	return err
}

func (s Uint64String) MarshalText() ([]byte, error) {
	return []byte(fmt.Sprintf("%d", s)), nil
}
```
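A small sketch of how the exported Uint256 helper behaves at the JSON and SSZ boundaries, exercising only the methods defined above (the input value is illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prysmaticlabs/prysm/api/client/builder"
)

func main() {
	// Builder API JSON carries uint256 quantities (bid value, base_fee_per_gas)
	// as quoted decimal strings.
	in := []byte(`{"value": "452312848583266388373324160190187140051835877600158453279131187530910662656"}`)

	var msg struct {
		Value builder.Uint256 `json:"value"`
	}
	if err := json.Unmarshal(in, &msg); err != nil {
		panic(err)
	}

	// Decimal view, via the embedded big.Int.
	fmt.Println("decimal:", msg.Value.String())

	// SSZ view: the same number as a little-endian byte slice, which is the form
	// the prysm protobuf types expect for these fields.
	fmt.Printf("ssz bytes: %#x\n", msg.Value.SSZBytes())
}
```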
type ExecHeaderResponse struct {
|
||||
Version string `json:"version,omitempty"`
|
||||
Data struct {
|
||||
Signature hexutil.Bytes `json:"signature,omitempty"`
|
||||
Message *BuilderBid `json:"message,omitempty"`
|
||||
} `json:"data,omitempty"`
|
||||
}
|
||||
|
||||
func (ehr *ExecHeaderResponse) ToProto() (*eth.SignedBuilderBid, error) {
|
||||
bb, err := ehr.Data.Message.ToProto()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ð.SignedBuilderBid{
|
||||
Message: bb,
|
||||
Signature: ehr.Data.Signature,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (bb *BuilderBid) ToProto() (*eth.BuilderBid, error) {
|
||||
header, err := bb.Header.ToProto()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ð.BuilderBid{
|
||||
Header: header,
|
||||
Value: bb.Value.SSZBytes(),
|
||||
Pubkey: bb.Pubkey,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (h *ExecutionPayloadHeader) ToProto() (*eth.ExecutionPayloadHeader, error) {
|
||||
return ð.ExecutionPayloadHeader{
|
||||
ParentHash: h.ParentHash,
|
||||
FeeRecipient: h.FeeRecipient,
|
||||
StateRoot: h.StateRoot,
|
||||
ReceiptsRoot: h.ReceiptsRoot,
|
||||
LogsBloom: h.LogsBloom,
|
||||
PrevRandao: h.PrevRandao,
|
||||
BlockNumber: uint64(h.BlockNumber),
|
||||
GasLimit: uint64(h.GasLimit),
|
||||
GasUsed: uint64(h.GasUsed),
|
||||
Timestamp: uint64(h.Timestamp),
|
||||
ExtraData: h.ExtraData,
|
||||
BaseFeePerGas: h.BaseFeePerGas.SSZBytes(),
|
||||
BlockHash: h.BlockHash,
|
||||
TransactionsRoot: h.TransactionsRoot,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type BuilderBid struct {
|
||||
Header *ExecutionPayloadHeader `json:"header,omitempty"`
|
||||
Value Uint256 `json:"value,omitempty"`
|
||||
Pubkey hexutil.Bytes `json:"pubkey,omitempty"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadHeader struct {
|
||||
ParentHash hexutil.Bytes `json:"parent_hash,omitempty"`
|
||||
FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`
|
||||
StateRoot hexutil.Bytes `json:"state_root,omitempty"`
|
||||
ReceiptsRoot hexutil.Bytes `json:"receipts_root,omitempty"`
|
||||
LogsBloom hexutil.Bytes `json:"logs_bloom,omitempty"`
|
||||
PrevRandao hexutil.Bytes `json:"prev_randao,omitempty"`
|
||||
BlockNumber Uint64String `json:"block_number,omitempty"`
|
||||
GasLimit Uint64String `json:"gas_limit,omitempty"`
|
||||
GasUsed Uint64String `json:"gas_used,omitempty"`
|
||||
Timestamp Uint64String `json:"timestamp,omitempty"`
|
||||
ExtraData hexutil.Bytes `json:"extra_data,omitempty"`
|
||||
BaseFeePerGas Uint256 `json:"base_fee_per_gas,omitempty"`
|
||||
BlockHash hexutil.Bytes `json:"block_hash,omitempty"`
|
||||
TransactionsRoot hexutil.Bytes `json:"transactions_root,omitempty"`
|
||||
*eth.ExecutionPayloadHeader
|
||||
}
|
||||
|
||||
func (h *ExecutionPayloadHeader) MarshalJSON() ([]byte, error) {
|
||||
type MarshalCaller ExecutionPayloadHeader
|
||||
return json.Marshal(&MarshalCaller{
|
||||
ParentHash: h.ExecutionPayloadHeader.ParentHash,
|
||||
FeeRecipient: h.ExecutionPayloadHeader.FeeRecipient,
|
||||
StateRoot: h.ExecutionPayloadHeader.StateRoot,
|
||||
ReceiptsRoot: h.ExecutionPayloadHeader.ReceiptsRoot,
|
||||
LogsBloom: h.ExecutionPayloadHeader.LogsBloom,
|
||||
PrevRandao: h.ExecutionPayloadHeader.PrevRandao,
|
||||
BlockNumber: Uint64String(h.ExecutionPayloadHeader.BlockNumber),
|
||||
GasLimit: Uint64String(h.ExecutionPayloadHeader.GasLimit),
|
||||
GasUsed: Uint64String(h.ExecutionPayloadHeader.GasUsed),
|
||||
Timestamp: Uint64String(h.ExecutionPayloadHeader.Timestamp),
|
||||
ExtraData: h.ExecutionPayloadHeader.ExtraData,
|
||||
BaseFeePerGas: sszBytesToUint256(h.ExecutionPayloadHeader.BaseFeePerGas),
|
||||
BlockHash: h.ExecutionPayloadHeader.BlockHash,
|
||||
TransactionsRoot: h.ExecutionPayloadHeader.TransactionsRoot,
|
||||
})
|
||||
}
|
||||
|
||||
type ExecPayloadResponse struct {
|
||||
Version string `json:"version,omitempty"`
|
||||
Data ExecutionPayload `json:"data,omitempty"`
|
||||
}
|
||||
|
||||
type ExecutionPayload struct {
|
||||
ParentHash hexutil.Bytes `json:"parent_hash,omitempty"`
|
||||
FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`
|
||||
StateRoot hexutil.Bytes `json:"state_root,omitempty"`
|
||||
ReceiptsRoot hexutil.Bytes `json:"receipts_root,omitempty"`
|
||||
LogsBloom hexutil.Bytes `json:"logs_bloom,omitempty"`
|
||||
PrevRandao hexutil.Bytes `json:"prev_randao,omitempty"`
|
||||
BlockNumber Uint64String `json:"block_number,omitempty"`
|
||||
GasLimit Uint64String `json:"gas_limit,omitempty"`
|
||||
GasUsed Uint64String `json:"gas_used,omitempty"`
|
||||
Timestamp Uint64String `json:"timestamp,omitempty"`
|
||||
ExtraData hexutil.Bytes `json:"extra_data,omitempty"`
|
||||
BaseFeePerGas Uint256 `json:"base_fee_per_gas,omitempty"`
|
||||
BlockHash hexutil.Bytes `json:"block_hash,omitempty"`
|
||||
Transactions []hexutil.Bytes `json:"transactions,omitempty"`
|
||||
}
|
||||
|
||||
func (r *ExecPayloadResponse) ToProto() (*v1.ExecutionPayload, error) {
|
||||
return r.Data.ToProto()
|
||||
}
|
||||
|
||||
func (p *ExecutionPayload) ToProto() (*v1.ExecutionPayload, error) {
|
||||
txs := make([][]byte, len(p.Transactions))
|
||||
for i := range p.Transactions {
|
||||
txs[i] = p.Transactions[i]
|
||||
}
|
||||
return &v1.ExecutionPayload{
|
||||
ParentHash: p.ParentHash,
|
||||
FeeRecipient: p.FeeRecipient,
|
||||
StateRoot: p.StateRoot,
|
||||
ReceiptsRoot: p.ReceiptsRoot,
|
||||
LogsBloom: p.LogsBloom,
|
||||
PrevRandao: p.PrevRandao,
|
||||
BlockNumber: uint64(p.BlockNumber),
|
||||
GasLimit: uint64(p.GasLimit),
|
||||
GasUsed: uint64(p.GasUsed),
|
||||
Timestamp: uint64(p.Timestamp),
|
||||
ExtraData: p.ExtraData,
|
||||
BaseFeePerGas: p.BaseFeePerGas.SSZBytes(),
|
||||
BlockHash: p.BlockHash,
|
||||
Transactions: txs,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type SignedBlindedBeaconBlockBellatrix struct {
|
||||
*eth.SignedBlindedBeaconBlockBellatrix
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockBellatrix struct {
|
||||
*eth.BlindedBeaconBlockBellatrix
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockBodyBellatrix struct {
|
||||
*eth.BlindedBeaconBlockBodyBellatrix
|
||||
}
|
||||
|
||||
func (r *SignedBlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Message *BlindedBeaconBlockBellatrix `json:"message,omitempty"`
|
||||
Signature hexutil.Bytes `json:"signature,omitempty"`
|
||||
}{
|
||||
Message: &BlindedBeaconBlockBellatrix{r.SignedBlindedBeaconBlockBellatrix.Block},
|
||||
Signature: r.SignedBlindedBeaconBlockBellatrix.Signature,
|
||||
})
|
||||
}
|
||||
|
||||
func (b *BlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index,omitempty"`
|
||||
ParentRoot hexutil.Bytes `json:"parent_root,omitempty"`
|
||||
StateRoot hexutil.Bytes `json:"state_root,omitempty"`
|
||||
Body *BlindedBeaconBlockBodyBellatrix `json:"body,omitempty"`
|
||||
}{
|
||||
Slot: fmt.Sprintf("%d", b.Slot),
|
||||
ProposerIndex: fmt.Sprintf("%d", b.ProposerIndex),
|
||||
ParentRoot: b.ParentRoot,
|
||||
StateRoot: b.StateRoot,
|
||||
Body: &BlindedBeaconBlockBodyBellatrix{b.BlindedBeaconBlockBellatrix.Body},
|
||||
})
|
||||
}
|
||||
|
||||
type ProposerSlashing struct {
|
||||
*eth.ProposerSlashing
|
||||
}
|
||||
|
||||
func (s *ProposerSlashing) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
SignedHeader1 *SignedBeaconBlockHeader `json:"signed_header_1,omitempty"`
|
||||
SignedHeader2 *SignedBeaconBlockHeader `json:"signed_header_2,omitempty"`
|
||||
}{
|
||||
SignedHeader1: &SignedBeaconBlockHeader{s.ProposerSlashing.Header_1},
|
||||
SignedHeader2: &SignedBeaconBlockHeader{s.ProposerSlashing.Header_2},
|
||||
})
|
||||
}
|
||||
|
||||
type SignedBeaconBlockHeader struct {
|
||||
*eth.SignedBeaconBlockHeader
|
||||
}
|
||||
|
||||
func (h *SignedBeaconBlockHeader) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Header *BeaconBlockHeader `json:"message,omitempty"`
|
||||
Signature hexutil.Bytes `json:"signature,omitempty"`
|
||||
}{
|
||||
Header: &BeaconBlockHeader{h.SignedBeaconBlockHeader.Header},
|
||||
Signature: h.SignedBeaconBlockHeader.Signature,
|
||||
})
|
||||
}
|
||||
|
||||
type BeaconBlockHeader struct {
|
||||
*eth.BeaconBlockHeader
|
||||
}
|
||||
|
||||
func (h *BeaconBlockHeader) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Slot string `json:"slot,omitempty"`
|
||||
ProposerIndex string `json:"proposer_index,omitempty"`
|
||||
ParentRoot hexutil.Bytes `json:"parent_root,omitempty"`
|
||||
StateRoot hexutil.Bytes `json:"state_root,omitempty"`
|
||||
BodyRoot hexutil.Bytes `json:"body_root,omitempty"`
|
||||
}{
|
||||
Slot: fmt.Sprintf("%d", h.BeaconBlockHeader.Slot),
|
||||
ProposerIndex: fmt.Sprintf("%d", h.BeaconBlockHeader.ProposerIndex),
|
||||
ParentRoot: h.BeaconBlockHeader.ParentRoot,
|
||||
StateRoot: h.BeaconBlockHeader.StateRoot,
|
||||
BodyRoot: h.BeaconBlockHeader.BodyRoot,
|
||||
})
|
||||
}
|
||||
|
||||
type IndexedAttestation struct {
|
||||
*eth.IndexedAttestation
|
||||
}
|
||||
|
||||
func (a *IndexedAttestation) MarshalJSON() ([]byte, error) {
|
||||
indices := make([]string, len(a.IndexedAttestation.AttestingIndices))
|
||||
for i := range a.IndexedAttestation.AttestingIndices {
|
||||
indices[i] = fmt.Sprintf("%d", a.AttestingIndices[i])
|
||||
}
|
||||
return json.Marshal(struct {
|
||||
AttestingIndices []string `json:"attesting_indices,omitempty"`
|
||||
Data *AttestationData `json:"data,omitempty"`
|
||||
Signature hexutil.Bytes `json:"signature,omitempty"`
|
||||
}{
|
||||
AttestingIndices: indices,
|
||||
Data: &AttestationData{a.IndexedAttestation.Data},
|
||||
Signature: a.IndexedAttestation.Signature,
|
||||
})
|
||||
}
|
||||
|
||||
type AttesterSlashing struct {
|
||||
*eth.AttesterSlashing
|
||||
}
|
||||
|
||||
func (s *AttesterSlashing) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Attestation1 *IndexedAttestation `json:"attestation_1,omitempty"`
|
||||
Attestation2 *IndexedAttestation `json:"attestation_2,omitempty"`
|
||||
}{
|
||||
Attestation1: &IndexedAttestation{s.Attestation_1},
|
||||
Attestation2: &IndexedAttestation{s.Attestation_2},
|
||||
})
|
||||
}
|
||||
|
||||
type Checkpoint struct {
|
||||
*eth.Checkpoint
|
||||
}
|
||||
|
||||
func (c *Checkpoint) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Epoch string `json:"epoch,omitempty"`
|
||||
Root hexutil.Bytes `json:"root,omitempty"`
|
||||
}{
|
||||
Epoch: fmt.Sprintf("%d", c.Checkpoint.Epoch),
|
||||
Root: c.Checkpoint.Root,
|
||||
})
|
||||
}
|
||||
|
||||
type AttestationData struct {
|
||||
*eth.AttestationData
|
||||
}
|
||||
|
||||
func (a *AttestationData) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Slot string `json:"slot,omitempty"`
|
||||
Index string `json:"index,omitempty"`
|
||||
BeaconBlockRoot hexutil.Bytes `json:"beacon_block_root,omitempty"`
|
||||
Source *Checkpoint `json:"source,omitempty"`
|
||||
Target *Checkpoint `json:"target,omitempty"`
|
||||
}{
|
||||
Slot: fmt.Sprintf("%d", a.AttestationData.Slot),
|
||||
Index: fmt.Sprintf("%d", a.AttestationData.CommitteeIndex),
|
||||
BeaconBlockRoot: a.AttestationData.BeaconBlockRoot,
|
||||
Source: &Checkpoint{a.AttestationData.Source},
|
||||
Target: &Checkpoint{a.AttestationData.Target},
|
||||
})
|
||||
}
|
||||
|
||||
type Attestation struct {
|
||||
*eth.Attestation
|
||||
}
|
||||
|
||||
func (a *Attestation) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
AggregationBits hexutil.Bytes `json:"aggregation_bits,omitempty"`
|
||||
Data *AttestationData `json:"data,omitempty"`
|
||||
Signature hexutil.Bytes `json:"signature,omitempty" ssz-size:"96"`
|
||||
}{
|
||||
AggregationBits: hexutil.Bytes(a.Attestation.AggregationBits),
|
||||
Data: &AttestationData{a.Attestation.Data},
|
||||
Signature: a.Attestation.Signature,
|
||||
})
|
||||
}
|
||||
|
||||
type DepositData struct {
|
||||
*eth.Deposit_Data
|
||||
}
|
||||
|
||||
func (d *DepositData) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
PublicKey hexutil.Bytes `json:"pubkey,omitempty"`
|
||||
WithdrawalCredentials hexutil.Bytes `json:"withdrawal_credentials,omitempty"`
|
||||
Amount string `json:"amount,omitempty"`
|
||||
Signature hexutil.Bytes `json:"signature,omitempty"`
|
||||
}{
|
||||
PublicKey: d.PublicKey,
|
||||
WithdrawalCredentials: d.WithdrawalCredentials,
|
||||
Amount: fmt.Sprintf("%d", d.Amount),
|
||||
Signature: d.Signature,
|
||||
})
|
||||
}
|
||||
|
||||
type Deposit struct {
|
||||
*eth.Deposit
|
||||
}
|
||||
|
||||
func (d *Deposit) MarshalJSON() ([]byte, error) {
|
||||
proof := make([]hexutil.Bytes, len(d.Proof))
|
||||
for i := range d.Proof {
|
||||
proof[i] = d.Proof[i]
|
||||
}
|
||||
return json.Marshal(struct {
|
||||
Proof []hexutil.Bytes `json:"proof"`
|
||||
Data *DepositData `json:"data"`
|
||||
}{
|
||||
Proof: proof,
|
||||
Data: &DepositData{Deposit_Data: d.Deposit.Data},
|
||||
})
|
||||
}
|
||||
|
||||
type SignedVoluntaryExit struct {
|
||||
*eth.SignedVoluntaryExit
|
||||
}
|
||||
|
||||
func (sve *SignedVoluntaryExit) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Message *VoluntaryExit `json:"message,omitempty"`
|
||||
Signature hexutil.Bytes `json:"signature,omitempty"`
|
||||
}{
|
||||
Signature: sve.SignedVoluntaryExit.Signature,
|
||||
Message: &VoluntaryExit{sve.SignedVoluntaryExit.Exit},
|
||||
})
|
||||
}
|
||||
|
||||
type VoluntaryExit struct {
|
||||
*eth.VoluntaryExit
|
||||
}
|
||||
|
||||
func (ve *VoluntaryExit) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Epoch string `json:"epoch,omitempty"`
|
||||
ValidatorIndex string `json:"validator_index,omitempty"`
|
||||
}{
|
||||
Epoch: fmt.Sprintf("%d", ve.Epoch),
|
||||
ValidatorIndex: fmt.Sprintf("%d", ve.ValidatorIndex),
|
||||
})
|
||||
}
|
||||
|
||||
type SyncAggregate struct {
|
||||
*eth.SyncAggregate
|
||||
}
|
||||
|
||||
func (s *SyncAggregate) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
SyncCommitteeBits hexutil.Bytes `json:"sync_committee_bits,omitempty"`
|
||||
SyncCommitteeSignature hexutil.Bytes `json:"sync_committee_signature,omitempty"`
|
||||
}{
|
||||
SyncCommitteeBits: hexutil.Bytes(s.SyncAggregate.SyncCommitteeBits),
|
||||
SyncCommitteeSignature: s.SyncAggregate.SyncCommitteeSignature,
|
||||
})
|
||||
}
|
||||
|
||||
type Eth1Data struct {
|
||||
*eth.Eth1Data
|
||||
}
|
||||
|
||||
func (e *Eth1Data) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
DepositRoot hexutil.Bytes `json:"deposit_root,omitempty"`
|
||||
DepositCount string `json:"deposit_count,omitempty"`
|
||||
BlockHash hexutil.Bytes `json:"block_hash,omitempty"`
|
||||
}{
|
||||
DepositRoot: e.DepositRoot,
|
||||
DepositCount: fmt.Sprintf("%d", e.DepositCount),
|
||||
BlockHash: e.BlockHash,
|
||||
})
|
||||
}
|
||||
|
||||
func (b *BlindedBeaconBlockBodyBellatrix) MarshalJSON() ([]byte, error) {
|
||||
sve := make([]*SignedVoluntaryExit, len(b.BlindedBeaconBlockBodyBellatrix.VoluntaryExits))
|
||||
for i := range b.BlindedBeaconBlockBodyBellatrix.VoluntaryExits {
|
||||
sve[i] = &SignedVoluntaryExit{SignedVoluntaryExit: b.BlindedBeaconBlockBodyBellatrix.VoluntaryExits[i]}
|
||||
}
|
||||
deps := make([]*Deposit, len(b.BlindedBeaconBlockBodyBellatrix.Deposits))
|
||||
for i := range b.BlindedBeaconBlockBodyBellatrix.Deposits {
|
||||
deps[i] = &Deposit{Deposit: b.BlindedBeaconBlockBodyBellatrix.Deposits[i]}
|
||||
}
|
||||
atts := make([]*Attestation, len(b.BlindedBeaconBlockBodyBellatrix.Attestations))
|
||||
for i := range b.BlindedBeaconBlockBodyBellatrix.Attestations {
|
||||
atts[i] = &Attestation{Attestation: b.BlindedBeaconBlockBodyBellatrix.Attestations[i]}
|
||||
}
|
||||
atsl := make([]*AttesterSlashing, len(b.BlindedBeaconBlockBodyBellatrix.AttesterSlashings))
|
||||
for i := range b.BlindedBeaconBlockBodyBellatrix.AttesterSlashings {
|
||||
atsl[i] = &AttesterSlashing{AttesterSlashing: b.BlindedBeaconBlockBodyBellatrix.AttesterSlashings[i]}
|
||||
}
|
||||
pros := make([]*ProposerSlashing, len(b.BlindedBeaconBlockBodyBellatrix.ProposerSlashings))
|
||||
for i := range b.BlindedBeaconBlockBodyBellatrix.ProposerSlashings {
|
||||
pros[i] = &ProposerSlashing{ProposerSlashing: b.BlindedBeaconBlockBodyBellatrix.ProposerSlashings[i]}
|
||||
}
|
||||
return json.Marshal(struct {
|
||||
RandaoReveal hexutil.Bytes `json:"randao_reveal,omitempty"`
|
||||
Eth1Data *Eth1Data `json:"eth1_data,omitempty"`
|
||||
Graffiti hexutil.Bytes `json:"graffiti,omitempty"`
|
||||
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings,omitempty"`
|
||||
AttesterSlashings []*AttesterSlashing `json:"attester_slashings,omitempty"`
|
||||
Attestations []*Attestation `json:"attestations,omitempty"`
|
||||
Deposits []*Deposit `json:"deposits,omitempty"`
|
||||
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits,omitempty"`
|
||||
SyncAggregate *SyncAggregate `json:"sync_aggregate,omitempty"`
|
||||
ExecutionPayloadHeader *ExecutionPayloadHeader `json:"execution_payload_header,omitempty"`
|
||||
}{
|
||||
RandaoReveal: b.RandaoReveal,
|
||||
Eth1Data: &Eth1Data{b.BlindedBeaconBlockBodyBellatrix.Eth1Data},
|
||||
Graffiti: b.BlindedBeaconBlockBodyBellatrix.Graffiti,
|
||||
ProposerSlashings: pros,
|
||||
AttesterSlashings: atsl,
|
||||
Attestations: atts,
|
||||
Deposits: deps,
|
||||
VoluntaryExits: sve,
|
||||
SyncAggregate: &SyncAggregate{b.BlindedBeaconBlockBodyBellatrix.SyncAggregate},
|
||||
ExecutionPayloadHeader: &ExecutionPayloadHeader{ExecutionPayloadHeader: b.BlindedBeaconBlockBodyBellatrix.ExecutionPayloadHeader},
|
||||
})
|
||||
}
|
||||
api/client/builder/types_test.go (new file, 693 lines)
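The unmarshal tests in this file follow a table-driven shape: decode one fixture, then walk a slice of {expected, actual, name} cases. A minimal standalone sketch of that pattern (the fixture and fields below are illustrative, not the prysm types):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Illustrative fixture standing in for the builder API JSON used by the tests.
	fixture := []byte(`{"version":"bellatrix","data":{"pubkey":"0x93"}}`)
	var got struct {
		Version string `json:"version"`
		Data    struct {
			Pubkey string `json:"pubkey"`
		} `json:"data"`
	}
	if err := json.Unmarshal(fixture, &got); err != nil {
		panic(err)
	}

	cases := []struct {
		expected string
		actual   string
		name     string
	}{
		{expected: "bellatrix", actual: got.Version, name: "Version"},
		{expected: "0x93", actual: got.Data.Pubkey, name: "Data.Pubkey"},
	}
	for _, c := range cases {
		if c.expected != c.actual {
			fmt.Printf("FAIL %s: expected %q, got %q\n", c.name, c.expected, c.actual)
			continue
		}
		fmt.Printf("ok %s\n", c.name)
	}
}
```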
|
||||
package builder
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
|
||||
func ezDecode(t *testing.T, s string) []byte {
|
||||
v, err := hexutil.Decode(s)
|
||||
require.NoError(t, err)
|
||||
return v
|
||||
}
|
||||
|
||||
func TestSignedValidatorRegistration_MarshalJSON(t *testing.T) {
|
||||
svr := ð.SignedValidatorRegistrationV1{
|
||||
Message: ð.ValidatorRegistrationV1{
|
||||
FeeRecipient: make([]byte, 20),
|
||||
GasLimit: 0,
|
||||
Timestamp: 0,
|
||||
Pubkey: make([]byte, 48),
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
je, err := json.Marshal(&SignedValidatorRegistration{SignedValidatorRegistrationV1: svr})
|
||||
require.NoError(t, err)
|
||||
// decode with a struct w/ plain strings so we can check the string encoding of the hex fields
|
||||
un := struct {
|
||||
Message struct {
|
||||
FeeRecipient string `json:"fee_recipient"`
|
||||
Pubkey string `json:"pubkey"`
|
||||
} `json:"message"`
|
||||
Signature string `json:"signature"`
|
||||
}{}
|
||||
require.NoError(t, json.Unmarshal(je, &un))
|
||||
require.Equal(t, "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", un.Signature)
|
||||
require.Equal(t, "0x0000000000000000000000000000000000000000", un.Message.FeeRecipient)
|
||||
require.Equal(t, "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", un.Message.Pubkey)
|
||||
}
|
||||
|
||||
var testExampleHeaderResponse = `{
|
||||
"version": "bellatrix",
|
||||
"data": {
|
||||
"message": {
|
||||
"header": {
|
||||
"parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
|
||||
"state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"block_number": "1",
|
||||
"gas_limit": "1",
|
||||
"gas_used": "1",
|
||||
"timestamp": "1",
|
||||
"extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"base_fee_per_gas": "452312848583266388373324160190187140051835877600158453279131187530910662656",
|
||||
"block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"transactions_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
},
|
||||
"value": "652312848583266388373324160190187140051835877600158453279131187530910662656",
|
||||
"pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
|
||||
},
|
||||
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
|
||||
}
|
||||
}`
|
||||
|
||||
func TestExecutionHeaderResponseUnmarshal(t *testing.T) {
|
||||
hr := &ExecHeaderResponse{}
|
||||
require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponse), hr))
|
||||
cases := []struct {
|
||||
expected string
|
||||
actual string
|
||||
name string
|
||||
}{
|
||||
{
|
||||
expected: "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505",
|
||||
actual: hexutil.Encode(hr.Data.Signature),
|
||||
name: "Signature",
|
||||
},
|
||||
{
|
||||
expected: "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
|
||||
actual: hexutil.Encode(hr.Data.Message.Pubkey),
|
||||
name: "ExecHeaderResponse.Pubkey",
|
||||
},
|
||||
{
|
||||
expected: "652312848583266388373324160190187140051835877600158453279131187530910662656",
|
||||
actual: hr.Data.Message.Value.String(),
|
||||
name: "ExecHeaderResponse.Value",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(hr.Data.Message.Header.ParentHash),
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.ParentHash",
|
||||
},
|
||||
{
|
||||
expected: "0xabcf8e0d4e9587369b2301d0790347320302cc09",
|
||||
actual: hexutil.Encode(hr.Data.Message.Header.FeeRecipient),
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.FeeRecipient",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(hr.Data.Message.Header.StateRoot),
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.StateRoot",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(hr.Data.Message.Header.ReceiptsRoot),
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.ReceiptsRoot",
|
||||
},
|
||||
{
|
||||
expected: "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
actual: hexutil.Encode(hr.Data.Message.Header.LogsBloom),
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.LogsBloom",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(hr.Data.Message.Header.PrevRandao),
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.PrevRandao",
|
||||
},
|
||||
{
|
||||
expected: "1",
|
||||
actual: fmt.Sprintf("%d", hr.Data.Message.Header.BlockNumber),
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.BlockNumber",
|
||||
},
|
||||
{
|
||||
expected: "1",
|
||||
actual: fmt.Sprintf("%d", hr.Data.Message.Header.GasLimit),
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.GasLimit",
|
||||
},
|
||||
{
|
||||
expected: "1",
|
||||
actual: fmt.Sprintf("%d", hr.Data.Message.Header.GasUsed),
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.GasUsed",
|
||||
},
|
||||
{
|
||||
expected: "1",
|
||||
actual: fmt.Sprintf("%d", hr.Data.Message.Header.Timestamp),
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.Timestamp",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(hr.Data.Message.Header.ExtraData),
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.ExtraData",
|
||||
},
|
||||
{
|
||||
expected: "452312848583266388373324160190187140051835877600158453279131187530910662656",
|
||||
actual: fmt.Sprintf("%d", hr.Data.Message.Header.BaseFeePerGas),
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.BaseFeePerGas",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(hr.Data.Message.Header.BlockHash),
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.BlockHash",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(hr.Data.Message.Header.TransactionsRoot),
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.TransactionsRoot",
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
require.Equal(t, c.expected, c.actual, fmt.Sprintf("unexpected value for field %s", c.name))
|
||||
}
|
||||
}
|
||||
|
||||
func TestExecutionHeaderResponseToProto(t *testing.T) {
|
||||
bfpg := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||
v := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||
hr := &ExecHeaderResponse{}
|
||||
require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponse), hr))
|
||||
p, err := hr.ToProto()
|
||||
require.NoError(t, err)
|
||||
signature, err := hexutil.Decode("0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505")
|
||||
require.NoError(t, err)
|
||||
pubkey, err := hexutil.Decode("0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
|
||||
require.NoError(t, err)
|
||||
parentHash, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
feeRecipient, err := hexutil.Decode("0xabcf8e0d4e9587369b2301d0790347320302cc09")
|
||||
require.NoError(t, err)
|
||||
stateRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
receiptsRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
logsBloom, err := hexutil.Decode("0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
|
||||
require.NoError(t, err)
|
||||
prevRandao, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
extraData, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
blockHash, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
txRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := &eth.SignedBuilderBid{
Message: &eth.BuilderBid{
Header: &eth.ExecutionPayloadHeader{
|
||||
ParentHash: parentHash,
|
||||
FeeRecipient: feeRecipient,
|
||||
StateRoot: stateRoot,
|
||||
ReceiptsRoot: receiptsRoot,
|
||||
LogsBloom: logsBloom,
|
||||
PrevRandao: prevRandao,
|
||||
BlockNumber: 1,
|
||||
GasLimit: 1,
|
||||
GasUsed: 1,
|
||||
Timestamp: 1,
|
||||
ExtraData: extraData,
|
||||
BaseFeePerGas: bfpg.SSZBytes(),
|
||||
BlockHash: blockHash,
|
||||
TransactionsRoot: txRoot,
|
||||
},
|
||||
Value: v.SSZBytes(),
|
||||
Pubkey: pubkey,
|
||||
},
|
||||
Signature: signature,
|
||||
}
|
||||
require.DeepEqual(t, expected, p)
|
||||
}
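// Illustrative sketch (not part of this change): the round-trip exercised above is
// the intended consumption path for a builder getHeader response. decodeBid is a
// hypothetical helper name; it only reuses ExecHeaderResponse and ToProto exactly
// as the test does.
func decodeBid(body []byte) (*eth.SignedBuilderBid, error) {
	hr := &ExecHeaderResponse{}
	if err := json.Unmarshal(body, hr); err != nil {
		return nil, err
	}
	return hr.ToProto()
}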
var testExampleExecutionPayload = `{
|
||||
"version": "bellatrix",
|
||||
"data": {
|
||||
"parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
|
||||
"state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"block_number": "1",
|
||||
"gas_limit": "1",
|
||||
"gas_used": "1",
|
||||
"timestamp": "1",
|
||||
"extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"base_fee_per_gas": "452312848583266388373324160190187140051835877600158453279131187530910662656",
|
||||
"block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"transactions": [
|
||||
"0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86"
|
||||
]
|
||||
}
|
||||
}`
|
||||
|
||||
func TestExecutionPayloadResponseUnmarshal(t *testing.T) {
|
||||
epr := &ExecPayloadResponse{}
|
||||
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayload), epr))
|
||||
cases := []struct {
|
||||
expected string
|
||||
actual string
|
||||
name string
|
||||
}{
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(epr.Data.ParentHash),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.ParentHash",
|
||||
},
|
||||
{
|
||||
expected: "0xabcf8e0d4e9587369b2301d0790347320302cc09",
|
||||
actual: hexutil.Encode(epr.Data.FeeRecipient),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.FeeRecipient",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(epr.Data.StateRoot),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.StateRoot",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(epr.Data.ReceiptsRoot),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.ReceiptsRoot",
|
||||
},
|
||||
{
|
||||
expected: "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
actual: hexutil.Encode(epr.Data.LogsBloom),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.LogsBloom",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(epr.Data.PrevRandao),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.PrevRandao",
|
||||
},
|
||||
{
|
||||
expected: "1",
|
||||
actual: fmt.Sprintf("%d", epr.Data.BlockNumber),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.BlockNumber",
|
||||
},
|
||||
{
|
||||
expected: "1",
|
||||
actual: fmt.Sprintf("%d", epr.Data.GasLimit),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.GasLimit",
|
||||
},
|
||||
{
|
||||
expected: "1",
|
||||
actual: fmt.Sprintf("%d", epr.Data.GasUsed),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.GasUsed",
|
||||
},
|
||||
{
|
||||
expected: "1",
|
||||
actual: fmt.Sprintf("%d", epr.Data.Timestamp),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.Timestamp",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(epr.Data.ExtraData),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.ExtraData",
|
||||
},
|
||||
{
|
||||
expected: "452312848583266388373324160190187140051835877600158453279131187530910662656",
|
||||
actual: fmt.Sprintf("%d", epr.Data.BaseFeePerGas),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.BaseFeePerGas",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(epr.Data.BlockHash),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.BlockHash",
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
require.Equal(t, c.expected, c.actual, fmt.Sprintf("unexpected value for field %s", c.name))
|
||||
}
|
||||
require.Equal(t, 1, len(epr.Data.Transactions))
|
||||
txHash := "0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86"
|
||||
require.Equal(t, txHash, hexutil.Encode(epr.Data.Transactions[0]))
|
||||
}
|
||||
|
||||
func TestExecutionPayloadResponseToProto(t *testing.T) {
|
||||
hr := &ExecPayloadResponse{}
|
||||
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayload), hr))
|
||||
p, err := hr.ToProto()
|
||||
require.NoError(t, err)
|
||||
|
||||
parentHash, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
feeRecipient, err := hexutil.Decode("0xabcf8e0d4e9587369b2301d0790347320302cc09")
|
||||
require.NoError(t, err)
|
||||
stateRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
receiptsRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
logsBloom, err := hexutil.Decode("0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
|
||||
require.NoError(t, err)
|
||||
prevRandao, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
extraData, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
blockHash, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
|
||||
tx, err := hexutil.Decode("0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86")
|
||||
require.NoError(t, err)
|
||||
txList := [][]byte{tx}
|
||||
|
||||
bfpg := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||
expected := &v1.ExecutionPayload{
|
||||
ParentHash: parentHash,
|
||||
FeeRecipient: feeRecipient,
|
||||
StateRoot: stateRoot,
|
||||
ReceiptsRoot: receiptsRoot,
|
||||
LogsBloom: logsBloom,
|
||||
PrevRandao: prevRandao,
|
||||
BlockNumber: 1,
|
||||
GasLimit: 1,
|
||||
GasUsed: 1,
|
||||
Timestamp: 1,
|
||||
ExtraData: extraData,
|
||||
BaseFeePerGas: bfpg.SSZBytes(),
|
||||
BlockHash: blockHash,
|
||||
Transactions: txList,
|
||||
}
|
||||
require.DeepEqual(t, expected, p)
|
||||
}
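// Illustrative sketch (not part of this change): the fixtures above rely on
// stringToUint256(...).SSZBytes() to turn the decimal base_fee_per_gas string into
// the 32-byte field carried by the protobuf payload. The helper below shows the same
// conversion with math/big only, assuming (as the fixtures suggest) that the target
// encoding is 32-byte little-endian; decimalToLittleEndian32 is a hypothetical name.
func decimalToLittleEndian32(s string) ([]byte, error) {
	bi, ok := new(big.Int).SetString(s, 10)
	if !ok {
		return nil, fmt.Errorf("not a base-10 integer: %q", s)
	}
	be := bi.Bytes() // minimal big-endian representation
	if len(be) > 32 {
		return nil, fmt.Errorf("value does not fit in 256 bits")
	}
	out := make([]byte, 32)
	for i, b := range be {
		out[len(be)-1-i] = b // reverse into little-endian order; high bytes stay zero
	}
	return out, nil
}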
func pbEth1Data(t *testing.T) *eth.Eth1Data {
|
||||
return &eth.Eth1Data{
|
||||
DepositRoot: make([]byte, 32),
|
||||
DepositCount: 23,
|
||||
BlockHash: make([]byte, 32),
|
||||
}
|
||||
}
|
||||
|
||||
func TestEth1DataMarshal(t *testing.T) {
|
||||
ed := &Eth1Data{
|
||||
Eth1Data: pbEth1Data(t),
|
||||
}
|
||||
b, err := json.Marshal(ed)
|
||||
require.NoError(t, err)
|
||||
expected := `{"deposit_root":"0x0000000000000000000000000000000000000000000000000000000000000000","deposit_count":"23","block_hash":"0x0000000000000000000000000000000000000000000000000000000000000000"}`
|
||||
require.Equal(t, expected, string(b))
|
||||
}
|
||||
|
||||
func pbSyncAggregate() *eth.SyncAggregate {
|
||||
return &eth.SyncAggregate{
|
||||
SyncCommitteeSignature: make([]byte, 48),
|
||||
SyncCommitteeBits: bitfield.Bitvector512{0x01},
|
||||
}
|
||||
}
|
||||
|
||||
func TestSyncAggregate_MarshalJSON(t *testing.T) {
|
||||
sa := &SyncAggregate{pbSyncAggregate()}
|
||||
b, err := json.Marshal(sa)
|
||||
require.NoError(t, err)
|
||||
expected := `{"sync_committee_bits":"0x01","sync_committee_signature":"0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"}`
|
||||
require.Equal(t, expected, string(b))
|
||||
}
|
||||
|
||||
func pbDeposit(t *testing.T) *eth.Deposit {
|
||||
return &eth.Deposit{
|
||||
Proof: [][]byte{ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")},
|
||||
Data: ð.Deposit_Data{
|
||||
PublicKey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
|
||||
WithdrawalCredentials: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Amount: 1,
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeposit_MarshalJSON(t *testing.T) {
|
||||
d := &Deposit{
|
||||
Deposit: pbDeposit(t),
|
||||
}
|
||||
b, err := json.Marshal(d)
|
||||
require.NoError(t, err)
|
||||
expected := `{"proof":["0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"],"data":{"pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a","withdrawal_credentials":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","amount":"1","signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}}`
|
||||
require.Equal(t, expected, string(b))
|
||||
}
|
||||
|
||||
func pbSignedVoluntaryExit(t *testing.T) *eth.SignedVoluntaryExit {
|
||||
return &eth.SignedVoluntaryExit{
|
||||
Exit: ð.VoluntaryExit{
|
||||
Epoch: 1,
|
||||
ValidatorIndex: 1,
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
}
|
||||
}
|
||||
|
||||
func TestVoluntaryExit(t *testing.T) {
|
||||
ve := &SignedVoluntaryExit{
|
||||
SignedVoluntaryExit: pbSignedVoluntaryExit(t),
|
||||
}
|
||||
b, err := json.Marshal(ve)
|
||||
require.NoError(t, err)
|
||||
expected := `{"message":{"epoch":"1","validator_index":"1"},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}`
|
||||
require.Equal(t, expected, string(b))
|
||||
}
|
||||
|
||||
func pbAttestation(t *testing.T) *eth.Attestation {
|
||||
return &eth.Attestation{
|
||||
AggregationBits: bitfield.Bitlist{0x01},
|
||||
Data: ð.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Source: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Target: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttestationMarshal(t *testing.T) {
|
||||
a := &Attestation{
|
||||
Attestation: pbAttestation(t),
|
||||
}
|
||||
b, err := json.Marshal(a)
|
||||
require.NoError(t, err)
|
||||
expected := `{"aggregation_bits":"0x01","data":{"slot":"1","index":"1","beacon_block_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","source":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"},"target":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}`
|
||||
require.Equal(t, expected, string(b))
|
||||
}
|
||||
|
||||
func pbAttesterSlashing(t *testing.T) *eth.AttesterSlashing {
|
||||
return &eth.AttesterSlashing{
|
||||
Attestation_1: ð.IndexedAttestation{
|
||||
AttestingIndices: []uint64{1},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
Data: ð.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Source: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Target: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
},
|
||||
},
|
||||
Attestation_2: ð.IndexedAttestation{
|
||||
AttestingIndices: []uint64{1},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
Data: ð.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Source: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Target: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttesterSlashing_MarshalJSON(t *testing.T) {
|
||||
as := &AttesterSlashing{
|
||||
AttesterSlashing: pbAttesterSlashing(t),
|
||||
}
|
||||
b, err := json.Marshal(as)
|
||||
require.NoError(t, err)
|
||||
expected := `{"attestation_1":{"attesting_indices":["1"],"data":{"slot":"1","index":"1","beacon_block_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","source":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"},"target":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"},"attestation_2":{"attesting_indices":["1"],"data":{"slot":"1","index":"1","beacon_block_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","source":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"},"target":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}}`
|
||||
require.Equal(t, expected, string(b))
|
||||
}
|
||||
|
||||
func pbProposerSlashing(t *testing.T) *eth.ProposerSlashing {
|
||||
return &eth.ProposerSlashing{
|
||||
Header_1: ð.SignedBeaconBlockHeader{
|
||||
Header: ð.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
Header_2: ð.SignedBeaconBlockHeader{
|
||||
Header: ð.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestProposerSlashings(t *testing.T) {
|
||||
ps := &ProposerSlashing{ProposerSlashing: pbProposerSlashing(t)}
|
||||
b, err := json.Marshal(ps)
|
||||
require.NoError(t, err)
|
||||
expected := `{"signed_header_1":{"message":{"slot":"1","proposer_index":"1","parent_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","body_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"},"signed_header_2":{"message":{"slot":"1","proposer_index":"1","parent_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","body_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}}`
|
||||
require.Equal(t, expected, string(b))
|
||||
}
|
||||
|
||||
func pbExecutionPayloadHeader(t *testing.T) *eth.ExecutionPayloadHeader {
|
||||
bfpg := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||
return &eth.ExecutionPayloadHeader{
|
||||
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
ReceiptsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
LogsBloom: ezDecode(t, "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
|
||||
PrevRandao: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BlockNumber: 1,
|
||||
GasLimit: 1,
|
||||
GasUsed: 1,
|
||||
Timestamp: 1,
|
||||
ExtraData: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BaseFeePerGas: bfpg.SSZBytes(),
|
||||
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
}
|
||||
}
|
||||
|
||||
func TestExecutionPayloadHeader_MarshalJSON(t *testing.T) {
|
||||
h := &ExecutionPayloadHeader{
|
||||
ExecutionPayloadHeader: pbExecutionPayloadHeader(t),
|
||||
}
|
||||
b, err := json.Marshal(h)
|
||||
require.NoError(t, err)
|
||||
expected := `{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"452312848583266388373324160190187140051835877600158453279131187530910662656","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}`
|
||||
require.Equal(t, expected, string(b))
|
||||
}
|
||||
|
||||
var testBuilderBid = `{
|
||||
"version":"bellatrix",
|
||||
"data":{
|
||||
"message":{
|
||||
"header":{
|
||||
"parent_hash":"0xa0513a503d5bd6e89a144c3268e5b7e9da9dbf63df125a360e3950a7d0d67131",
|
||||
"fee_recipient":"0xdfb434922631787e43725c6b926e989875125751",
|
||||
"state_root":"0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45",
|
||||
"receipts_root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
|
||||
"logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"prev_randao":"0xc2fa210081542a87f334b7b14a2da3275e4b281dd77b007bcfcb10e34c42052e",
|
||||
"block_number":"1",
|
||||
"gas_limit":"10000000",
|
||||
"gas_used":"0",
|
||||
"timestamp":"4660",
|
||||
"extra_data":"0x",
|
||||
"base_fee_per_gas":"7",
|
||||
"block_hash":"0x10746fa06c248e7eacd4ff8ad8b48a826c227387ee31a6aa5eb4d83ddad34f07",
|
||||
"transactions_root":"0x7ffe241ea60187fdb0187bfa22de35d1f9bed7ab061d9401fd47e34a54fbede1"
|
||||
},
|
||||
"value":"452312848583266388373324160190187140051835877600158453279131187530910662656",
|
||||
"pubkey":"0x8645866c95cbc2e08bc77ccad473540eddf4a1f51a2a8edc8d7a673824218f7f68fe565f1ab38dadd5c855b45bbcec95"
|
||||
},
|
||||
"signature":"0x9183ebc1edf9c3ab2bbd7abdc3b59c6b249d6647b5289a97eea36d9d61c47f12e283f64d928b1e7f5b8a5182b714fa921954678ea28ca574f5f232b2f78cf8900915a2993b396e3471e0655291fec143a300d41408f66478c8208e0f9be851dc"
|
||||
}
|
||||
}`
|
||||
|
||||
func TestBuilderBidUnmarshalUint256(t *testing.T) {
|
||||
base10 := "452312848583266388373324160190187140051835877600158453279131187530910662656"
|
||||
var expectedValue big.Int
|
||||
require.NoError(t, expectedValue.UnmarshalText([]byte(base10)))
|
||||
r := &ExecHeaderResponse{}
|
||||
require.NoError(t, json.Unmarshal([]byte(testBuilderBid), r))
|
||||
//require.Equal(t, expectedValue, r.Data.Message.Value)
|
||||
marshaled := r.Data.Message.Value.String()
|
||||
require.Equal(t, base10, marshaled)
|
||||
require.Equal(t, 0, expectedValue.Cmp(r.Data.Message.Value.Int))
|
||||
}
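// Illustrative sketch (not part of this change): because the bid value decodes into
// a Uint256 backed by *big.Int, comparing bids reduces to big.Int arithmetic on the
// fields the test above touches. higherBid is a hypothetical helper name.
func higherBid(a, b *ExecHeaderResponse) *ExecHeaderResponse {
	if a.Data.Message.Value.Int.Cmp(b.Data.Message.Value.Int) >= 0 {
		return a
	}
	return b
}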
func TestMathBigUnmarshal(t *testing.T) {
|
||||
base10 := "452312848583266388373324160190187140051835877600158453279131187530910662656"
|
||||
var expectedValue big.Int
|
||||
require.NoError(t, expectedValue.UnmarshalText([]byte(base10)))
|
||||
marshaled, err := expectedValue.MarshalText()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, base10, string(marshaled))
|
||||
|
||||
var u256 Uint256
|
||||
require.NoError(t, u256.UnmarshalText([]byte("452312848583266388373324160190187140051835877600158453279131187530910662656")))
|
||||
}
|
||||
|
||||
func TestUint256Unmarshal(t *testing.T) {
|
||||
base10 := "452312848583266388373324160190187140051835877600158453279131187530910662656"
|
||||
bi := new(big.Int)
|
||||
bi, ok := bi.SetString(base10, 10)
|
||||
require.Equal(t, true, ok)
|
||||
s := struct {
|
||||
BigNumber Uint256 `json:"big_number"`
|
||||
}{
|
||||
BigNumber: Uint256{Int: bi},
|
||||
}
|
||||
m, err := json.Marshal(s)
|
||||
require.NoError(t, err)
|
||||
expected := `{"big_number":"452312848583266388373324160190187140051835877600158453279131187530910662656"}`
|
||||
require.Equal(t, expected, string(m))
|
||||
}
|
||||
|
||||
func TestMarshalBlindedBeaconBlockBodyBellatrix(t *testing.T) {
|
||||
expected, err := os.ReadFile("testdata/blinded-block.json")
|
||||
require.NoError(t, err)
|
||||
b := &BlindedBeaconBlockBellatrix{BlindedBeaconBlockBellatrix: &eth.BlindedBeaconBlockBellatrix{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Body: ð.BlindedBeaconBlockBodyBellatrix{
|
||||
RandaoReveal: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
Eth1Data: pbEth1Data(t),
|
||||
Graffiti: ezDecode(t, "0xdeadbeefc0ffee"),
|
||||
ProposerSlashings: []*eth.ProposerSlashing{pbProposerSlashing(t)},
|
||||
AttesterSlashings: []*eth.AttesterSlashing{pbAttesterSlashing(t)},
|
||||
Attestations: []*eth.Attestation{pbAttestation(t)},
|
||||
Deposits: []*eth.Deposit{pbDeposit(t)},
|
||||
VoluntaryExits: []*eth.SignedVoluntaryExit{pbSignedVoluntaryExit(t)},
|
||||
SyncAggregate: pbSyncAggregate(),
|
||||
ExecutionPayloadHeader: pbExecutionPayloadHeader(t),
|
||||
},
|
||||
}}
|
||||
m, err := json.Marshal(b)
|
||||
require.NoError(t, err)
|
||||
// string error output is easier to deal with
|
||||
// -1 end slice index on expected is to get rid of trailing newline
|
||||
// if you update this fixture and this test breaks, you probably removed the trailing newline
|
||||
require.Equal(t, string(expected[0:len(expected)-1]), string(m))
|
||||
}
|
||||
@@ -110,6 +110,7 @@ go_test(
|
||||
"log_test.go",
|
||||
"metrics_test.go",
|
||||
"mock_test.go",
|
||||
"new_slot_test.go",
|
||||
"pow_block_test.go",
|
||||
"process_attestation_test.go",
|
||||
"process_block_test.go",
|
||||
@@ -137,6 +138,7 @@ go_test(
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
@@ -188,6 +190,7 @@ go_test(
|
||||
"//beacon-chain/powchain/testing:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
|
||||
@@ -4,6 +4,8 @@ import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/store"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
@@ -20,7 +22,7 @@ import (
|
||||
)
|
||||
|
||||
// ChainInfoFetcher defines a common interface for methods in blockchain service which
-// directly retrieves chain info related data.
+// directly retrieve chain info related data.
|
||||
type ChainInfoFetcher interface {
|
||||
HeadFetcher
|
||||
FinalizationFetcher
|
||||
@@ -31,6 +33,12 @@ type ChainInfoFetcher interface {
|
||||
HeadDomainFetcher
|
||||
}
|
||||
|
||||
// HeadUpdater defines a common interface for methods in blockchain service
|
||||
// which allow to update the head info
|
||||
type HeadUpdater interface {
|
||||
UpdateHead(context.Context) error
|
||||
}
|
||||
|
||||
// TimeFetcher retrieves the Ethereum consensus data that's related to time.
|
||||
type TimeFetcher interface {
|
||||
GenesisTime() time.Time
|
||||
@@ -43,7 +51,7 @@ type GenesisFetcher interface {
|
||||
}
|
||||
|
||||
// HeadFetcher defines a common interface for methods in blockchain service which
-// directly retrieves head related data.
+// directly retrieve head related data.
|
||||
type HeadFetcher interface {
|
||||
HeadSlot() types.Slot
|
||||
HeadRoot(ctx context.Context) ([]byte, error)
|
||||
@@ -55,8 +63,6 @@ type HeadFetcher interface {
|
||||
HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (types.ValidatorIndex, bool)
|
||||
HeadValidatorIndexToPublicKey(ctx context.Context, index types.ValidatorIndex) ([fieldparams.BLSPubkeyLength]byte, error)
|
||||
ChainHeads() ([][32]byte, []types.Slot)
|
||||
IsOptimistic(ctx context.Context) (bool, error)
|
||||
IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error)
|
||||
HeadSyncCommitteeFetcher
|
||||
HeadDomainFetcher
|
||||
}
|
||||
@@ -73,53 +79,62 @@ type CanonicalFetcher interface {
|
||||
}
|
||||
|
||||
// FinalizationFetcher defines a common interface for methods in blockchain service which
-// directly retrieves finalization and justification related data.
+// directly retrieve finalization and justification related data.
|
||||
type FinalizationFetcher interface {
|
||||
-	FinalizedCheckpt() *ethpb.Checkpoint
-	CurrentJustifiedCheckpt() *ethpb.Checkpoint
-	PreviousJustifiedCheckpt() *ethpb.Checkpoint
+	FinalizedCheckpt() (*ethpb.Checkpoint, error)
+	CurrentJustifiedCheckpt() (*ethpb.Checkpoint, error)
+	PreviousJustifiedCheckpt() (*ethpb.Checkpoint, error)
|
||||
VerifyFinalizedBlkDescendant(ctx context.Context, blockRoot [32]byte) error
|
||||
}
|
||||
|
||||
// OptimisticModeFetcher retrieves information about optimistic status of the node.
|
||||
type OptimisticModeFetcher interface {
|
||||
IsOptimistic(ctx context.Context) (bool, error)
|
||||
IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error)
|
||||
}
|
||||
|
||||
// FinalizedCheckpt returns the latest finalized checkpoint from chain store.
-func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
-	cp := s.store.FinalizedCheckpt()
-	if cp == nil {
-		return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+func (s *Service) FinalizedCheckpt() (*ethpb.Checkpoint, error) {
+	cp, err := s.store.FinalizedCheckpt()
+	if err != nil {
+		return nil, err
	}

-	return ethpb.CopyCheckpoint(cp)
+	return ethpb.CopyCheckpoint(cp), nil
}
|
||||
|
||||
// CurrentJustifiedCheckpt returns the current justified checkpoint from chain store.
-func (s *Service) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
-	cp := s.store.JustifiedCheckpt()
-	if cp == nil {
-		return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+func (s *Service) CurrentJustifiedCheckpt() (*ethpb.Checkpoint, error) {
+	cp, err := s.store.JustifiedCheckpt()
+	if err != nil {
+		return nil, err
	}

-	return ethpb.CopyCheckpoint(cp)
+	return ethpb.CopyCheckpoint(cp), nil
}
|
||||
|
||||
// PreviousJustifiedCheckpt returns the previous justified checkpoint from chain store.
-func (s *Service) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
-	cp := s.store.PrevJustifiedCheckpt()
-	if cp == nil {
-		return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+func (s *Service) PreviousJustifiedCheckpt() (*ethpb.Checkpoint, error) {
+	cp, err := s.store.PrevJustifiedCheckpt()
+	if err != nil {
+		return nil, err
	}

-	return ethpb.CopyCheckpoint(cp)
+	return ethpb.CopyCheckpoint(cp), nil
}
|
||||
|
||||
// BestJustifiedCheckpt returns the best justified checkpoint from store.
-func (s *Service) BestJustifiedCheckpt() *ethpb.Checkpoint {
-	cp := s.store.BestJustifiedCheckpt()
-	// If there is no best justified checkpoint, return the checkpoint with root as zeros to be used for genesis cases.
-	if cp == nil {
-		return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+func (s *Service) BestJustifiedCheckpt() (*ethpb.Checkpoint, error) {
+	cp, err := s.store.BestJustifiedCheckpt()
+	if err != nil {
+		// If there is no best justified checkpoint, return the checkpoint with root as zeros to be used for genesis cases.
+		if errors.Is(err, store.ErrNilCheckpoint) {
+			return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}, nil
+		}
+		return nil, err
	}

-	return ethpb.CopyCheckpoint(cp)
+	return ethpb.CopyCheckpoint(cp), nil
}
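// Illustrative sketch (not part of this change): the accessors above now return an
// error instead of silently substituting a zero-root checkpoint, so call sites have
// to branch on it. finalizedEpoch is a hypothetical helper showing the new pattern.
func finalizedEpoch(s *Service) (types.Epoch, error) {
	cp, err := s.FinalizedCheckpt()
	if err != nil {
		return 0, err
	}
	return cp.Epoch, nil
}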
// HeadSlot returns the slot of the head of the chain.
|
||||
@@ -232,7 +247,7 @@ func (s *Service) GenesisTime() time.Time {
|
||||
return s.genesisTime
|
||||
}
|
||||
|
||||
-// GenesisValidatorsRoot returns the genesis validator
+// GenesisValidatorsRoot returns the genesis validators
|
||||
// root of the chain.
|
||||
func (s *Service) GenesisValidatorsRoot() [32]byte {
|
||||
s.headLock.RLock()
|
||||
@@ -299,7 +314,7 @@ func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index types.V
|
||||
return v.PublicKey(), nil
|
||||
}
|
||||
|
||||
-// ForkChoicer returns the forkchoice interface
+// ForkChoicer returns the forkchoice interface.
|
||||
func (s *Service) ForkChoicer() forkchoice.ForkChoicer {
|
||||
return s.cfg.ForkChoiceStore
|
||||
}
|
||||
@@ -315,7 +330,7 @@ func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
|
||||
return s.IsOptimisticForRoot(ctx, s.head.root)
|
||||
}
|
||||
|
||||
-// IsOptimisticForRoot takes the root and slot as arguments instead of the current head
+// IsOptimisticForRoot takes the root as argument instead of the current head
|
||||
// and returns true if it is optimistic.
|
||||
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
|
||||
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(root)
|
||||
@@ -345,7 +360,7 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
|
||||
return false, nil
|
||||
}
|
||||
|
||||
-// checkpoint root could be zeros before the first finalized epoch. Use genesis root if the case.
+// Checkpoint root could be zeros before the first finalized epoch. Use genesis root if the case.
|
||||
lastValidated, err := s.cfg.BeaconDB.StateSummary(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(validatedCheckpoint.Root)))
|
||||
if err != nil {
|
||||
return false, err
|
||||
@@ -363,7 +378,7 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
|
||||
return false, err
|
||||
}
|
||||
|
||||
-// historical non-canonical blocks here are returned as optimistic for safety.
+// Historical non-canonical blocks here are returned as optimistic for safety.
|
||||
return !isCanonical, nil
|
||||
}
|
||||
|
||||
@@ -372,7 +387,7 @@ func (s *Service) SetGenesisTime(t time.Time) {
|
||||
s.genesisTime = t
|
||||
}
|
||||
|
||||
-// ForkChoiceStore returns the fork choice store in the service
+// ForkChoiceStore returns the fork choice store in the service.
|
||||
func (s *Service) ForkChoiceStore() forkchoice.ForkChoicer {
|
||||
return s.cfg.ForkChoiceStore
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/store"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
@@ -26,12 +27,6 @@ var _ ChainInfoFetcher = (*Service)(nil)
|
||||
var _ TimeFetcher = (*Service)(nil)
|
||||
var _ ForkFetcher = (*Service)(nil)
|
||||
|
||||
func TestFinalizedCheckpt_Nil(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
assert.DeepEqual(t, params.BeaconConfig().ZeroHash[:], c.FinalizedCheckpt().Root, "Incorrect pre chain start value")
|
||||
}
|
||||
|
||||
func TestHeadRoot_Nil(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
@@ -51,9 +46,11 @@ func TestFinalizedCheckpt_CanRetrieve(t *testing.T) {
|
||||
|
||||
cp := ðpb.Checkpoint{Epoch: 5, Root: bytesutil.PadTo([]byte("foo"), 32)}
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
c.store.SetFinalizedCheckpt(cp)
|
||||
c.store.SetFinalizedCheckptAndPayloadHash(cp, [32]byte{'a'})
|
||||
|
||||
assert.Equal(t, cp.Epoch, c.FinalizedCheckpt().Epoch, "Unexpected finalized epoch")
|
||||
cp, err := c.FinalizedCheckpt()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, cp.Epoch, cp.Epoch, "Unexpected finalized epoch")
|
||||
}
|
||||
|
||||
func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
|
||||
@@ -62,19 +59,24 @@ func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
|
||||
genesisRoot := [32]byte{'A'}
|
||||
cp := ðpb.Checkpoint{Root: genesisRoot[:]}
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
c.store.SetFinalizedCheckpt(cp)
|
||||
c.store.SetFinalizedCheckptAndPayloadHash(cp, [32]byte{'a'})
|
||||
c.originBlockRoot = genesisRoot
|
||||
assert.DeepEqual(t, c.originBlockRoot[:], c.FinalizedCheckpt().Root)
|
||||
cp, err := c.FinalizedCheckpt()
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, c.originBlockRoot[:], cp.Root)
|
||||
}
|
||||
|
||||
func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
assert.Equal(t, params.BeaconConfig().ZeroHash, bytesutil.ToBytes32(c.CurrentJustifiedCheckpt().Root), "Unexpected justified epoch")
|
||||
_, err := c.CurrentJustifiedCheckpt()
|
||||
require.ErrorIs(t, err, store.ErrNilCheckpoint)
|
||||
cp := ðpb.Checkpoint{Epoch: 6, Root: bytesutil.PadTo([]byte("foo"), 32)}
|
||||
c.store.SetJustifiedCheckpt(cp)
|
||||
assert.Equal(t, cp.Epoch, c.CurrentJustifiedCheckpt().Epoch, "Unexpected justified epoch")
|
||||
c.store.SetJustifiedCheckptAndPayloadHash(cp, [32]byte{})
|
||||
jp, err := c.CurrentJustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, cp.Epoch, jp.Epoch, "Unexpected justified epoch")
|
||||
}
|
||||
|
||||
func TestJustifiedCheckpt_GenesisRootOk(t *testing.T) {
|
||||
@@ -83,9 +85,11 @@ func TestJustifiedCheckpt_GenesisRootOk(t *testing.T) {
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
genesisRoot := [32]byte{'B'}
|
||||
cp := ðpb.Checkpoint{Root: genesisRoot[:]}
|
||||
c.store.SetJustifiedCheckpt(cp)
|
||||
c.store.SetJustifiedCheckptAndPayloadHash(cp, [32]byte{})
|
||||
c.originBlockRoot = genesisRoot
|
||||
assert.DeepEqual(t, c.originBlockRoot[:], c.CurrentJustifiedCheckpt().Root)
|
||||
cp, err := c.CurrentJustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, c.originBlockRoot[:], cp.Root)
|
||||
}
|
||||
|
||||
func TestPreviousJustifiedCheckpt_CanRetrieve(t *testing.T) {
|
||||
@@ -93,9 +97,12 @@ func TestPreviousJustifiedCheckpt_CanRetrieve(t *testing.T) {
|
||||
|
||||
cp := ðpb.Checkpoint{Epoch: 7, Root: bytesutil.PadTo([]byte("foo"), 32)}
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
assert.Equal(t, params.BeaconConfig().ZeroHash, bytesutil.ToBytes32(c.CurrentJustifiedCheckpt().Root), "Unexpected justified epoch")
|
||||
_, err := c.PreviousJustifiedCheckpt()
|
||||
require.ErrorIs(t, err, store.ErrNilCheckpoint)
|
||||
c.store.SetPrevJustifiedCheckpt(cp)
|
||||
assert.Equal(t, cp.Epoch, c.PreviousJustifiedCheckpt().Epoch, "Unexpected previous justified epoch")
|
||||
pcp, err := c.PreviousJustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, cp.Epoch, pcp.Epoch, "Unexpected previous justified epoch")
|
||||
}
|
||||
|
||||
func TestPrevJustifiedCheckpt_GenesisRootOk(t *testing.T) {
|
||||
@@ -106,7 +113,9 @@ func TestPrevJustifiedCheckpt_GenesisRootOk(t *testing.T) {
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
c.store.SetPrevJustifiedCheckpt(cp)
|
||||
c.originBlockRoot = genesisRoot
|
||||
assert.DeepEqual(t, c.originBlockRoot[:], c.PreviousJustifiedCheckpt().Root)
|
||||
pcp, err := c.PreviousJustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, c.originBlockRoot[:], pcp.Root)
|
||||
}
|
||||
|
||||
func TestHeadSlot_CanRetrieve(t *testing.T) {
|
||||
|
||||
@@ -3,19 +3,45 @@ package blockchain
|
||||
import "github.com/pkg/errors"
|
||||
|
||||
var (
|
||||
// errNilJustifiedInStore is returned when a nil justified checkpt is returned from store.
|
||||
errNilJustifiedInStore = errors.New("nil justified checkpoint returned from store")
|
||||
// errNilBestJustifiedInStore is returned when a nil justified checkpt is returned from store.
|
||||
errNilBestJustifiedInStore = errors.New("nil best justified checkpoint returned from store")
|
||||
// errNilFinalizedInStore is returned when a nil finalized checkpt is returned from store.
|
||||
errNilFinalizedInStore = errors.New("nil finalized checkpoint returned from store")
|
||||
// errInvalidNilSummary is returned when a nil summary is returned from the DB.
|
||||
errInvalidNilSummary = errors.New("nil summary returned from the DB")
|
||||
// errNilParentInDB is returned when a nil parent block is returned from the DB.
|
||||
errNilParentInDB = errors.New("nil parent block in DB")
|
||||
// errWrongBlockCount is returned when the wrong number of blocks or
|
||||
// block roots is used
|
||||
errWrongBlockCount = errors.New("wrong number of blocks or block roots")
|
||||
// block is not a valid optimistic candidate block
|
||||
errNotOptimisticCandidate = errors.New("block is not suitable for optimistic sync")
|
||||
)
|
||||
|
||||
// An invalid block is the block that fails state transition based on the core protocol rules.
|
||||
// The beacon node shall not be accepting nor building blocks that branch off from an invalid block.
|
||||
// Some examples of invalid blocks are:
|
||||
// The block violates state transition rules.
|
||||
// The block is deemed invalid according to execution layer client.
|
||||
// The block violates certain fork choice rules (before finalized slot, not finalized ancestor)
|
||||
type invalidBlock struct {
|
||||
error
|
||||
}
|
||||
|
||||
type invalidBlockError interface {
|
||||
Error() string
|
||||
InvalidBlock() bool
|
||||
}
|
||||
|
||||
// InvalidBlock returns true for `invalidBlock`.
|
||||
func (e invalidBlock) InvalidBlock() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// IsInvalidBlock returns true if the error has `invalidBlock`.
|
||||
func IsInvalidBlock(e error) bool {
|
||||
if e == nil {
|
||||
return false
|
||||
}
|
||||
d, ok := e.(invalidBlockError)
|
||||
if !ok {
|
||||
return IsInvalidBlock(errors.Unwrap(e))
|
||||
}
|
||||
return d.InvalidBlock()
|
||||
}
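// Illustrative sketch (not part of this change): IsInvalidBlock lets callers tell a
// consensus-invalid block apart from a transient failure even after further wrapping.
// handleBlockError and the log calls below are assumptions made for the example only.
func handleBlockError(err error) {
	if err == nil {
		return
	}
	if IsInvalidBlock(err) {
		// Consensus-invalid block: never build on it or accept its descendants.
		log.WithError(err).Warn("Rejected invalid block")
		return
	}
	// Anything else is treated as a transient failure (DB, engine, network).
	log.WithError(err).Error("Could not process block")
}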
17 beacon-chain/blockchain/error_test.go Normal file
@@ -0,0 +1,17 @@
package blockchain
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
|
||||
func TestIsInvalidBlock(t *testing.T) {
|
||||
require.Equal(t, false, IsInvalidBlock(ErrInvalidPayload))
|
||||
err := invalidBlock{ErrInvalidPayload}
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
|
||||
newErr := errors.Wrap(err, "wrap me")
|
||||
require.Equal(t, true, IsInvalidBlock(newErr))
|
||||
}
|
||||
@@ -31,11 +31,9 @@ var (
|
||||
|
||||
// notifyForkchoiceUpdateArg is the argument for the forkchoice update notification `notifyForkchoiceUpdate`.
|
||||
type notifyForkchoiceUpdateArg struct {
|
||||
headState state.BeaconState
|
||||
headRoot [32]byte
|
||||
headBlock interfaces.BeaconBlock
|
||||
finalizedRoot [32]byte
|
||||
justifiedRoot [32]byte
|
||||
headState state.BeaconState
|
||||
headRoot [32]byte
|
||||
headBlock interfaces.BeaconBlock
|
||||
}
|
||||
|
||||
// notifyForkchoiceUpdate signals execution engine the fork choice updates. Execution engine should:
|
||||
@@ -61,18 +59,12 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get execution payload")
|
||||
}
|
||||
finalizedHash, err := s.getPayloadHash(ctx, arg.finalizedRoot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get finalized block hash")
|
||||
}
|
||||
justifiedHash, err := s.getPayloadHash(ctx, arg.justifiedRoot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get justified block hash")
|
||||
}
|
||||
finalizedHash := s.store.FinalizedPayloadBlockHash()
|
||||
justifiedHash := s.store.JustifiedPayloadBlockHash()
|
||||
fcs := &enginev1.ForkchoiceState{
|
||||
HeadBlockHash: headPayload.BlockHash,
|
||||
SafeBlockHash: justifiedHash,
|
||||
FinalizedBlockHash: finalizedHash,
|
||||
SafeBlockHash: justifiedHash[:],
|
||||
FinalizedBlockHash: finalizedHash[:],
|
||||
}
|
||||
|
||||
nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
|
||||
@@ -89,10 +81,11 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
|
||||
log.WithFields(logrus.Fields{
|
||||
"headSlot": headBlk.Slot(),
|
||||
"headPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
|
||||
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash)),
|
||||
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash[:])),
|
||||
}).Info("Called fork choice updated with optimistic block")
|
||||
return payloadID, s.optimisticCandidateBlock(ctx, headBlk)
|
||||
case powchain.ErrInvalidPayloadStatus:
|
||||
newPayloadInvalidNodeCount.Inc()
|
||||
headRoot := arg.headRoot
|
||||
invalidRoots, err := s.ForkChoicer().SetOptimisticToInvalid(ctx, headRoot, bytesutil.ToBytes32(headBlk.ParentRoot()), bytesutil.ToBytes32(lastValidHash))
|
||||
if err != nil {
|
||||
@@ -101,12 +94,35 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
|
||||
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r, err := s.updateHead(ctx, s.justifiedBalances.balances)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b, err := s.getBlock(ctx, r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
st, err := s.cfg.StateGen.StateByRoot(ctx, r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pid, err := s.notifyForkchoiceUpdate(ctx, ¬ifyForkchoiceUpdateArg{
|
||||
headState: st,
|
||||
headRoot: r,
|
||||
headBlock: b.Block(),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": headBlk.Slot(),
|
||||
"blockRoot": fmt.Sprintf("%#x", headRoot),
|
||||
"invalidCount": len(invalidRoots),
|
||||
}).Warn("Pruned invalid blocks")
|
||||
return nil, ErrInvalidPayload
|
||||
return pid, ErrInvalidPayload
|
||||
|
||||
default:
|
||||
return nil, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
|
||||
}
|
||||
@@ -125,19 +141,19 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
|
||||
|
||||
// getPayloadHash returns the payload hash given the block root.
|
||||
// if the block is before bellatrix fork epoch, it returns the zero hash.
|
||||
func (s *Service) getPayloadHash(ctx context.Context, root [32]byte) ([]byte, error) {
|
||||
finalizedBlock, err := s.getBlock(ctx, s.ensureRootNotZeros(root))
|
||||
func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, error) {
|
||||
blk, err := s.getBlock(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(root)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return [32]byte{}, err
|
||||
}
|
||||
if blocks.IsPreBellatrixVersion(finalizedBlock.Block().Version()) {
|
||||
return params.BeaconConfig().ZeroHash[:], nil
|
||||
if blocks.IsPreBellatrixVersion(blk.Block().Version()) {
|
||||
return params.BeaconConfig().ZeroHash, nil
|
||||
}
|
||||
payload, err := finalizedBlock.Block().Body().ExecutionPayload()
|
||||
payload, err := blk.Block().Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get execution payload")
|
||||
return [32]byte{}, errors.Wrap(err, "could not get execution payload")
|
||||
}
|
||||
return payload.BlockHash, nil
|
||||
return bytesutil.ToBytes32(payload.BlockHash), nil
|
||||
}
|
||||
|
||||
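A hedged caller-side sketch (illustrative only, not part of the diff): with the reworked signature the checkpoint root is passed as raw bytes and a fixed-size [32]byte hash comes back, with the zero hash standing in for pre-Bellatrix blocks, which is the shape the store setters used elsewhere in this diff expect. cacheJustifiedHash is an invented name.

// cacheJustifiedHash pairs a justified checkpoint with the payload block hash
// of the block it points to, using the reworked getPayloadHash signature.
func (s *Service) cacheJustifiedHash(ctx context.Context, cp *ethpb.Checkpoint) error {
	h, err := s.getPayloadHash(ctx, cp.Root) // cp.Root is []byte, h is [32]byte
	if err != nil {
		return err
	}
	// Pre-Bellatrix checkpoints resolve to the zero hash, matching what the
	// engine API expects for an empty safe/finalized block hash.
	s.store.SetJustifiedCheckptAndPayloadHash(cp, h)
	return nil
}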
// notifyNewPayload signals execution engine on a new payload.
@@ -158,14 +174,14 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
body := blk.Block().Body()
enabled, err := blocks.IsExecutionEnabledUsingHeader(postStateHeader, body)
if err != nil {
return false, errors.Wrap(err, "could not determine if execution is enabled")
return false, errors.Wrap(invalidBlock{err}, "could not determine if execution is enabled")
}
if !enabled {
return true, nil
}
payload, err := body.ExecutionPayload()
if err != nil {
return false, errors.Wrap(err, "could not get execution payload")
return false, errors.Wrap(invalidBlock{err}, "could not get execution payload")
}
lastValidHash, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
switch err {
@@ -197,7 +213,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
"blockRoot": fmt.Sprintf("%#x", root),
"invalidCount": len(invalidRoots),
}).Warn("Pruned invalid blocks")
return false, ErrInvalidPayload
return false, invalidBlock{ErrInvalidPayload}
default:
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
@@ -219,14 +235,10 @@ func (s *Service) optimisticCandidateBlock(ctx context.Context, blk interfaces.B
if blk.Slot()+params.BeaconConfig().SafeSlotsToImportOptimistically <= s.CurrentSlot() {
return nil
}

parent, err := s.cfg.BeaconDB.Block(ctx, bytesutil.ToBytes32(blk.ParentRoot()))
parent, err := s.getBlock(ctx, bytesutil.ToBytes32(blk.ParentRoot()))
if err != nil {
return err
}
if parent == nil || parent.IsNil() {
return errNilParentInDB
}
parentIsExecutionBlock, err := blocks.IsExecutionBlock(parent.Block().Body())
if err != nil {
return err
@@ -266,7 +278,10 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
logrus.WithFields(logrus.Fields{
"validatorIndex": proposerID,
"burnAddress": fieldparams.EthBurnAddressHex,
}).Error("Fee recipient not set. Using burn address")
}).Warn("Fee recipient is currently using the burn address, " +
"you will not be rewarded transaction fees on this setting. " +
"Please set a different eth address as the fee recipient. " +
"Please refer to our documentation for instructions")
}
case err != nil:
return false, nil, 0, errors.Wrap(err, "could not get fee recipient in db")
@@ -9,6 +9,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
@@ -174,12 +175,15 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
service.cfg.ExecutionEngineCaller = &mockPOW.EngineClient{ErrForkchoiceUpdated: tt.newForkchoiceErr}
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, beaconDB.SaveState(ctx, st, tt.finalizedRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, tt.finalizedRoot))
fc := &ethpb.Checkpoint{Epoch: 1, Root: tt.finalizedRoot[:]}
service.store.SetFinalizedCheckptAndPayloadHash(fc, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(fc, [32]byte{'b'})
arg := &notifyForkchoiceUpdateArg{
headState: st,
headRoot: tt.headRoot,
headBlock: tt.blk,
finalizedRoot: tt.finalizedRoot,
justifiedRoot: tt.justifiedRoot,
headState: st,
headRoot: tt.headRoot,
headBlock: tt.blk,
}
_, err := service.notifyForkchoiceUpdate(ctx, arg)
if tt.errString != "" {
@@ -191,6 +195,147 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
}
}

//
//
// A <- B <- C <- D
//  \
//   ---------- E <- F
//               \
//                ------ G
// D is the current head, attestations for F and G come late, both are invalid.
// We switch recursively to F then G and finally to D.
//
// We test:
// 1. forkchoice removes blocks F and G from the forkchoice implementation
// 2. forkchoice removes the weights of these blocks
// 3. the blockchain package calls fcu to obtain heads G -> F -> D.

func Test_NotifyForkchoiceUpdateRecursive(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

// Prepare blocks
ba := util.NewBeaconBlockBellatrix()
ba.Block.Body.ExecutionPayload.BlockNumber = 1
wba, err := wrapper.WrappedSignedBeaconBlock(ba)
require.NoError(t, err)
bra, err := wba.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wba))

bb := util.NewBeaconBlockBellatrix()
bb.Block.Body.ExecutionPayload.BlockNumber = 2
wbb, err := wrapper.WrappedSignedBeaconBlock(bb)
require.NoError(t, err)
brb, err := wbb.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbb))

bc := util.NewBeaconBlockBellatrix()
bc.Block.Body.ExecutionPayload.BlockNumber = 3
wbc, err := wrapper.WrappedSignedBeaconBlock(bc)
require.NoError(t, err)
brc, err := wbc.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbc))

bd := util.NewBeaconBlockBellatrix()
pd := [32]byte{'D'}
bd.Block.Body.ExecutionPayload.BlockHash = pd[:]
bd.Block.Body.ExecutionPayload.BlockNumber = 4
wbd, err := wrapper.WrappedSignedBeaconBlock(bd)
require.NoError(t, err)
brd, err := wbd.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbd))

be := util.NewBeaconBlockBellatrix()
pe := [32]byte{'E'}
be.Block.Body.ExecutionPayload.BlockHash = pe[:]
be.Block.Body.ExecutionPayload.BlockNumber = 5
wbe, err := wrapper.WrappedSignedBeaconBlock(be)
require.NoError(t, err)
bre, err := wbe.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbe))

bf := util.NewBeaconBlockBellatrix()
pf := [32]byte{'F'}
bf.Block.Body.ExecutionPayload.BlockHash = pf[:]
bf.Block.Body.ExecutionPayload.BlockNumber = 6
bf.Block.ParentRoot = bre[:]
wbf, err := wrapper.WrappedSignedBeaconBlock(bf)
require.NoError(t, err)
brf, err := wbf.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbf))

bg := util.NewBeaconBlockBellatrix()
bg.Block.Body.ExecutionPayload.BlockNumber = 7
pg := [32]byte{'G'}
bg.Block.Body.ExecutionPayload.BlockHash = pg[:]
bg.Block.ParentRoot = bre[:]
wbg, err := wrapper.WrappedSignedBeaconBlock(bg)
require.NoError(t, err)
brg, err := wbg.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wbg))

// Insert blocks into forkchoice
fcs := doublylinkedtree.New(0, 0)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
service.justifiedBalances.balances = []uint64{50, 100, 200}
require.NoError(t, err)
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 1, bra, [32]byte{}, [32]byte{'A'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 2, brb, bra, [32]byte{'B'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 3, brc, brb, [32]byte{'C'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 4, brd, brc, [32]byte{'D'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 5, bre, brb, [32]byte{'E'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 6, brf, bre, [32]byte{'F'}, 0, 0))
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 7, brg, bre, [32]byte{'G'}, 0, 0))

// Insert Attestations to D, F and G so that they have higher weight than D
// Ensure G is head
fcs.ProcessAttestation(ctx, []uint64{0}, brd, 1)
fcs.ProcessAttestation(ctx, []uint64{1}, brf, 1)
fcs.ProcessAttestation(ctx, []uint64{2}, brg, 1)
headRoot, err := fcs.Head(ctx, 0, bra, []uint64{50, 100, 200}, 0)
require.NoError(t, err)
require.Equal(t, brg, headRoot)

// Prepare Engine Mock to return invalid unless head is D, LVH = E
service.cfg.ExecutionEngineCaller = &mockPOW.EngineClient{ErrForkchoiceUpdated: powchain.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: pe[:], OverrideValidHash: [32]byte{'D'}}
st, _ := util.DeterministicGenesisState(t, 1)

require.NoError(t, beaconDB.SaveState(ctx, st, bra))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bra))
fc := &ethpb.Checkpoint{Epoch: 0, Root: bra[:]}
service.store.SetFinalizedCheckptAndPayloadHash(fc, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(fc, [32]byte{'b'})
a := &notifyForkchoiceUpdateArg{
headState: st,
headBlock: wbg.Block(),
headRoot: brg,
}
_, err = service.notifyForkchoiceUpdate(ctx, a)
require.ErrorIs(t, ErrInvalidPayload, err)
// Ensure Head is D
headRoot, err = fcs.Head(ctx, 0, bra, service.justifiedBalances.balances, 0)
require.NoError(t, err)
require.Equal(t, brd, headRoot)

// Ensure F and G were removed but their parent E wasn't
require.Equal(t, false, fcs.HasNode(brf))
require.Equal(t, false, fcs.HasNode(brg))
require.Equal(t, true, fcs.HasNode(bre))
}
func Test_NotifyNewPayload(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = "2"
@@ -242,12 +387,13 @@ func Test_NotifyNewPayload(t *testing.T) {
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 1, r, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))

tests := []struct {
name string
postState state.BeaconState
invalidBlock bool
isValidPayload bool
blk interfaces.SignedBeaconBlock
newPayloadErr error
errString string
name string
}{
{
name: "phase 0 post state",
@@ -279,6 +425,7 @@ func Test_NotifyNewPayload(t *testing.T) {
newPayloadErr: powchain.ErrInvalidPayloadStatus,
errString: ErrInvalidPayload.Error(),
isValidPayload: false,
invalidBlock: true,
},
{
name: "altair pre state, altair block",
@@ -390,9 +537,13 @@ func Test_NotifyNewPayload(t *testing.T) {
isValidPayload, err := service.notifyNewPayload(ctx, postVersion, postHeader, tt.blk)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
if tt.invalidBlock {
require.Equal(t, true, IsInvalidBlock(err))
}
} else {
require.NoError(t, err)
require.Equal(t, tt.isValidPayload, isValidPayload)
require.Equal(t, false, IsInvalidBlock(err))
}
})
}
@@ -537,15 +688,19 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
jRoot, err := tt.justified.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, tt.justified))
service.store.SetJustifiedCheckpt(
service.store.SetJustifiedCheckptAndPayloadHash(
&ethpb.Checkpoint{
Root: jRoot[:],
Epoch: slots.ToEpoch(tt.justified.Block().Slot()),
})
}, [32]byte{'a'})
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrappedParentBlock))

err = service.optimisticCandidateBlock(ctx, tt.blk)
require.Equal(t, tt.err, err)
if tt.err != nil {
require.Equal(t, tt.err.Error(), err.Error())
} else {
require.NoError(t, err)
}
}
}

@@ -625,7 +780,7 @@ func Test_GetPayloadAttribute(t *testing.T) {
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, fieldparams.EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient).String())
require.LogsContain(t, hook, "Fee recipient not set. Using burn address")
require.LogsContain(t, hook, "Fee recipient is currently using the burn address")

// Cache hit, advance state, has fee recipient
suggestedAddr := common.HexToAddress("123")
@@ -803,7 +958,7 @@ func TestService_getPayloadHash(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)

_, err = service.getPayloadHash(ctx, [32]byte{})
_, err = service.getPayloadHash(ctx, []byte{})
require.ErrorIs(t, errBlockNotFoundInCacheOrDB, err)

b := util.NewBeaconBlock()
@@ -813,20 +968,20 @@ func TestService_getPayloadHash(t *testing.T) {
require.NoError(t, err)
service.saveInitSyncBlock(r, wsb)

h, err := service.getPayloadHash(ctx, r)
h, err := service.getPayloadHash(ctx, r[:])
require.NoError(t, err)
require.DeepEqual(t, params.BeaconConfig().ZeroHash[:], h)
require.DeepEqual(t, params.BeaconConfig().ZeroHash, h)

bb := util.NewBeaconBlockBellatrix()
h = []byte{'a'}
bb.Block.Body.ExecutionPayload.BlockHash = h
h = [32]byte{'a'}
bb.Block.Body.ExecutionPayload.BlockHash = h[:]
r, err = b.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err = wrapper.WrappedSignedBeaconBlock(bb)
require.NoError(t, err)
service.saveInitSyncBlock(r, wsb)

h, err = service.getPayloadHash(ctx, r)
h, err = service.getPayloadHash(ctx, r[:])
require.NoError(t, err)
require.DeepEqual(t, []byte{'a'}, h)
require.DeepEqual(t, [32]byte{'a'}, h)
}
@@ -27,9 +27,9 @@ import (
// UpdateAndSaveHeadWithBalances updates the beacon state head after getting justified balances from cache.
// This function is only used in spec-tests, it does save the head after updating it.
func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
cp := s.store.JustifiedCheckpt()
if cp == nil {
return errors.New("no justified checkpoint")
cp, err := s.store.JustifiedCheckpt()
if err != nil {
return err
}
balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(cp.Root))
if err != nil {
@@ -40,7 +40,7 @@ func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
if err != nil {
return errors.Wrap(err, "could not update head")
}
headBlock, err := s.cfg.BeaconDB.Block(ctx, headRoot)
headBlock, err := s.getBlock(ctx, headRoot)
if err != nil {
return err
}
@@ -66,13 +66,13 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) ([32]byte,
defer span.End()

// Get head from the fork choice service.
f := s.store.FinalizedCheckpt()
if f == nil {
return [32]byte{}, errNilFinalizedInStore
f, err := s.store.FinalizedCheckpt()
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not get finalized checkpoint")
}
j := s.store.JustifiedCheckpt()
if j == nil {
return [32]byte{}, errNilJustifiedInStore
j, err := s.store.JustifiedCheckpt()
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not get justified checkpoint")
}
// To get head before the first justified epoch, the fork choice will start with origin root
// instead of zero hashes.
@@ -86,7 +86,7 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) ([32]byte,
// re-initiate fork choice store using the latest justified info.
// This recovers a fatal condition and should not happen in run time.
if !s.cfg.ForkChoiceStore.HasNode(headStartRoot) {
jb, err := s.cfg.BeaconDB.Block(ctx, headStartRoot)
jb, err := s.getBlock(ctx, headStartRoot)
if err != nil {
return [32]byte{}, err
}
@@ -355,7 +355,7 @@ func (s *Service) notifyNewHeadEvent(
// attestation pool. It also filters out the attestations that are one epoch older as a
// defense so invalid attestations don't flow into the attestation pool.
func (s *Service) saveOrphanedAtts(ctx context.Context, orphanedRoot [32]byte) error {
orphanedBlk, err := s.cfg.BeaconDB.Block(ctx, orphanedRoot)
orphanedBlk, err := s.getBlock(ctx, orphanedRoot)
if err != nil {
return err
}

@@ -154,8 +154,8 @@ func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)

service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{}, [32]byte{'b'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{})
headRoot, err := service.updateHead(context.Background(), []uint64{})
require.NoError(t, err)
@@ -298,8 +298,8 @@ func TestUpdateHead_noSavedChanges(t *testing.T) {
Root: bellatrixBlkRoot[:],
Epoch: 1,
}
service.store.SetFinalizedCheckpt(fcp)
service.store.SetJustifiedCheckpt(fcp)
service.store.SetFinalizedCheckptAndPayloadHash(fcp, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(fcp, [32]byte{'b'})
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bellatrixBlkRoot))

bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)

@@ -6,7 +6,7 @@ import (

func init() {
// Override network name so that hardcoded genesis files are not loaded.
cfg := params.BeaconConfig()
cfg.ConfigName = "test"
params.OverrideBeaconConfig(cfg)
if err := params.SetActive(params.MainnetTestConfig()); err != nil {
panic(err)
}
}

@@ -21,7 +21,7 @@ func testServiceOptsWithDB(t *testing.T) []Option {
}
}

// warning: only use these opts when you are certain there are no db calls
// WARNING: only use these opts when you are certain there are no db calls
// in your code path. this is a lightweight way to satisfy the stategen/beacondb
// initialization requirements w/o the overhead of db init.
func testServiceOptsNoDB() []Option {

@@ -42,17 +42,17 @@ func (s *Service) NewSlot(ctx context.Context, slot types.Slot) error {
}

// Update store.justified_checkpoint if a better checkpoint on the store.finalized_checkpoint chain
bj := s.store.BestJustifiedCheckpt()
if bj == nil {
return errNilBestJustifiedInStore
bj, err := s.store.BestJustifiedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get best justified checkpoint")
}
j := s.store.JustifiedCheckpt()
if j == nil {
return errNilJustifiedInStore
j, err := s.store.JustifiedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get justified checkpoint")
}
f := s.store.FinalizedCheckpt()
if f == nil {
return errNilFinalizedInStore
f, err := s.store.FinalizedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint")
}
if bj.Epoch > j.Epoch {
finalizedSlot, err := slots.EpochStart(f.Epoch)
@@ -64,7 +64,11 @@ func (s *Service) NewSlot(ctx context.Context, slot types.Slot) error {
return err
}
if bytes.Equal(r, f.Root) {
s.store.SetJustifiedCheckpt(bj)
h, err := s.getPayloadHash(ctx, bj.Root)
if err != nil {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(bj, h)
}
}
return nil
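For orientation, a hedged sketch of the checkpoint-store surface this diff relies on, inferred purely from the call sites above and below (the method set and signatures are reconstructed by the editor, not copied from the real store package):

// checkpointStore is an editor's reconstruction of the API implied by this diff:
// getters now return an error instead of a possibly-nil checkpoint, and justified
// and finalized checkpoints are stored together with a payload block hash.
type checkpointStore interface {
	JustifiedCheckpt() (*ethpb.Checkpoint, error)
	BestJustifiedCheckpt() (*ethpb.Checkpoint, error)
	FinalizedCheckpt() (*ethpb.Checkpoint, error)
	JustifiedPayloadBlockHash() [32]byte
	FinalizedPayloadBlockHash() [32]byte
	SetJustifiedCheckptAndPayloadHash(cp *ethpb.Checkpoint, h [32]byte)
	SetFinalizedCheckptAndPayloadHash(cp *ethpb.Checkpoint, h [32]byte)
	SetBestJustifiedCheckpt(cp *ethpb.Checkpoint)
}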
@@ -5,19 +5,22 @@ import (
"testing"

"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/store"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)

func TestService_newSlot(t *testing.T) {
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{})
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -25,10 +28,18 @@ func TestService_newSlot(t *testing.T) {
}
ctx := context.Background()

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
bj, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)

require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, 0, 0)) // genesis
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 32, [32]byte{'a'}, [32]byte{}, [32]byte{}, 0, 0)) // finalized
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 64, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 0, 0)) // justified
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 96, [32]byte{'c'}, [32]byte{'a'}, [32]byte{}, 0, 0)) // best justified
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 96, bj, [32]byte{'a'}, [32]byte{}, 0, 0)) // best justified
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 97, [32]byte{'d'}, [32]byte{}, [32]byte{}, 0, 0)) // bad

type args struct {
@@ -48,7 +59,7 @@ func TestService_newSlot(t *testing.T) {
slot: params.BeaconConfig().SlotsPerEpoch + 1,
finalized: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'a'}, 32)},
justified: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'b'}, 32)},
bestJustified: &ethpb.Checkpoint{Epoch: 3, Root: bytesutil.PadTo([]byte{'c'}, 32)},
bestJustified: &ethpb.Checkpoint{Epoch: 3, Root: bj[:]},
shouldEqual: false,
},
},
@@ -58,7 +69,7 @@ func TestService_newSlot(t *testing.T) {
slot: params.BeaconConfig().SlotsPerEpoch,
finalized: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'a'}, 32)},
justified: &ethpb.Checkpoint{Epoch: 3, Root: bytesutil.PadTo([]byte{'b'}, 32)},
bestJustified: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'c'}, 32)},
bestJustified: &ethpb.Checkpoint{Epoch: 2, Root: bj[:]},
shouldEqual: false,
},
},
@@ -78,7 +89,7 @@ func TestService_newSlot(t *testing.T) {
slot: params.BeaconConfig().SlotsPerEpoch,
finalized: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'a'}, 32)},
justified: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'b'}, 32)},
bestJustified: &ethpb.Checkpoint{Epoch: 3, Root: bytesutil.PadTo([]byte{'c'}, 32)},
bestJustified: &ethpb.Checkpoint{Epoch: 3, Root: bj[:]},
shouldEqual: true,
},
},
@@ -92,9 +103,17 @@ func TestService_newSlot(t *testing.T) {

require.NoError(t, service.NewSlot(ctx, test.args.slot))
if test.args.shouldEqual {
require.DeepSSZEqual(t, service.store.BestJustifiedCheckpt(), service.store.JustifiedCheckpt())
bcp, err := service.store.BestJustifiedCheckpt()
require.NoError(t, err)
cp, err := service.store.JustifiedCheckpt()
require.NoError(t, err)
require.DeepSSZEqual(t, bcp, cp)
} else {
require.DeepNotSSZEqual(t, service.store.BestJustifiedCheckpt(), service.store.JustifiedCheckpt())
bcp, err := service.store.BestJustifiedCheckpt()
require.NoError(t, err)
cp, err := service.store.JustifiedCheckpt()
require.NoError(t, err)
require.DeepNotSSZEqual(t, bcp, cp)
}
}
}

@@ -64,8 +64,9 @@ func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.SignedBea
return err
}
if !valid {
return fmt.Errorf("invalid TTD, configTTD: %s, currentTTD: %s, parentTTD: %s",
err := fmt.Errorf("invalid TTD, configTTD: %s, currentTTD: %s, parentTTD: %s",
params.BeaconConfig().TerminalTotalDifficulty, mergeBlockTD, mergeBlockParentTD)
return invalidBlock{err}
}

log.WithFields(logrus.Fields{

@@ -144,7 +144,9 @@ func Test_validateMergeBlock(t *testing.T) {

cfg.TerminalTotalDifficulty = "1"
params.OverrideBeaconConfig(cfg)
require.ErrorContains(t, "invalid TTD, configTTD: 1, currentTTD: 2, parentTTD: 1", service.validateMergeBlock(ctx, b))
err = service.validateMergeBlock(ctx, b)
require.ErrorContains(t, "invalid TTD, configTTD: 1, currentTTD: 2, parentTTD: 1", err)
require.Equal(t, true, IsInvalidBlock(err))
}

func Test_getBlkParentHashAndTD(t *testing.T) {
@@ -324,9 +324,9 @@ func TestStore_SaveCheckpointState(t *testing.T) {
r := [32]byte{'g'}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, r))

service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'b'})
service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: r[:]})

r = bytesutil.ToBytes32([]byte{'A'})
@@ -358,9 +358,9 @@ func TestStore_SaveCheckpointState(t *testing.T) {
assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")

require.NoError(t, s.SetSlot(params.BeaconConfig().SlotsPerEpoch+1))
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'b'})
service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: r[:]})
cp3 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
@@ -500,7 +500,7 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_ProtoArray(t *testing.T) {
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)

service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
@@ -535,7 +535,7 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_DoublyLinkedTree(t *testing
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)

service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
@@ -564,7 +564,7 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) {
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)

service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: r32[:], Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r32[:], Epoch: 1}, [32]byte{})

b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
@@ -591,7 +591,7 @@ func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)

service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: r32[:], Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r32[:], Epoch: 1}, [32]byte{})

b33 := util.NewBeaconBlock()
b33.Block.Slot = 33

@@ -93,7 +93,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
ctx, span := trace.StartSpan(ctx, "blockChain.onBlock")
defer span.End()
if err := helpers.BeaconBlockIsNil(signed); err != nil {
return err
return invalidBlock{err}
}
b := signed.Block()

@@ -108,7 +108,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
}
postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
if err != nil {
return err
return invalidBlock{err}
}
postStateVersion, postStateHeader, err := getStateVersionAndPayload(postState)
if err != nil {
@@ -123,7 +123,9 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
return err
}
}

if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
return err
}
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
}
@@ -146,9 +148,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
return err
}

if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
return err
}
// If slasher is configured, forward the attestations in the block via
// an event feed for processing.
if features.Get().EnableSlasher {
@@ -177,9 +176,9 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
}

// Update justified check point.
justified := s.store.JustifiedCheckpt()
if justified == nil {
return errNilJustifiedInStore
justified, err := s.store.JustifiedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get justified checkpoint")
}
currJustifiedEpoch := justified.Epoch
if postState.CurrentJustifiedCheckpoint().Epoch > currJustifiedEpoch {
@@ -188,16 +187,29 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
}
}

finalized := s.store.FinalizedCheckpt()
finalized, err := s.store.FinalizedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint")
}
if finalized == nil {
return errNilFinalizedInStore
}
newFinalized := postState.FinalizedCheckpointEpoch() > finalized.Epoch
if newFinalized {
s.store.SetPrevFinalizedCheckpt(finalized)
s.store.SetFinalizedCheckpt(postState.FinalizedCheckpoint())
cp := postState.FinalizedCheckpoint()
h, err := s.getPayloadHash(ctx, cp.Root)
if err != nil {
return err
}
s.store.SetFinalizedCheckptAndPayloadHash(cp, h)
s.store.SetPrevJustifiedCheckpt(justified)
s.store.SetJustifiedCheckpt(postState.CurrentJustifiedCheckpoint())
cp = postState.CurrentJustifiedCheckpoint()
h, err = s.getPayloadHash(ctx, cp.Root)
if err != nil {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(postState.CurrentJustifiedCheckpoint(), h)
}

balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(justified.Root))
@@ -319,7 +331,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
}

if err := helpers.BeaconBlockIsNil(blks[0]); err != nil {
return nil, nil, err
return nil, nil, invalidBlock{err}
}
b := blks[0].Block()

@@ -362,7 +374,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac

set, preState, err = transition.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, b)
if err != nil {
return nil, nil, err
return nil, nil, invalidBlock{err}
}
// Save potential boundary states.
if slots.IsEpochStart(preState.Slot()) {
@@ -383,7 +395,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
}
verify, err := sigSet.Verify()
if err != nil {
return nil, nil, err
return nil, nil, invalidBlock{err}
}
if !verify {
return nil, nil, errors.New("batch block signature verification failed")
@@ -413,6 +425,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
}
}
s.saveInitSyncBlock(blockRoots[i], b)
if err = s.handleBlockAfterBatchVerify(ctx, b, blockRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return nil, nil, err
}
}

for r, st := range boundaries {
@@ -426,14 +442,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
return nil, nil, err
}
f := fCheckpoints[len(fCheckpoints)-1]
j := jCheckpoints[len(jCheckpoints)-1]
arg := &notifyForkchoiceUpdateArg{
headState: preState,
headRoot: lastBR,
headBlock: lastB.Block(),
finalizedRoot: bytesutil.ToBytes32(f.Root),
justifiedRoot: bytesutil.ToBytes32(j.Root),
headState: preState,
headRoot: lastBR,
headBlock: lastB.Block(),
}
if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
return nil, nil, err
@@ -464,9 +476,9 @@ func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed interf
s.clearInitSyncBlocks()
}

justified := s.store.JustifiedCheckpt()
if justified == nil {
return errNilJustifiedInStore
justified, err := s.store.JustifiedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get justified checkpoint")
}
if jCheckpoint.Epoch > justified.Epoch {
if err := s.updateJustifiedInitSync(ctx, jCheckpoint); err != nil {
@@ -474,7 +486,10 @@ func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed interf
}
}

finalized := s.store.FinalizedCheckpt()
finalized, err := s.store.FinalizedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint")
}
if finalized == nil {
return errNilFinalizedInStore
}
@@ -484,7 +499,11 @@ func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed interf
return err
}
s.store.SetPrevFinalizedCheckpt(finalized)
s.store.SetFinalizedCheckpt(fCheckpoint)
h, err := s.getPayloadHash(ctx, fCheckpoint.Root)
if err != nil {
return err
}
s.store.SetFinalizedCheckptAndPayloadHash(fCheckpoint, h)
}
return nil
}
@@ -651,7 +670,7 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
// Skip validation if block has an empty payload.
payload, err := blk.Block().Body().ExecutionPayload()
if err != nil {
return err
return invalidBlock{err}
}
if blocks.IsEmptyPayload(payload) {
return nil

@@ -92,18 +92,15 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b interfaces.BeaconBloc
func (s *Service) VerifyFinalizedBlkDescendant(ctx context.Context, root [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.VerifyFinalizedBlkDescendant")
defer span.End()
finalized := s.store.FinalizedCheckpt()
if finalized == nil {
return errNilFinalizedInStore
finalized, err := s.store.FinalizedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint")
}
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
finalizedBlkSigned, err := s.cfg.BeaconDB.Block(ctx, fRoot)
finalizedBlkSigned, err := s.getBlock(ctx, fRoot)
if err != nil {
return err
}
if finalizedBlkSigned == nil || finalizedBlkSigned.IsNil() || finalizedBlkSigned.Block().IsNil() {
return errors.New("nil finalized block")
}
finalizedBlk := finalizedBlkSigned.Block()
bFinalizedRoot, err := s.ancestor(ctx, root[:], finalizedBlk.Slot())
if err != nil {
@@ -118,7 +115,7 @@ func (s *Service) VerifyFinalizedBlkDescendant(ctx context.Context, root [32]byt
bytesutil.Trunc(root[:]), finalizedBlk.Slot(), bytesutil.Trunc(bFinalizedRoot),
bytesutil.Trunc(fRoot[:]))
tracing.AnnotateError(span, err)
return err
return invalidBlock{err}
}
return nil
}
@@ -126,16 +123,17 @@ func (s *Service) VerifyFinalizedBlkDescendant(ctx context.Context, root [32]byt
// verifyBlkFinalizedSlot validates input block is not less than or equal
// to current finalized slot.
func (s *Service) verifyBlkFinalizedSlot(b interfaces.BeaconBlock) error {
finalized := s.store.FinalizedCheckpt()
if finalized == nil {
return errNilFinalizedInStore
finalized, err := s.store.FinalizedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint")
}
finalizedSlot, err := slots.EpochStart(finalized.Epoch)
if err != nil {
return err
}
if finalizedSlot >= b.Slot() {
return fmt.Errorf("block is equal or earlier than finalized block, slot %d < slot %d", b.Slot(), finalizedSlot)
err = fmt.Errorf("block is equal or earlier than finalized block, slot %d < slot %d", b.Slot(), finalizedSlot)
return invalidBlock{err}
}
return nil
}
@@ -168,7 +166,10 @@ func (s *Service) shouldUpdateCurrentJustified(ctx context.Context, newJustified
if slots.SinceEpochStarts(s.CurrentSlot()) < params.BeaconConfig().SafeSlotsToUpdateJustified {
return true, nil
}
justified := s.store.JustifiedCheckpt()
justified, err := s.store.JustifiedCheckpt()
if err != nil {
return false, errors.Wrap(err, "could not get justified checkpoint")
}
jSlot, err := slots.EpochStart(justified.Epoch)
if err != nil {
return false, err
@@ -190,9 +191,9 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
defer span.End()

cpt := state.CurrentJustifiedCheckpoint()
bestJustified := s.store.BestJustifiedCheckpt()
if bestJustified == nil {
return errNilBestJustifiedInStore
bestJustified, err := s.store.BestJustifiedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get best justified checkpoint")
}
if cpt.Epoch > bestJustified.Epoch {
s.store.SetBestJustifiedCheckpt(cpt)
@@ -203,12 +204,16 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
}

if canUpdate {
justified := s.store.JustifiedCheckpt()
if justified == nil {
return errNilJustifiedInStore
justified, err := s.store.JustifiedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get justified checkpoint")
}
s.store.SetPrevJustifiedCheckpt(justified)
s.store.SetJustifiedCheckpt(cpt)
h, err := s.getPayloadHash(ctx, cpt.Root)
if err != nil {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(cpt, h)
}

return nil
@@ -218,16 +223,20 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
// caches justified checkpoint balances for fork choice and save justified checkpoint in DB.
// This method does not have defense against fork choice bouncing attack, which is why it's only recommended to be used during initial syncing.
func (s *Service) updateJustifiedInitSync(ctx context.Context, cp *ethpb.Checkpoint) error {
justified := s.store.JustifiedCheckpt()
if justified == nil {
return errNilJustifiedInStore
justified, err := s.store.JustifiedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get justified checkpoint")
}
s.store.SetPrevJustifiedCheckpt(justified)

if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cp); err != nil {
return err
}
s.store.SetJustifiedCheckpt(cp)
h, err := s.getPayloadHash(ctx, cp.Root)
if err != nil {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(cp, h)

return nil
}
@@ -339,9 +348,9 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
parentRoot := bytesutil.ToBytes32(blk.ParentRoot())
slot := blk.Slot()
// Fork choice only matters from last finalized slot.
finalized := s.store.FinalizedCheckpt()
if finalized == nil {
return errNilFinalizedInStore
finalized, err := s.store.FinalizedCheckpt()
if err != nil {
return err
}
fSlot, err := slots.EpochStart(finalized.Epoch)
if err != nil {
@@ -350,7 +359,7 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
higherThanFinalized := slot > fSlot
// As long as parent node is not in fork choice store, and parent node is in DB.
for !s.cfg.ForkChoiceStore.HasNode(parentRoot) && s.cfg.BeaconDB.HasBlock(ctx, parentRoot) && higherThanFinalized {
b, err := s.cfg.BeaconDB.Block(ctx, parentRoot)
b, err := s.getBlock(ctx, parentRoot)
if err != nil {
return err
}
@@ -5,6 +5,7 @@ import (
"fmt"
"math/big"
"strconv"
"sync"
"testing"
"time"

@@ -39,6 +40,7 @@ import (
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
prysmTime "github.com/prysmaticlabs/prysm/time"
logTest "github.com/sirupsen/logrus/hooks/test"
)

func TestStore_OnBlock_ProtoArray(t *testing.T) {
@@ -129,9 +131,9 @@ func TestStore_OnBlock_ProtoArray(t *testing.T) {

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: validGenesisRoot[:]}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: roots[0]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{'b'})
service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})

root, err := tt.blk.Block.HashTreeRoot()
@@ -232,9 +234,9 @@ func TestStore_OnBlock_DoublyLinkedTree(t *testing.T) {

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: validGenesisRoot[:]}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: roots[0]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{'b'})
service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})

root, err := tt.blk.Block.HashTreeRoot()
@@ -289,7 +291,8 @@ func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})

service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
@@ -353,7 +356,8 @@ func TestStore_OnBlockBatch_DoublyLinkedTree(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})

service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
@@ -415,7 +419,9 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})

service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.saveInitSyncBlock(gRoot, wsb)
st, keys := util.DeterministicGenesisState(t, 64)
@@ -484,7 +490,7 @@ func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {

diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]}, [32]byte{'a'})
update, err = service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: newJustifiedRoot[:]})
require.NoError(t, err)
assert.Equal(t, true, update, "Should be able to update justified")
@@ -516,7 +522,7 @@ func TestShouldUpdateJustified_ReturnFalse_ProtoArray(t *testing.T) {

diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]}, [32]byte{'a'})

update, err := service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: newJustifiedRoot[:]})
require.NoError(t, err)
@@ -549,7 +555,7 @@ func TestShouldUpdateJustified_ReturnFalse_DoublyLinkedTree(t *testing.T) {

diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]}, [32]byte{'a'})

update, err := service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: newJustifiedRoot[:]})
require.NoError(t, err)
@@ -577,7 +583,7 @@ func TestCachedPreState_CanGetFromStateSummary_ProtoArray(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
@@ -614,7 +620,7 @@ func TestCachedPreState_CanGetFromStateSummary_DoublyLinkedTree(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
@@ -648,7 +654,7 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
@@ -656,7 +662,7 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {

b := util.NewBeaconBlock()
b.Block.Slot = 1
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
wb, err := wrapper.WrappedBeaconBlock(b.Block)
require.NoError(t, err)
err = service.verifyBlkPreState(ctx, wb)
@@ -691,7 +697,7 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
r, err := signedBlock.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: []byte{'A'}})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: []byte{'A'}}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: []byte{'A'}})
st, err := util.NewBeaconState()
require.NoError(t, err)
@@ -703,13 +709,17 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
require.NoError(t, s.SetCurrentJustifiedCheckpoint(&ethpb.Checkpoint{Epoch: 1, Root: r[:]}))
require.NoError(t, service.updateJustified(context.Background(), s))

assert.Equal(t, s.CurrentJustifiedCheckpoint().Epoch, service.store.BestJustifiedCheckpt().Epoch, "Incorrect justified epoch in service")
cp, err := service.store.BestJustifiedCheckpt()
require.NoError(t, err)
assert.Equal(t, s.CurrentJustifiedCheckpoint().Epoch, cp.Epoch, "Incorrect justified epoch in service")

// Could not update
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: []byte{'A'}, Epoch: 2})
require.NoError(t, service.updateJustified(context.Background(), s))

assert.Equal(t, types.Epoch(2), service.store.BestJustifiedCheckpt().Epoch, "Incorrect justified epoch in service")
cp, err = service.store.BestJustifiedCheckpt()
require.NoError(t, err)
assert.Equal(t, types.Epoch(2), cp.Epoch, "Incorrect justified epoch in service")
}

func TestFillForkChoiceMissingBlocks_CanSave_ProtoArray(t *testing.T) {
@@ -723,7 +733,7 @@ func TestFillForkChoiceMissingBlocks_CanSave_ProtoArray(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -768,7 +778,7 @@ func TestFillForkChoiceMissingBlocks_CanSave_DoublyLinkedTree(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -814,7 +824,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_ProtoArray(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -863,7 +873,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_DoublyLinkedTree(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -913,7 +923,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_ProtoArray(t *testing.T) {
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
// Set finalized epoch to 1.
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
@@ -974,7 +984,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_DoublyLinkedTree(t *testing
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
// Set finalized epoch to 1.
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 1})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
@@ -1312,16 +1322,17 @@ func TestVerifyBlkDescendant(t *testing.T) {
|
||||
finalizedRoot [32]byte
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
wantedErr string
|
||||
name string
|
||||
args args
|
||||
wantedErr string
|
||||
invalidBlockRoot bool
|
||||
}{
|
||||
{
|
||||
name: "could not get finalized block in block service cache",
|
||||
args: args{
|
||||
finalizedRoot: [32]byte{'a'},
|
||||
},
|
||||
wantedErr: "nil finalized block",
|
||||
wantedErr: "block not found in cache or db",
|
||||
},
|
||||
{
|
||||
name: "could not get finalized block root in DB",
|
||||
@@ -1337,7 +1348,8 @@ func TestVerifyBlkDescendant(t *testing.T) {
|
||||
finalizedRoot: r1,
|
||||
parentRoot: r,
|
||||
},
|
||||
wantedErr: "is not a descendant of the current finalized block slot",
|
||||
wantedErr: "is not a descendant of the current finalized block slot",
|
||||
invalidBlockRoot: true,
|
||||
},
|
||||
{
|
||||
name: "is descendant",
|
||||
@@ -1350,10 +1362,13 @@ func TestVerifyBlkDescendant(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.store.SetFinalizedCheckpt(ðpb.Checkpoint{Root: tt.args.finalizedRoot[:]})
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: tt.args.finalizedRoot[:]}, [32]byte{})
|
||||
err = service.VerifyFinalizedBlkDescendant(ctx, tt.args.parentRoot)
|
||||
if tt.wantedErr != "" {
|
||||
assert.ErrorContains(t, tt.wantedErr, err)
|
||||
if tt.invalidBlockRoot {
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
}
|
||||
} else if err != nil {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
@@ -1378,14 +1393,18 @@ func TestUpdateJustifiedInitSync(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, beaconState, gRoot))
|
||||
service.originBlockRoot = gRoot
|
||||
currentCp := ðpb.Checkpoint{Epoch: 1}
|
||||
service.store.SetJustifiedCheckpt(currentCp)
|
||||
service.store.SetJustifiedCheckptAndPayloadHash(currentCp, [32]byte{'a'})
|
||||
newCp := ðpb.Checkpoint{Epoch: 2, Root: gRoot[:]}
|
||||
|
||||
require.NoError(t, service.updateJustifiedInitSync(ctx, newCp))
|
||||
|
||||
assert.DeepSSZEqual(t, currentCp, service.PreviousJustifiedCheckpt(), "Incorrect previous justified checkpoint")
|
||||
assert.DeepSSZEqual(t, newCp, service.CurrentJustifiedCheckpt(), "Incorrect current justified checkpoint in cache")
|
||||
cp, err := service.cfg.BeaconDB.JustifiedCheckpoint(ctx)
|
||||
cp, err := service.PreviousJustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
assert.DeepSSZEqual(t, currentCp, cp, "Incorrect previous justified checkpoint")
|
||||
cp, err = service.CurrentJustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
assert.DeepSSZEqual(t, newCp, cp, "Incorrect current justified checkpoint in cache")
|
||||
cp, err = service.cfg.BeaconDB.JustifiedCheckpoint(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.DeepSSZEqual(t, newCp, cp, "Incorrect current justified checkpoint in db")
|
||||
}
|
||||
@@ -1439,7 +1458,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
gRoot, err := gBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
service.store.SetFinalizedCheckpt(ðpb.Checkpoint{Root: gRoot[:]})
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
|
||||
|
||||
testState := gs.Copy()
|
||||
for i := types.Slot(1); i <= 4*params.BeaconConfig().SlotsPerEpoch; i++ {
|
||||
@@ -1453,16 +1472,78 @@ func TestOnBlock_CanFinalize(t *testing.T) {
|
||||
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.Equal(t, types.Epoch(3), service.CurrentJustifiedCheckpt().Epoch)
|
||||
require.Equal(t, types.Epoch(2), service.FinalizedCheckpt().Epoch)
|
||||
cp, err := service.CurrentJustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, types.Epoch(3), cp.Epoch)
|
||||
cp, err = service.FinalizedCheckpt()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, types.Epoch(2), cp.Epoch)
|
||||
|
||||
// The update should persist in DB.
|
||||
j, err := service.cfg.BeaconDB.JustifiedCheckpoint(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, j.Epoch, service.CurrentJustifiedCheckpt().Epoch)
|
||||
cp, err = service.CurrentJustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, j.Epoch, cp.Epoch)
|
||||
f, err := service.cfg.BeaconDB.FinalizedCheckpoint(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, f.Epoch, service.FinalizedCheckpt().Epoch)
|
||||
cp, err = service.FinalizedCheckpt()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, f.Epoch, cp.Epoch)
|
||||
}
|
||||
|
||||
func TestOnBlock_NilBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
WithDepositCache(depositCache),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = service.onBlock(ctx, nil, [32]byte{})
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
}
|
||||
|
||||
func TestOnBlock_InvalidSignature(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
WithDepositCache(depositCache),
|
||||
WithStateNotifier(&mock.MockStateNotifier{}),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
gs, keys := util.DeterministicGenesisState(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
gRoot, err := gBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
|
||||
|
||||
blk, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
blk.Signature = []byte{'a'} // Mutate the signature.
|
||||
r, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
err = service.onBlock(ctx, wsb, r)
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
}
|
||||
|
||||
func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
|
||||
@@ -1493,7 +1574,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
gRoot, err := gBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
service.store.SetFinalizedCheckpt(ðpb.Checkpoint{Root: gRoot[:]})
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
|
||||
|
||||
testState := gs.Copy()
|
||||
for i := types.Slot(1); i < params.BeaconConfig().SlotsPerEpoch; i++ {
|
||||
@@ -1524,7 +1605,7 @@ func TestInsertFinalizedDeposits(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
gRoot, err := gBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
service.store.SetFinalizedCheckpt(ðpb.Checkpoint{Root: gRoot[:]})
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
|
||||
gs = gs.Copy()
|
||||
assert.NoError(t, gs.SetEth1Data(ðpb.Eth1Data{DepositCount: 10}))
|
||||
assert.NoError(t, gs.SetEth1DepositIndex(8))
|
||||
@@ -1563,7 +1644,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
gRoot, err := gBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
service.store.SetFinalizedCheckpt(ðpb.Checkpoint{Root: gRoot[:]})
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
|
||||
gs = gs.Copy()
|
||||
assert.NoError(t, gs.SetEth1Data(ðpb.Eth1Data{DepositCount: 7}))
|
||||
assert.NoError(t, gs.SetEth1DepositIndex(6))
|
||||
@@ -1887,3 +1968,102 @@ func TestService_insertSlashingsToForkChoiceStore(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
service.insertSlashingsToForkChoiceStore(ctx, wb.Block().Body().AttesterSlashings())
|
||||
}
|
||||
|
||||
func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
WithDepositCache(depositCache),
|
||||
WithStateNotifier(&mock.MockStateNotifier{}),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
gs, keys := util.DeterministicGenesisState(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
gRoot, err := gBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
|
||||
|
||||
blk1, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
r1, err := blk1.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb1, err := wrapper.WrappedSignedBeaconBlock(blk1)
|
||||
require.NoError(t, err)
|
||||
blk2, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 2)
|
||||
require.NoError(t, err)
|
||||
r2, err := blk2.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb2, err := wrapper.WrappedSignedBeaconBlock(blk2)
|
||||
require.NoError(t, err)
|
||||
blk3, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 3)
|
||||
require.NoError(t, err)
|
||||
r3, err := blk3.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb3, err := wrapper.WrappedSignedBeaconBlock(blk3)
|
||||
require.NoError(t, err)
|
||||
blk4, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 4)
|
||||
require.NoError(t, err)
|
||||
r4, err := blk4.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb4, err := wrapper.WrappedSignedBeaconBlock(blk4)
|
||||
require.NoError(t, err)
|
||||
|
||||
logHook := logTest.NewGlobal()
|
||||
for i := 0; i < 10; i++ {
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(4)
|
||||
go func() {
|
||||
require.NoError(t, service.onBlock(ctx, wsb1, r1))
|
||||
wg.Done()
|
||||
}()
|
||||
go func() {
|
||||
require.NoError(t, service.onBlock(ctx, wsb2, r2))
|
||||
wg.Done()
|
||||
}()
|
||||
go func() {
|
||||
require.NoError(t, service.onBlock(ctx, wsb3, r3))
|
||||
wg.Done()
|
||||
}()
|
||||
go func() {
|
||||
require.NoError(t, service.onBlock(ctx, wsb4, r4))
|
||||
wg.Done()
|
||||
}()
|
||||
wg.Wait()
|
||||
require.LogsDoNotContain(t, logHook, "New head does not exist in DB. Do nothing")
|
||||
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r1))
|
||||
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r2))
|
||||
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r3))
|
||||
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r4))
|
||||
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'a'})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_verifyBlkFinalizedSlot_invalidBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Epoch: 1}, [32]byte{'a'})
|
||||
blk := util.HydrateBeaconBlock(ðpb.BeaconBlock{Slot: 1})
|
||||
wb, err := wrapper.WrappedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
err = service.verifyBlkFinalizedSlot(wb)
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
}
|
||||
|
||||
@@ -71,9 +71,9 @@ func (s *Service) VerifyFinalizedConsistency(ctx context.Context, root []byte) e
|
||||
return nil
|
||||
}
|
||||
|
||||
f := s.FinalizedCheckpt()
|
||||
if f == nil {
|
||||
return errNilFinalizedInStore
|
||||
f, err := s.FinalizedCheckpt()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ss, err := slots.EpochStart(f.Epoch)
|
||||
if err != nil {
|
||||
@@ -128,42 +128,55 @@ func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
|
||||
return
|
||||
}
|
||||
|
||||
// Continue when there's no fork choice attestation, there's nothing to process and update head.
|
||||
// This covers the condition when the node is still initial syncing to the head of the chain.
|
||||
if s.cfg.AttPool.ForkchoiceAttestationCount() == 0 {
|
||||
continue
|
||||
if err := s.UpdateHead(s.ctx); err != nil {
|
||||
log.WithError(err).Error("Could not process attestations and update head")
|
||||
return
|
||||
}
|
||||
s.processAttestations(s.ctx)
|
||||
|
||||
justified := s.store.JustifiedCheckpt()
|
||||
if justified == nil {
|
||||
log.WithError(errNilJustifiedInStore).Error("Could not get justified checkpoint")
|
||||
continue
|
||||
}
|
||||
balances, err := s.justifiedBalances.get(s.ctx, bytesutil.ToBytes32(justified.Root))
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("Unable to get justified balances for root %v", justified.Root)
|
||||
continue
|
||||
}
|
||||
newHeadRoot, err := s.updateHead(s.ctx, balances)
|
||||
if err != nil {
|
||||
log.WithError(err).Warn("Resolving fork due to new attestation")
|
||||
}
|
||||
if s.headRoot() != newHeadRoot {
|
||||
log.WithFields(logrus.Fields{
|
||||
"oldHeadRoot": fmt.Sprintf("%#x", s.headRoot()),
|
||||
"newHeadRoot": fmt.Sprintf("%#x", newHeadRoot),
|
||||
}).Debug("Head changed due to attestations")
|
||||
}
|
||||
s.notifyEngineIfChangedHead(s.ctx, newHeadRoot)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// UpdateHead updates the canonical head of the chain based on information from fork-choice attestations and votes.
|
||||
// It requires no external inputs.
|
||||
func (s *Service) UpdateHead(ctx context.Context) error {
|
||||
// Continue when there's no fork choice attestation, there's nothing to process and update head.
|
||||
// This covers the condition when the node is still initial syncing to the head of the chain.
|
||||
if s.cfg.AttPool.ForkchoiceAttestationCount() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Only one process can process attestations and update head at a time.
|
||||
s.processAttestationsLock.Lock()
|
||||
defer s.processAttestationsLock.Unlock()
|
||||
|
||||
s.processAttestations(ctx)
|
||||
|
||||
justified, err := s.store.JustifiedCheckpt()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(justified.Root))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newHeadRoot, err := s.updateHead(ctx, balances)
|
||||
if err != nil {
|
||||
log.WithError(err).Warn("Resolving fork due to new attestation")
|
||||
}
|
||||
if s.headRoot() != newHeadRoot {
|
||||
log.WithFields(logrus.Fields{
|
||||
"oldHeadRoot": fmt.Sprintf("%#x", s.headRoot()),
|
||||
"newHeadRoot": fmt.Sprintf("%#x", newHeadRoot),
|
||||
}).Debug("Head changed due to attestations")
|
||||
}
|
||||
s.notifyEngineIfChangedHead(ctx, newHeadRoot)
|
||||
return nil
|
||||
}
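
The exported UpdateHead above is intentionally self-contained: it takes only a context, serializes itself with processAttestationsLock, and no-ops when the fork-choice attestation pool is empty, so callers such as the sync service can invoke it whenever they finish a batch of work. A minimal sketch of such a caller, assuming an illustrative HeadUpdater interface and ticker wiring that are not part of this diff:

```
// Sketch only: drive UpdateHead once per slot from a background routine.
// HeadUpdater, runHeadRoutine, and the ticker are illustrative assumptions;
// only UpdateHead(ctx context.Context) error comes from the change above.
package example

import (
	"context"
	"log"
	"time"
)

type HeadUpdater interface {
	UpdateHead(ctx context.Context) error
}

func runHeadRoutine(ctx context.Context, s HeadUpdater, slotDuration time.Duration) {
	ticker := time.NewTicker(slotDuration)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			// UpdateHead is safe to call concurrently; it locks internally.
			if err := s.UpdateHead(ctx); err != nil {
				log.Printf("could not update head: %v", err)
			}
		}
	}
}
```

Exposing this as a single exported method also lets the mock ChainService later in this diff satisfy the same contract with a no-op.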
|
||||
|
||||
// This calls notify Forkchoice Update in the event that the head has changed
|
||||
func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32]byte) {
|
||||
if s.headRoot() == newHeadRoot {
|
||||
if newHeadRoot == [32]byte{} || s.headRoot() == newHeadRoot {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -172,12 +185,6 @@ func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32
|
||||
return // We don't have the block, don't notify the engine and update head.
|
||||
}
|
||||
|
||||
finalized := s.store.FinalizedCheckpt()
|
||||
if finalized == nil {
|
||||
log.WithError(errNilFinalizedInStore).Error("could not get finalized checkpoint")
|
||||
return
|
||||
}
|
||||
|
||||
newHeadBlock, err := s.getBlock(ctx, newHeadRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get new head block")
|
||||
@@ -189,11 +196,9 @@ func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32
|
||||
return
|
||||
}
|
||||
arg := ¬ifyForkchoiceUpdateArg{
|
||||
headState: headState,
|
||||
headRoot: newHeadRoot,
|
||||
headBlock: newHeadBlock.Block(),
|
||||
finalizedRoot: bytesutil.ToBytes32(finalized.Root),
|
||||
justifiedRoot: bytesutil.ToBytes32(s.store.JustifiedCheckpt().Root),
|
||||
headState: headState,
|
||||
headRoot: newHeadRoot,
|
||||
headBlock: newHeadBlock.Block(),
|
||||
}
|
||||
_, err = s.notifyForkchoiceUpdate(s.ctx, arg)
|
||||
if err != nil {
|
||||
|
||||
@@ -137,14 +137,14 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
|
||||
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
|
||||
service.notifyEngineIfChangedHead(ctx, service.headRoot())
|
||||
hookErr := "could not notify forkchoice update"
|
||||
finalizedErr := "could not get finalized checkpoint"
|
||||
require.LogsDoNotContain(t, hook, finalizedErr)
|
||||
invalidStateErr := "Could not get state from db"
|
||||
require.LogsDoNotContain(t, hook, invalidStateErr)
|
||||
require.LogsDoNotContain(t, hook, hookErr)
|
||||
gb, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
service.saveInitSyncBlock([32]byte{'a'}, gb)
|
||||
service.notifyEngineIfChangedHead(ctx, [32]byte{'a'})
|
||||
require.LogsContain(t, hook, finalizedErr)
|
||||
require.LogsContain(t, hook, invalidStateErr)
|
||||
|
||||
hook.Reset()
|
||||
service.head = &head{
|
||||
@@ -169,9 +169,9 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
|
||||
state: st,
|
||||
}
|
||||
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1})
|
||||
service.store.SetFinalizedCheckpt(finalized)
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(finalized, [32]byte{})
|
||||
service.notifyEngineIfChangedHead(ctx, r1)
|
||||
require.LogsDoNotContain(t, hook, finalizedErr)
|
||||
require.LogsDoNotContain(t, hook, invalidStateErr)
|
||||
require.LogsDoNotContain(t, hook, hookErr)
|
||||
|
||||
// Block in DB
|
||||
@@ -191,12 +191,51 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
|
||||
state: st,
|
||||
}
|
||||
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1})
|
||||
service.store.SetFinalizedCheckpt(finalized)
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(finalized, [32]byte{})
|
||||
service.notifyEngineIfChangedHead(ctx, r1)
|
||||
require.LogsDoNotContain(t, hook, finalizedErr)
|
||||
require.LogsDoNotContain(t, hook, invalidStateErr)
|
||||
require.LogsDoNotContain(t, hook, hookErr)
|
||||
vId, payloadID, has := service.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(2)
|
||||
require.Equal(t, true, has)
|
||||
require.Equal(t, types.ValidatorIndex(1), vId)
|
||||
require.Equal(t, [8]byte{1}, payloadID)
|
||||
|
||||
// Test zero headRoot returns immediately.
|
||||
headRoot := service.headRoot()
|
||||
service.notifyEngineIfChangedHead(ctx, [32]byte{})
|
||||
require.Equal(t, service.headRoot(), headRoot)
|
||||
}
|
||||
|
||||
func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
opts := testServiceOptsWithDB(t)
|
||||
opts = append(opts, WithAttestationPool(attestations.NewPool()), WithStateNotifier(&mockBeaconNode{}))
|
||||
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
|
||||
genesisState, pks := util.DeterministicGenesisState(t, 64)
|
||||
require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
|
||||
require.NoError(t, service.saveGenesisData(ctx, genesisState))
|
||||
atts, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
|
||||
require.NoError(t, err)
|
||||
tRoot := bytesutil.ToBytes32(atts[0].Data.Target.Root)
|
||||
copied := genesisState.Copy()
|
||||
copied, err = transition.ProcessSlots(ctx, copied, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: tRoot[:]}))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
|
||||
b := util.NewBeaconBlock()
|
||||
wb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wb))
|
||||
service.head.root = r // Old head
|
||||
require.Equal(t, 1, len(service.cfg.AttPool.ForkchoiceAttestations()))
|
||||
require.NoError(t, err, service.UpdateHead(ctx))
|
||||
require.Equal(t, tRoot, service.head.root) // Validate head is the new one
|
||||
require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations())) // Validate att pool is empty
|
||||
}
|
||||
|
||||
@@ -30,9 +30,9 @@ type SlashingReceiver interface {
|
||||
ReceiveAttesterSlashing(ctx context.Context, slashings *ethpb.AttesterSlashing)
|
||||
}
|
||||
|
||||
// ReceiveBlock is a function that defines the the operations (minus pubsub)
|
||||
// that are performed on blocks that is received from regular sync service. The operations consists of:
|
||||
// 1. Validate block, apply state transition and update check points
|
||||
// ReceiveBlock is a function that defines the operations (minus pubsub)
|
||||
// that are performed on a received block. The operations consist of:
|
||||
// 1. Validate block, apply state transition and update checkpoints
|
||||
// 2. Apply fork choice to the processed block
|
||||
// 3. Save latest head info
|
||||
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, blockRoot [32]byte) error {
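
The numbered steps above all happen inside ReceiveBlock itself, so a caller only wraps the decoded block and computes its root before handing it over, which is how the tests later in this diff drive it. A hedged sketch of such a caller; the handler function is invented for illustration, while WrappedSignedBeaconBlock, HashTreeRoot, and ReceiveBlock are the calls used in the surrounding code:

```
// handleIncomingBlock sketches the expected call sequence for a block that
// arrives from sync: wrap it, derive its root, then let ReceiveBlock do the rest.
func handleIncomingBlock(ctx context.Context, s *Service, blk *ethpb.SignedBeaconBlock) error {
	wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
	if err != nil {
		return err
	}
	root, err := blk.Block.HashTreeRoot()
	if err != nil {
		return err
	}
	return s.ReceiveBlock(ctx, wsb, root)
}
```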
|
||||
@@ -59,9 +59,9 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.SignedBeaco
|
||||
}
|
||||
|
||||
// Reports on block and fork choice metrics.
|
||||
finalized := s.store.FinalizedCheckpt()
|
||||
if finalized == nil {
|
||||
return errNilFinalizedInStore
|
||||
finalized, err := s.store.FinalizedCheckpt()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint")
|
||||
}
|
||||
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
|
||||
|
||||
@@ -85,7 +85,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Sig
|
||||
defer span.End()
|
||||
|
||||
// Apply state transition on the incoming newly received block batches, one by one.
|
||||
fCheckpoints, jCheckpoints, err := s.onBlockBatch(ctx, blocks, blkRoots)
|
||||
_, _, err := s.onBlockBatch(ctx, blocks, blkRoots)
|
||||
if err != nil {
|
||||
err := errors.Wrap(err, "could not process block in batch")
|
||||
tracing.AnnotateError(span, err)
|
||||
@@ -94,10 +94,6 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Sig
|
||||
|
||||
for i, b := range blocks {
|
||||
blockCopy := b.Copy()
|
||||
if err = s.handleBlockAfterBatchVerify(ctx, blockCopy, blkRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
// Send notification of the processed block to the state feed.
|
||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
@@ -110,9 +106,9 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Sig
|
||||
})
|
||||
|
||||
// Reports on blockCopy and fork choice metrics.
|
||||
finalized := s.store.FinalizedCheckpt()
|
||||
if finalized == nil {
|
||||
return errNilFinalizedInStore
|
||||
finalized, err := s.store.FinalizedCheckpt()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint")
|
||||
}
|
||||
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
|
||||
}
|
||||
@@ -120,7 +116,10 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Sig
|
||||
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
|
||||
return err
|
||||
}
|
||||
finalized := s.store.FinalizedCheckpt()
|
||||
finalized, err := s.store.FinalizedCheckpt()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint")
|
||||
}
|
||||
if finalized == nil {
|
||||
return errNilFinalizedInStore
|
||||
}
|
||||
@@ -173,7 +172,10 @@ func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
|
||||
currentEpoch := slots.ToEpoch(s.CurrentSlot())
|
||||
// Prevent `sinceFinality` going underflow.
|
||||
var sinceFinality types.Epoch
|
||||
finalized := s.store.FinalizedCheckpt()
|
||||
finalized, err := s.store.FinalizedCheckpt()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if finalized == nil {
|
||||
return errNilFinalizedInStore
|
||||
}
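
The hunk above guards `sinceFinality` against underflow: epochs are unsigned, so subtracting the finalized epoch from the current epoch is only meaningful when the current epoch is at least as large (the overflow test later in this diff sets a finalized epoch of 10000000 for exactly this case). A tiny sketch of the clamped subtraction, using plain uint64 epochs as an illustrative simplification:

```
// epochsSinceFinality returns currentEpoch - finalizedEpoch, clamped to zero,
// so the unsigned subtraction can never wrap around.
func epochsSinceFinality(currentEpoch, finalizedEpoch uint64) uint64 {
	if currentEpoch <= finalizedEpoch {
		return 0
	}
	return currentEpoch - finalizedEpoch
}
```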
|
||||
|
||||
@@ -34,7 +34,7 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
return blk
|
||||
}
|
||||
params.SetupTestConfigCleanup(t)
|
||||
bc := params.BeaconConfig()
|
||||
bc := params.BeaconConfig().Copy()
|
||||
bc.ShardCommitteePeriod = 0 // Required for voluntary exits test in reasonable time.
|
||||
params.OverrideBeaconConfig(bc)
|
||||
|
||||
@@ -141,7 +141,8 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
gRoot, err := gBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
s.store.SetFinalizedCheckpt(ðpb.Checkpoint{Root: gRoot[:]})
|
||||
h := [32]byte{'a'}
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: gRoot[:]}, h)
|
||||
root, err := tt.args.block.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(tt.args.block)
|
||||
@@ -181,7 +182,7 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
gRoot, err := gBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
s.store.SetFinalizedCheckpt(ðpb.Checkpoint{Root: gRoot[:]})
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wg := sync.WaitGroup{}
|
||||
@@ -262,7 +263,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
|
||||
gRoot, err := gBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
s.store.SetFinalizedCheckpt(ðpb.Checkpoint{Root: gRoot[:]})
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
|
||||
root, err := tt.args.block.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(tt.args.block)
|
||||
@@ -312,7 +313,7 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
|
||||
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
s.store.SetFinalizedCheckpt(ðpb.Checkpoint{})
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{}, [32]byte{})
|
||||
|
||||
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
|
||||
assert.LogsContain(t, hook, "Entering mode to save hot states in DB")
|
||||
@@ -323,7 +324,7 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
|
||||
opts := testServiceOptsWithDB(t)
|
||||
s, err := NewService(context.Background(), opts...)
|
||||
require.NoError(t, err)
|
||||
s.store.SetFinalizedCheckpt(ðpb.Checkpoint{})
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{}, [32]byte{})
|
||||
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
|
||||
s.genesisTime = time.Now()
|
||||
|
||||
@@ -336,7 +337,7 @@ func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
|
||||
opts := testServiceOptsWithDB(t)
|
||||
s, err := NewService(context.Background(), opts...)
|
||||
require.NoError(t, err)
|
||||
s.store.SetFinalizedCheckpt(ðpb.Checkpoint{Epoch: 10000000})
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{}, [32]byte{})
|
||||
s.genesisTime = time.Now()
|
||||
|
||||
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
|
||||
|
||||
@@ -50,21 +50,22 @@ const headSyncMinEpochsAfterCheckpoint = 128
|
||||
// Service represents a service that handles the internal
|
||||
// logic of managing the full PoS beacon chain.
|
||||
type Service struct {
|
||||
cfg *config
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
genesisTime time.Time
|
||||
head *head
|
||||
headLock sync.RWMutex
|
||||
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
|
||||
nextEpochBoundarySlot types.Slot
|
||||
boundaryRoots [][32]byte
|
||||
checkpointStateCache *cache.CheckpointStateCache
|
||||
initSyncBlocks map[[32]byte]interfaces.SignedBeaconBlock
|
||||
initSyncBlocksLock sync.RWMutex
|
||||
justifiedBalances *stateBalanceCache
|
||||
wsVerifier *WeakSubjectivityVerifier
|
||||
store *store.Store
|
||||
cfg *config
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
genesisTime time.Time
|
||||
head *head
|
||||
headLock sync.RWMutex
|
||||
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
|
||||
nextEpochBoundarySlot types.Slot
|
||||
boundaryRoots [][32]byte
|
||||
checkpointStateCache *cache.CheckpointStateCache
|
||||
initSyncBlocks map[[32]byte]interfaces.SignedBeaconBlock
|
||||
initSyncBlocksLock sync.RWMutex
|
||||
justifiedBalances *stateBalanceCache
|
||||
wsVerifier *WeakSubjectivityVerifier
|
||||
store *store.Store
|
||||
processAttestationsLock sync.Mutex
|
||||
}
|
||||
|
||||
// config options for the service.
|
||||
@@ -143,6 +144,7 @@ func (s *Service) Stop() error {
|
||||
defer s.cancel()
|
||||
|
||||
if s.cfg.StateGen != nil && s.head != nil && s.head.state != nil {
|
||||
// Save the last finalized state so that starting up in the following run will be much faster.
|
||||
if err := s.cfg.StateGen.ForceCheckpoint(s.ctx, s.head.state.FinalizedCheckpoint().Root); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -199,13 +201,10 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
|
||||
forkChoicer = protoarray.New(justified.Epoch, finalized.Epoch, fRoot)
|
||||
}
|
||||
s.cfg.ForkChoiceStore = forkChoicer
|
||||
fb, err := s.cfg.BeaconDB.Block(s.ctx, s.ensureRootNotZeros(fRoot))
|
||||
fb, err := s.getBlock(s.ctx, s.ensureRootNotZeros(fRoot))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint block")
|
||||
}
|
||||
if fb == nil || fb.IsNil() {
|
||||
return errNilFinalizedInStore
|
||||
}
|
||||
payloadHash, err := getBlockPayloadHash(fb.Block())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get execution payload hash")
|
||||
@@ -337,14 +336,13 @@ func (s *Service) initializeHeadFromDB(ctx context.Context) error {
|
||||
finalizedState.Slot(), flags.HeadSync.Name)
|
||||
}
|
||||
}
|
||||
|
||||
finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, finalizedRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized block from db")
|
||||
if finalizedState == nil || finalizedState.IsNil() {
|
||||
return errors.New("finalized state can't be nil")
|
||||
}
|
||||
|
||||
if finalizedState == nil || finalizedState.IsNil() || finalizedBlock == nil || finalizedBlock.IsNil() {
|
||||
return errors.New("finalized state and block can't be nil")
|
||||
finalizedBlock, err := s.getBlock(ctx, finalizedRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized block")
|
||||
}
|
||||
s.setHead(finalizedRoot, finalizedBlock, finalizedState)
|
||||
|
||||
|
||||
@@ -31,6 +31,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/container/trie"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
@@ -86,9 +87,11 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
|
||||
bState, _ := util.DeterministicGenesisState(t, 10)
|
||||
pbState, err := v1.ProtobufBeaconState(bState.InnerStateUnsafe())
|
||||
require.NoError(t, err)
|
||||
mockTrie, err := trie.NewTrie(0)
|
||||
require.NoError(t, err)
|
||||
err = beaconDB.SavePowchainData(ctx, ðpb.ETH1ChainData{
|
||||
BeaconState: pbState,
|
||||
Trie: ðpb.SparseMerkleTrie{},
|
||||
Trie: mockTrie.ToProto(),
|
||||
CurrentEth1Data: ðpb.LatestETH1Data{
|
||||
BlockHash: make([]byte, 32),
|
||||
},
|
||||
@@ -221,7 +224,8 @@ func TestChainService_InitializeBeaconChain(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
trie, _, err := util.DepositTrieFromDeposits(deposits)
|
||||
require.NoError(t, err)
|
||||
hashTreeRoot := trie.HashTreeRoot()
|
||||
hashTreeRoot, err := trie.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
genState, err := transition.EmptyGenesisState()
|
||||
require.NoError(t, err)
|
||||
err = genState.SetEth1Data(ðpb.Eth1Data{
|
||||
@@ -273,8 +277,12 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {
|
||||
// Test the start function.
|
||||
chainService.Start()
|
||||
|
||||
require.DeepEqual(t, blkRoot[:], chainService.store.FinalizedCheckpt().Root, "Finalize Checkpoint root is incorrect")
|
||||
require.DeepEqual(t, params.BeaconConfig().ZeroHash[:], chainService.store.JustifiedCheckpt().Root, "Justified Checkpoint root is incorrect")
|
||||
cp, err := chainService.store.FinalizedCheckpt()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, blkRoot[:], cp.Root, "Finalize Checkpoint root is incorrect")
|
||||
cp, err = chainService.store.JustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, params.BeaconConfig().ZeroHash[:], cp.Root, "Justified Checkpoint root is incorrect")
|
||||
|
||||
require.NoError(t, chainService.Stop(), "Unable to stop chain service")
|
||||
|
||||
@@ -500,7 +508,7 @@ func TestHasBlock_ForkChoiceAndDB_ProtoArray(t *testing.T) {
|
||||
cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
|
||||
store: &store.Store{},
|
||||
}
|
||||
s.store.SetFinalizedCheckpt(ðpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
|
||||
b := util.NewBeaconBlock()
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
@@ -521,7 +529,7 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
|
||||
cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0), BeaconDB: beaconDB},
|
||||
store: &store.Store{},
|
||||
}
|
||||
s.store.SetFinalizedCheckpt(ðpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
|
||||
b := util.NewBeaconBlock()
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
@@ -594,7 +602,7 @@ func BenchmarkHasBlockForkChoiceStore_ProtoArray(b *testing.B) {
|
||||
cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
|
||||
store: &store.Store{},
|
||||
}
|
||||
s.store.SetFinalizedCheckpt(ðpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
|
||||
blk := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}}
|
||||
r, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(b, err)
|
||||
@@ -617,7 +625,7 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
|
||||
cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0), BeaconDB: beaconDB},
|
||||
store: &store.Store{},
|
||||
}
|
||||
s.store.SetFinalizedCheckpt(ðpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
|
||||
blk := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}}
|
||||
r, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(b, err)
|
||||
|
||||
@@ -37,7 +37,7 @@ func newStateBalanceCache(sg *stategen.State) (*stateBalanceCache, error) {
|
||||
// the previously read value. This cache assumes we only want to cache one
|
||||
// set of balances for a single root (the current justified root).
|
||||
//
|
||||
// warning: this is not thread-safe on its own, relies on get() for locking
|
||||
// WARNING: this is not thread-safe on its own, relies on get() for locking
|
||||
func (c *stateBalanceCache) update(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) {
|
||||
stateBalanceCacheMiss.Inc()
|
||||
justifiedState, err := c.stateGen.StateByRoot(ctx, justifiedRoot)
|
||||
|
||||
@@ -10,7 +10,10 @@ go_library(
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/store",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = ["//proto/prysm/v1alpha1:go_default_library"],
|
||||
deps = [
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
|
||||
@@ -17,9 +17,19 @@ func TestNew(t *testing.T) {
|
||||
Root: []byte("hello"),
|
||||
}
|
||||
s := New(j, f)
|
||||
require.DeepSSZEqual(t, s.JustifiedCheckpt(), j)
|
||||
require.DeepSSZEqual(t, s.BestJustifiedCheckpt(), j)
|
||||
require.DeepSSZEqual(t, s.PrevJustifiedCheckpt(), j)
|
||||
require.DeepSSZEqual(t, s.FinalizedCheckpt(), f)
|
||||
require.DeepSSZEqual(t, s.PrevFinalizedCheckpt(), f)
|
||||
cp, err := s.JustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, j, cp)
|
||||
cp, err = s.BestJustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, j, cp)
|
||||
cp, err = s.PrevJustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, j, cp)
|
||||
cp, err = s.FinalizedCheckpt()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, f, cp)
|
||||
cp, err = s.PrevFinalizedCheckpt()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, f, cp)
|
||||
}
|
||||
|
||||
@@ -1,40 +1,76 @@
|
||||
package store
|
||||
|
||||
import ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrNilCheckpoint = errors.New("nil checkpoint")
|
||||
)
|
||||
|
||||
// PrevJustifiedCheckpt returns the previous justified checkpoint in the Store.
|
||||
func (s *Store) PrevJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
func (s *Store) PrevJustifiedCheckpt() (*ethpb.Checkpoint, error) {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
return s.prevJustifiedCheckpt
|
||||
if s.prevJustifiedCheckpt == nil {
|
||||
return nil, ErrNilCheckpoint
|
||||
}
|
||||
return s.prevJustifiedCheckpt, nil
|
||||
}
|
||||
|
||||
// BestJustifiedCheckpt returns the best justified checkpoint in the Store.
|
||||
func (s *Store) BestJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
func (s *Store) BestJustifiedCheckpt() (*ethpb.Checkpoint, error) {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
return s.bestJustifiedCheckpt
|
||||
if s.bestJustifiedCheckpt == nil {
|
||||
return nil, ErrNilCheckpoint
|
||||
}
|
||||
return s.bestJustifiedCheckpt, nil
|
||||
}
|
||||
|
||||
// JustifiedCheckpt returns the justified checkpoint in the Store.
|
||||
func (s *Store) JustifiedCheckpt() *ethpb.Checkpoint {
|
||||
func (s *Store) JustifiedCheckpt() (*ethpb.Checkpoint, error) {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
return s.justifiedCheckpt
|
||||
if s.justifiedCheckpt == nil {
|
||||
return nil, ErrNilCheckpoint
|
||||
}
|
||||
return s.justifiedCheckpt, nil
|
||||
}
|
||||
|
||||
// JustifiedPayloadBlockHash returns the justified payload block hash reflecting justified check point.
|
||||
func (s *Store) JustifiedPayloadBlockHash() [32]byte {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
return s.justifiedPayloadBlockHash
|
||||
}
|
||||
|
||||
// PrevFinalizedCheckpt returns the previous finalized checkpoint in the Store.
|
||||
func (s *Store) PrevFinalizedCheckpt() *ethpb.Checkpoint {
|
||||
func (s *Store) PrevFinalizedCheckpt() (*ethpb.Checkpoint, error) {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
return s.prevFinalizedCheckpt
|
||||
if s.prevFinalizedCheckpt == nil {
|
||||
return nil, ErrNilCheckpoint
|
||||
}
|
||||
return s.prevFinalizedCheckpt, nil
|
||||
}
|
||||
|
||||
// FinalizedCheckpt returns the finalized checkpoint in the Store.
|
||||
func (s *Store) FinalizedCheckpt() *ethpb.Checkpoint {
|
||||
func (s *Store) FinalizedCheckpt() (*ethpb.Checkpoint, error) {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
return s.finalizedCheckpt
|
||||
if s.finalizedCheckpt == nil {
|
||||
return nil, ErrNilCheckpoint
|
||||
}
|
||||
return s.finalizedCheckpt, nil
|
||||
}
|
||||
|
||||
// FinalizedPayloadBlockHash returns the finalized payload block hash reflecting finalized check point.
|
||||
func (s *Store) FinalizedPayloadBlockHash() [32]byte {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
return s.finalizedPayloadBlockHash
|
||||
}
|
||||
|
||||
// SetPrevJustifiedCheckpt sets the previous justified checkpoint in the Store.
|
||||
@@ -51,18 +87,20 @@ func (s *Store) SetBestJustifiedCheckpt(cp *ethpb.Checkpoint) {
|
||||
s.bestJustifiedCheckpt = cp
|
||||
}
|
||||
|
||||
// SetJustifiedCheckpt sets the justified checkpoint in the Store.
|
||||
func (s *Store) SetJustifiedCheckpt(cp *ethpb.Checkpoint) {
|
||||
// SetJustifiedCheckptAndPayloadHash sets the justified checkpoint and blockhash in the Store.
|
||||
func (s *Store) SetJustifiedCheckptAndPayloadHash(cp *ethpb.Checkpoint, h [32]byte) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
s.justifiedCheckpt = cp
|
||||
s.justifiedPayloadBlockHash = h
|
||||
}
|
||||
|
||||
// SetFinalizedCheckpt sets the finalized checkpoint in the Store.
|
||||
func (s *Store) SetFinalizedCheckpt(cp *ethpb.Checkpoint) {
|
||||
// SetFinalizedCheckptAndPayloadHash sets the finalized checkpoint and blockhash in the Store.
|
||||
func (s *Store) SetFinalizedCheckptAndPayloadHash(cp *ethpb.Checkpoint, h [32]byte) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
s.finalizedCheckpt = cp
|
||||
s.finalizedPayloadBlockHash = h
|
||||
}
|
||||
|
||||
// SetPrevFinalizedCheckpt sets the previous finalized checkpoint in the Store.
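
With the getters above now returning `(*ethpb.Checkpoint, error)` and `ErrNilCheckpoint` instead of a possibly nil pointer, callers check the error rather than nil-checking the result, which is the pattern the rest of this diff applies throughout the blockchain package. A short caller-side sketch; the helper function itself is hypothetical, only the Store methods and error come from the code above:

```
// finalizedEpochOrError shows the intended call pattern against the new API:
// a never-set checkpoint surfaces as ErrNilCheckpoint instead of a nil dereference.
func finalizedEpochOrError(s *Store) (types.Epoch, error) {
	cp, err := s.FinalizedCheckpt()
	if err != nil {
		return 0, err // ErrNilCheckpoint when the checkpoint was never set
	}
	return cp.Epoch, nil
}
```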
|
||||
|
||||
@@ -10,44 +10,63 @@ import (
|
||||
func Test_store_PrevJustifiedCheckpt(t *testing.T) {
|
||||
s := &Store{}
|
||||
var cp *ethpb.Checkpoint
|
||||
require.Equal(t, cp, s.PrevJustifiedCheckpt())
|
||||
_, err := s.PrevJustifiedCheckpt()
|
||||
require.ErrorIs(t, ErrNilCheckpoint, err)
|
||||
cp = ðpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
|
||||
s.SetPrevJustifiedCheckpt(cp)
|
||||
require.Equal(t, cp, s.PrevJustifiedCheckpt())
|
||||
got, err := s.PrevJustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, cp, got)
|
||||
}
|
||||
|
||||
func Test_store_BestJustifiedCheckpt(t *testing.T) {
|
||||
s := &Store{}
|
||||
var cp *ethpb.Checkpoint
|
||||
require.Equal(t, cp, s.BestJustifiedCheckpt())
|
||||
_, err := s.BestJustifiedCheckpt()
|
||||
require.ErrorIs(t, ErrNilCheckpoint, err)
|
||||
cp = ðpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
|
||||
s.SetBestJustifiedCheckpt(cp)
|
||||
require.Equal(t, cp, s.BestJustifiedCheckpt())
|
||||
got, err := s.BestJustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, cp, got)
|
||||
}
|
||||
|
||||
func Test_store_JustifiedCheckpt(t *testing.T) {
|
||||
s := &Store{}
|
||||
var cp *ethpb.Checkpoint
|
||||
require.Equal(t, cp, s.JustifiedCheckpt())
|
||||
_, err := s.JustifiedCheckpt()
|
||||
require.ErrorIs(t, ErrNilCheckpoint, err)
|
||||
cp = ðpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
|
||||
s.SetJustifiedCheckpt(cp)
|
||||
require.Equal(t, cp, s.JustifiedCheckpt())
|
||||
h := [32]byte{'b'}
|
||||
s.SetJustifiedCheckptAndPayloadHash(cp, h)
|
||||
got, err := s.JustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, cp, got)
|
||||
require.Equal(t, h, s.JustifiedPayloadBlockHash())
|
||||
}
|
||||
|
||||
func Test_store_FinalizedCheckpt(t *testing.T) {
|
||||
s := &Store{}
|
||||
var cp *ethpb.Checkpoint
|
||||
require.Equal(t, cp, s.FinalizedCheckpt())
|
||||
_, err := s.FinalizedCheckpt()
|
||||
require.ErrorIs(t, ErrNilCheckpoint, err)
|
||||
cp = ðpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
|
||||
s.SetFinalizedCheckpt(cp)
|
||||
require.Equal(t, cp, s.FinalizedCheckpt())
|
||||
h := [32]byte{'b'}
|
||||
s.SetFinalizedCheckptAndPayloadHash(cp, h)
|
||||
got, err := s.FinalizedCheckpt()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, cp, got)
|
||||
require.Equal(t, h, s.FinalizedPayloadBlockHash())
|
||||
}
|
||||
|
||||
func Test_store_PrevFinalizedCheckpt(t *testing.T) {
|
||||
s := &Store{}
|
||||
var cp *ethpb.Checkpoint
|
||||
require.Equal(t, cp, s.PrevFinalizedCheckpt())
|
||||
_, err := s.PrevFinalizedCheckpt()
|
||||
require.ErrorIs(t, ErrNilCheckpoint, err)
|
||||
cp = ðpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
|
||||
s.SetPrevFinalizedCheckpt(cp)
|
||||
require.Equal(t, cp, s.PrevFinalizedCheckpt())
|
||||
got, err := s.PrevFinalizedCheckpt()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, cp, got)
|
||||
}
|
||||
|
||||
@@ -17,9 +17,11 @@ import (
|
||||
// best_justified_checkpoint: Checkpoint
|
||||
// proposerBoostRoot: Root
|
||||
type Store struct {
|
||||
justifiedCheckpt *ethpb.Checkpoint
|
||||
finalizedCheckpt *ethpb.Checkpoint
|
||||
bestJustifiedCheckpt *ethpb.Checkpoint
|
||||
justifiedCheckpt *ethpb.Checkpoint
|
||||
justifiedPayloadBlockHash [32]byte
|
||||
finalizedCheckpt *ethpb.Checkpoint
|
||||
finalizedPayloadBlockHash [32]byte
|
||||
bestJustifiedCheckpt *ethpb.Checkpoint
|
||||
sync.RWMutex
|
||||
// These are not part of the consensus spec, but we do use them to return gRPC API requests.
|
||||
// TODO(10094): Consider removing in v3.
|
||||
|
||||
@@ -282,18 +282,18 @@ func (s *ChainService) CurrentFork() *ethpb.Fork {
|
||||
}
|
||||
|
||||
// FinalizedCheckpt mocks FinalizedCheckpt method in chain service.
|
||||
func (s *ChainService) FinalizedCheckpt() *ethpb.Checkpoint {
|
||||
return s.FinalizedCheckPoint
|
||||
func (s *ChainService) FinalizedCheckpt() (*ethpb.Checkpoint, error) {
|
||||
return s.FinalizedCheckPoint, nil
|
||||
}
|
||||
|
||||
// CurrentJustifiedCheckpt mocks CurrentJustifiedCheckpt method in chain service.
|
||||
func (s *ChainService) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
return s.CurrentJustifiedCheckPoint
|
||||
func (s *ChainService) CurrentJustifiedCheckpt() (*ethpb.Checkpoint, error) {
|
||||
return s.CurrentJustifiedCheckPoint, nil
|
||||
}
|
||||
|
||||
// PreviousJustifiedCheckpt mocks PreviousJustifiedCheckpt method in chain service.
|
||||
func (s *ChainService) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
return s.PreviousJustifiedCheckPoint
|
||||
func (s *ChainService) PreviousJustifiedCheckpt() (*ethpb.Checkpoint, error) {
|
||||
return s.PreviousJustifiedCheckPoint, nil
|
||||
}
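
The mock accessors now mirror the real service's `(checkpoint, error)` signatures, so tests keep setting the exported *CheckPoint fields and simply handle the extra error when reading them back. A small hedged sketch of that round trip; the test name and epoch value are invented:

```
// Illustrative only: set the exported field, read it back through the accessor.
func TestMockFinalizedCheckpt_RoundTrip(t *testing.T) {
	m := &ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: 3}}
	cp, err := m.FinalizedCheckpt()
	require.NoError(t, err)
	require.Equal(t, types.Epoch(3), cp.Epoch)
}
```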
|
||||
|
||||
// ReceiveAttestation mocks ReceiveAttestation method in chain service.
|
||||
@@ -376,7 +376,7 @@ func (_ *ChainService) HeadGenesisValidatorsRoot() [32]byte {
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
// VerifyBlkDescendant mocks VerifyBlkDescendant and always returns nil.
|
||||
// VerifyFinalizedBlkDescendant mocks VerifyBlkDescendant and always returns nil.
|
||||
func (s *ChainService) VerifyFinalizedBlkDescendant(_ context.Context, _ [32]byte) error {
|
||||
return s.VerifyBlkDescendantErr
|
||||
}
|
||||
@@ -451,6 +451,8 @@ func (s *ChainService) IsOptimisticForRoot(_ context.Context, _ [32]byte) (bool,
|
||||
return s.Optimistic, nil
|
||||
}
|
||||
|
||||
// UpdateHead mocks the same method in the chain service.
|
||||
func (s *ChainService) UpdateHead(_ context.Context) error { return nil }
|
||||
|
||||
// ReceiveAttesterSlashing mocks the same method in the chain service.
|
||||
func (s *ChainService) ReceiveAttesterSlashing(context.Context, *ethpb.AttesterSlashing) {
|
||||
}
|
||||
func (s *ChainService) ReceiveAttesterSlashing(context.Context, *ethpb.AttesterSlashing) {}
|
||||
|
||||
@@ -30,10 +30,11 @@ type WeakSubjectivityVerifier struct {
|
||||
db weakSubjectivityDB
|
||||
}
|
||||
|
||||
// NewWeakSubjectivityVerifier validates a checkpoint, and if valid, uses it to initialize a weak subjectivity verifier
|
||||
// NewWeakSubjectivityVerifier validates a checkpoint, and if valid, uses it to initialize a weak subjectivity verifier.
|
||||
func NewWeakSubjectivityVerifier(wsc *ethpb.Checkpoint, db weakSubjectivityDB) (*WeakSubjectivityVerifier, error) {
|
||||
if wsc == nil || len(wsc.Root) == 0 || wsc.Epoch == 0 {
|
||||
log.Warn("No valid weak subjectivity checkpoint specified, running without weak subjectivity verification")
|
||||
log.Info("No checkpoint for syncing provided, node will begin syncing from genesis. Checkpoint Sync is an optional feature that allows your node to sync from a more recent checkpoint, " +
|
||||
"which enhances the security of your local beacon node and the broader network. See https://docs.prylabs.network/docs/next/prysm-usage/checkpoint-sync/ to learn how to configure Checkpoint Sync.")
|
||||
return &WeakSubjectivityVerifier{
|
||||
enabled: false,
|
||||
}, nil
|
||||
@@ -58,7 +59,6 @@ func (v *WeakSubjectivityVerifier) VerifyWeakSubjectivity(ctx context.Context, f
|
||||
if v.verified || !v.enabled {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Two conditions are described in the specs:
|
||||
// IF epoch_number > store.finalized_checkpoint.epoch,
|
||||
// then ASSERT during block sync that block with root block_root
|
||||
@@ -92,6 +92,5 @@ func (v *WeakSubjectivityVerifier) VerifyWeakSubjectivity(ctx context.Context, f
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return errors.Wrap(errWSBlockNotFoundInEpoch, fmt.Sprintf("root=%#x, epoch=%d", v.root, v.epoch))
|
||||
}
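
NewWeakSubjectivityVerifier either disables itself when no usable checkpoint is supplied (logging the checkpoint-sync hint added above) or records the root and epoch for the later VerifyWeakSubjectivity call. A brief construction sketch; the epoch, the zeroed root, and the beaconDB parameter name are placeholders, while the constructor signature is the one shown in this file:

```
// newVerifierForCheckpoint sketches wiring a verifier from a known checkpoint.
// A real node would derive the checkpoint from its weak subjectivity configuration.
func newVerifierForCheckpoint(beaconDB weakSubjectivityDB) (*WeakSubjectivityVerifier, error) {
	wsc := &ethpb.Checkpoint{
		Epoch: 100,              // placeholder epoch
		Root:  make([]byte, 32), // placeholder 32-byte root
	}
	return NewWeakSubjectivityVerifier(wsc, beaconDB)
}
```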
|
||||
|
||||
@@ -79,8 +79,10 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
|
||||
store: &store.Store{},
|
||||
wsVerifier: wv,
|
||||
}
|
||||
s.store.SetFinalizedCheckpt(ðpb.Checkpoint{Epoch: tt.finalizedEpoch})
|
||||
err = s.wsVerifier.VerifyWeakSubjectivity(context.Background(), s.store.FinalizedCheckpt().Epoch)
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Epoch: tt.finalizedEpoch}, [32]byte{})
|
||||
cp, err := s.store.FinalizedCheckpt()
|
||||
require.NoError(t, err)
|
||||
err = s.wsVerifier.VerifyWeakSubjectivity(context.Background(), cp.Epoch)
|
||||
if tt.wantErr == nil {
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
|
||||
31 beacon-chain/builder/BUILD.bazel (new file)
@@ -0,0 +1,31 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"options.go",
|
||||
"service.go",
|
||||
"verify.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/builder",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api/client/builder:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//network:go_default_library",
|
||||
"//network/authorization:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
],
|
||||
)
|
||||
58  beacon-chain/builder/options.go  Normal file
@@ -0,0 +1,58 @@
package builder

import (
	"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
	"github.com/prysmaticlabs/prysm/network"
	"github.com/prysmaticlabs/prysm/network/authorization"
	"github.com/urfave/cli/v2"
)

type Option func(s *Service) error

// FlagOptions for builder service flag configurations.
func FlagOptions(c *cli.Context) ([]Option, error) {
	endpoints := parseBuilderEndpoints(c)
	opts := []Option{
		WithBuilderEndpoints(endpoints),
	}
	return opts, nil
}

func WithBuilderEndpoints(endpoints []string) Option {
	return func(s *Service) error {
		stringEndpoints := dedupEndpoints(endpoints)
		endpoints := make([]network.Endpoint, len(stringEndpoints))
		for i, e := range stringEndpoints {
			endpoints[i] = covertEndPoint(e)
		}
		s.cfg.builderEndpoint = endpoints[0] // Use the first one as the default.
		return nil
	}
}

func covertEndPoint(ep string) network.Endpoint {
	return network.Endpoint{
		Url: ep,
		Auth: network.AuthorizationData{ // Not sure about authorization for now.
			Method: authorization.None,
			Value:  "",
		}}
}

func parseBuilderEndpoints(c *cli.Context) []string {
	// Goal is to support multiple end points later.
	return []string{c.String(flags.MevBuilderFlag.Name)}
}

func dedupEndpoints(endpoints []string) []string {
	selectionMap := make(map[string]bool)
	newEndpoints := make([]string, 0, len(endpoints))
	for _, point := range endpoints {
		if selectionMap[point] {
			continue
		}
		newEndpoints = append(newEndpoints, point)
		selectionMap[point] = true
	}
	return newEndpoints
}
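A short illustrative note on the deduplication helper above (the endpoint URLs are made-up placeholders, not values from this diff): order is preserved and the first occurrence wins, which is why WithBuilderEndpoints ends up using the first flag value as the default endpoint.

```go
// Illustrative only; URLs are placeholders.
eps := dedupEndpoints([]string{"http://a:8550", "http://b:8550", "http://a:8550"})
// eps == ["http://a:8550", "http://b:8550"]; WithBuilderEndpoints then picks eps[0].
```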
187  beacon-chain/builder/service.go  Normal file
@@ -0,0 +1,187 @@
package builder

import (
	"context"

	"github.com/prysmaticlabs/prysm/api/client/builder"
	fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
	types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/network"
	v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)

type BlockBuilder interface {
	SubmitBlindedBlock(ctx context.Context, block *ethpb.SignedBlindedBeaconBlockBellatrix) (*v1.ExecutionPayload, error)
	GetHeader(ctx context.Context, slot types.Slot, parentHash [32]byte, pubKey [48]byte) (*ethpb.SignedBuilderBid, error)
	Status() error
	RegisterValidator(ctx context.Context, reg *ethpb.SignedValidatorRegistrationV1) error
}

// config defines a config struct for dependencies into the service.
type config struct {
	builderEndpoint network.Endpoint
}

type Service struct {
	cfg *config
	c   *builder.Client
}

func NewService(ctx context.Context, opts ...Option) (*Service, error) {
	s := &Service{
		cfg: &config{},
	}
	for _, opt := range opts {
		if err := opt(s); err != nil {
			return nil, err
		}
	}
	//c, err := builder.NewClient("http://localhost:28545")
	//if err != nil {
	//	return nil, err
	//}
	//sk, err := bls.RandKey()
	//if err != nil {
	//	return nil, err
	//}

	//reg := &ethpb.ValidatorRegistrationV1{
	//	FeeRecipient: params.BeaconConfig().DefaultFeeRecipient.Bytes(),
	//	GasLimit:     100000000,
	//	Timestamp:    uint64(time.Now().Unix()),
	//	Pubkey:       sk.PublicKey().Marshal(),
	//}
	//sig := sk.Sign(reg.Pubkey)

	//if err := c.RegisterValidator(ctx, &ethpb.SignedValidatorRegistrationV1{
	//	Message:   reg,
	//	Signature: sig.Marshal(),
	//}); err != nil {
	//	return nil, err
	//}

	//h := "a0513a503d5bd6e89a144c3268e5b7e9da9dbf63df125a360e3950a7d0d67131"
	//data, err := hex.DecodeString(h)
	//if err != nil {
	//	panic(err)
	//}
	//b, err := c.GetHeader(ctx, 1, bytesutil.ToBytes32(data), [48]byte{})
	//if err != nil {
	//	return nil, err
	//}
	//msg := b.Message
	//header := msg.Header
	//
	//genesis, keys := util.DeterministicGenesisState(t, 64)
	//b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
	//if err != nil {
	//	return nil, err
	//}
	//sb := HydrateSignedBlindedBeaconBlockBellatrix(&ethpb.SignedBlindedBeaconBlockBellatrix{
	//	Block: &ethpb.BlindedBeaconBlockBellatrix{
	//		Body: &ethpb.BlindedBeaconBlockBodyBellatrix{
	//			Attestations: []*ethpb.Attestation{
	//				{Signature: []byte{1, 2, 3, 4}},
	//			},
	//		},
	//	},
	//})
	//
	//sb.Block.Body.ExecutionPayloadHeader = header
	//sb.Block.Body.SyncAggregate.SyncCommitteeBits = bitfield.NewBitvector512()
	//if _, err := c.SubmitBlindedBlock(ctx, sb); err != nil {
	//	return nil, err
	//}
	//
	//log.Fatal("End of test")

	return s, nil
}

func (s *Service) Start() {}

func (s *Service) Stop() error {
	return nil
}

func (s *Service) SubmitBlindedBlock(context.Context, *ethpb.SignedBlindedBeaconBlockBellatrix) (*v1.ExecutionPayload, error) {
	panic("implement me")
}

func (s *Service) GetHeader(context.Context, types.Slot, [32]byte, [48]byte) (*ethpb.SignedBuilderBid, error) {
	panic("implement me")
}

func (s *Service) Status() error {
	panic("implement me")
}

func (s *Service) RegisterValidator(context.Context, *ethpb.SignedValidatorRegistrationV1) error {
	panic("implement me")
}

func HydrateSignedBlindedBeaconBlockBellatrix(b *ethpb.SignedBlindedBeaconBlockBellatrix) *ethpb.SignedBlindedBeaconBlockBellatrix {
	if b.Signature == nil {
		b.Signature = make([]byte, fieldparams.BLSSignatureLength)
	}
	b.Block = HydrateBlindedBeaconBlockBellatrix(b.Block)
	return b
}

// HydrateBlindedBeaconBlockBellatrix hydrates a blinded beacon block with correct field length sizes
// to comply with fssz marshalling and unmarshalling rules.
func HydrateBlindedBeaconBlockBellatrix(b *ethpb.BlindedBeaconBlockBellatrix) *ethpb.BlindedBeaconBlockBellatrix {
	if b == nil {
		b = &ethpb.BlindedBeaconBlockBellatrix{}
	}
	if b.ParentRoot == nil {
		b.ParentRoot = make([]byte, fieldparams.RootLength)
	}
	if b.StateRoot == nil {
		b.StateRoot = make([]byte, fieldparams.RootLength)
	}
	b.Body = HydrateBlindedBeaconBlockBodyBellatrix(b.Body)
	return b
}

// HydrateBlindedBeaconBlockBodyBellatrix hydrates a blinded beacon block body with correct field length sizes
// to comply with fssz marshalling and unmarshalling rules.
func HydrateBlindedBeaconBlockBodyBellatrix(b *ethpb.BlindedBeaconBlockBodyBellatrix) *ethpb.BlindedBeaconBlockBodyBellatrix {
	if b == nil {
		b = &ethpb.BlindedBeaconBlockBodyBellatrix{}
	}
	if b.RandaoReveal == nil {
		b.RandaoReveal = make([]byte, fieldparams.BLSSignatureLength)
	}
	if b.Graffiti == nil {
		b.Graffiti = make([]byte, 32)
	}
	if b.Eth1Data == nil {
		b.Eth1Data = &ethpb.Eth1Data{
			DepositRoot: make([]byte, fieldparams.RootLength),
			BlockHash:   make([]byte, 32),
		}
	}
	if b.SyncAggregate == nil {
		b.SyncAggregate = &ethpb.SyncAggregate{
			SyncCommitteeBits:      make([]byte, 64),
			SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
		}
	}
	if b.ExecutionPayloadHeader == nil {
		b.ExecutionPayloadHeader = &ethpb.ExecutionPayloadHeader{
			ParentHash:       make([]byte, 32),
			FeeRecipient:     make([]byte, 20),
			StateRoot:        make([]byte, fieldparams.RootLength),
			ReceiptsRoot:     make([]byte, fieldparams.RootLength),
			LogsBloom:        make([]byte, 256),
			PrevRandao:       make([]byte, 32),
			BaseFeePerGas:    make([]byte, 32),
			BlockHash:        make([]byte, 32),
			TransactionsRoot: make([]byte, fieldparams.RootLength),
		}
	}
	return b
}
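The hydration helpers above exist so that zero-valued blinded Bellatrix blocks get correctly sized fixed-length fields before SSZ marshalling. A minimal sketch of the intended use, written as a test (this is my assumption from the code, not part of the diff; it relies on the fastssz-generated MarshalSSZ method on the proto type):

```go
package builder

import (
	"testing"

	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/testing/require"
)

// Sketch only: hydrate an empty signed blinded block so its fixed-length
// fields are allocated, then SSZ-marshal it.
func TestHydrateBlindedBlock_MarshalsSSZ(t *testing.T) {
	blk := HydrateSignedBlindedBeaconBlockBellatrix(&ethpb.SignedBlindedBeaconBlockBellatrix{})
	_, err := blk.MarshalSSZ()
	require.NoError(t, err)
}
```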
65  beacon-chain/builder/service_test.go  Normal file
@@ -0,0 +1,65 @@
package builder

import (
	"context"
	"encoding/hex"
	"math/big"
	"testing"

	"github.com/prysmaticlabs/prysm/api/client/builder"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/testing/require"
	"github.com/prysmaticlabs/prysm/testing/util"
)

func TestMergeMockRoundtrip(t *testing.T) {
	c, err := builder.NewClient("http://localhost:28545")
	require.NoError(t, err)

	h := "a0513a503d5bd6e89a144c3268e5b7e9da9dbf63df125a360e3950a7d0d67131"
	data, err := hex.DecodeString(h)
	require.NoError(t, err)
	ctx := context.Background()
	header, err := c.GetHeader(ctx, 1, bytesutil.ToBytes32(data), [48]byte{})
	require.NoError(t, err)
	t.Log(header.Message.Value)

	st, keys := util.DeterministicGenesisState(t, 1024)
	b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
	require.NoError(t, err)

	bb := big.NewInt(1770307273)
	header.Message.Header.BaseFeePerGas = bb.Bytes()

	t.Log(len(b.Block.Body.Attestations))
	t.Log(len(b.Block.Body.Deposits))
	t.Log(len(b.Block.Body.VoluntaryExits))
	t.Log(len(b.Block.Body.ProposerSlashings))
	t.Log(len(b.Block.Body.AttesterSlashings))
	t.Log(b.Block.Body.AttesterSlashings[0].Attestation_1.AttestingIndices)
	t.Log(header.Message.Header.BaseFeePerGas)

	sb := HydrateSignedBlindedBeaconBlockBellatrix(&ethpb.SignedBlindedBeaconBlockBellatrix{
		Signature: keys[0].Sign([]byte("hello")).Marshal(),
		Block: &ethpb.BlindedBeaconBlockBellatrix{
			Slot:          b.Block.Slot,
			ParentRoot:    b.Block.ParentRoot,
			StateRoot:     b.Block.StateRoot,
			ProposerIndex: b.Block.ProposerIndex,
			Body: &ethpb.BlindedBeaconBlockBodyBellatrix{
				Attestations:      b.Block.Body.Attestations,
				RandaoReveal:      b.Block.Body.RandaoReveal,
				Deposits:          b.Block.Body.Deposits,
				VoluntaryExits:    b.Block.Body.VoluntaryExits,
				ProposerSlashings: b.Block.Body.ProposerSlashings,
				//AttesterSlashings: b.Block.Body.AttesterSlashings,
				Graffiti:               b.Block.Body.Graffiti,
				ExecutionPayloadHeader: header.Message.Header,
			},
		},
	})
	if _, err := c.SubmitBlindedBlock(ctx, sb); err != nil {
		t.Fatal(err)
	}
}
29  beacon-chain/builder/verify.go  Normal file
@@ -0,0 +1,29 @@
package builder

import (
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/signing"
	types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/time/slots"
)

func VerifyRegistrationSignature(
	slot types.Slot,
	fork *ethpb.Fork,
	signed *ethpb.SignedValidatorRegistrationV1,
	genesisRoot []byte,
) error {
	if signed == nil || signed.Message == nil {
		return errors.New("nil signed registration")
	}
	domain, err := signing.Domain(fork, slots.ToEpoch(slot), [4]byte{} /*TODO: Use registration signing domain */, genesisRoot)
	if err != nil {
		return err
	}

	if err := signing.VerifySigningRoot(signed, signed.Message.Pubkey, signed.Signature, domain); err != nil {
		return signing.ErrSigFailedToVerify
	}
	return nil
}
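For context, a sketch of how a caller outside the package might wire VerifyRegistrationSignature in (assumed usage, not from this diff; `slot`, `fork`, `signedReg`, and `genesisRoot` are placeholders that would normally come from the head state and the incoming registration):

```go
// Sketch only: reject builder registrations whose BLS signature does not verify.
if err := builder.VerifyRegistrationSignature(slot, fork, signedReg, genesisRoot); err != nil {
	return errors.Wrap(err, "invalid validator registration signature")
}
```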
2  beacon-chain/cache/committee_test.go  vendored
@@ -110,7 +110,7 @@ func TestCommitteeCache_CanRotate(t *testing.T) {
	sort.Slice(k, func(i, j int) bool {
		return k[i].(string) < k[j].(string)
	})
	wanted := end - int(maxCommitteesCacheSize)
	wanted := end - maxCommitteesCacheSize
	s := bytesutil.ToBytes32([]byte(strconv.Itoa(wanted)))
	assert.Equal(t, key(s), k[0], "incorrect key received for slot 190")

@@ -430,7 +430,11 @@ func TestFinalizedDeposits_DepositsCachedCorrectly(t *testing.T) {
	}
	trie, err := trie.GenerateTrieFromItems(deps, params.BeaconConfig().DepositContractTreeDepth)
	require.NoError(t, err, "Could not generate deposit trie")
	assert.Equal(t, trie.HashTreeRoot(), cachedDeposits.Deposits.HashTreeRoot())
	rootA, err := trie.HashTreeRoot()
	require.NoError(t, err)
	rootB, err := cachedDeposits.Deposits.HashTreeRoot()
	require.NoError(t, err)
	assert.Equal(t, rootA, rootB)
}

func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {

@@ -488,7 +492,11 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
	}
	trie, err := trie.GenerateTrieFromItems(deps, params.BeaconConfig().DepositContractTreeDepth)
	require.NoError(t, err, "Could not generate deposit trie")
	assert.Equal(t, trie.HashTreeRoot(), cachedDeposits.Deposits.HashTreeRoot())
	rootA, err := trie.HashTreeRoot()
	require.NoError(t, err)
	rootB, err := cachedDeposits.Deposits.HashTreeRoot()
	require.NoError(t, err)
	assert.Equal(t, rootA, rootB)
}

func TestFinalizedDeposits_HandleZeroDeposits(t *testing.T) {

@@ -41,7 +41,6 @@ go_library(
        "//math:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/attestation:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
@@ -143,7 +143,8 @@ func TestProcessDeposits_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T)
|
||||
},
|
||||
}
|
||||
balances := []uint64{0, 50}
|
||||
root := depositTrie.HashTreeRoot()
|
||||
root, err := depositTrie.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
beaconState, err := stateAltair.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Validators: registry,
|
||||
Balances: balances,
|
||||
@@ -202,7 +203,8 @@ func TestProcessDeposit_SkipsInvalidDeposit(t *testing.T) {
|
||||
dep[0].Data.Signature = make([]byte, 96)
|
||||
trie, _, err := util.DepositTrieFromDeposits(dep)
|
||||
require.NoError(t, err)
|
||||
root := trie.HashTreeRoot()
|
||||
root, err := trie.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
eth1Data := ðpb.Eth1Data{
|
||||
DepositRoot: root[:],
|
||||
DepositCount: 1,
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/math"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
@@ -268,16 +267,12 @@ func AttestationsDelta(beaconState state.BeaconState, bal *precompute.Balance, v
|
||||
leak := helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch)
|
||||
|
||||
// Modified in Altair and Bellatrix.
|
||||
var inactivityDenominator uint64
|
||||
bias := cfg.InactivityScoreBias
|
||||
switch beaconState.Version() {
|
||||
case version.Altair:
|
||||
inactivityDenominator = bias * cfg.InactivityPenaltyQuotientAltair
|
||||
case version.Bellatrix:
|
||||
inactivityDenominator = bias * cfg.InactivityPenaltyQuotientBellatrix
|
||||
default:
|
||||
return nil, nil, errors.Errorf("invalid state type version: %T", beaconState.Version())
|
||||
inactivityPenaltyQuotient, err := beaconState.InactivityPenaltyQuotient()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
inactivityDenominator := bias * inactivityPenaltyQuotient
|
||||
|
||||
for i, v := range vals {
|
||||
rewards[i], penalties[i], err = attestationDelta(bal, v, baseRewardMultiplier, inactivityDenominator, leak)
|
||||
|
||||
@@ -7,8 +7,6 @@ import (
|
||||
e "github.com/prysmaticlabs/prysm/beacon-chain/core/epoch"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
@@ -70,22 +68,14 @@ func ProcessEpoch(ctx context.Context, state state.BeaconState) (state.BeaconSta
|
||||
}
|
||||
|
||||
// Modified in Altair and Bellatrix.
|
||||
cfg := params.BeaconConfig()
|
||||
switch state.Version() {
|
||||
case version.Altair:
|
||||
state, err = e.ProcessSlashings(state, cfg.ProportionalSlashingMultiplierAltair)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case version.Bellatrix:
|
||||
state, err = e.ProcessSlashings(state, cfg.ProportionalSlashingMultiplierBellatrix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
return nil, errors.Errorf("invalid state type version: %T", state.Version())
|
||||
proportionalSlashingMultipler, err := state.ProportionalSlashingMultiplier()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
state, err = e.ProcessSlashings(state, proportionalSlashingMultipler)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
state, err = e.ProcessEth1DataReset(state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -173,7 +173,8 @@ func TestProcessDeposits_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T)
|
||||
},
|
||||
}
|
||||
balances := []uint64{0, 50}
|
||||
root := depositTrie.HashTreeRoot()
|
||||
root, err := depositTrie.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
beaconState, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Validators: registry,
|
||||
Balances: balances,
|
||||
@@ -233,7 +234,8 @@ func TestProcessDeposit_SkipsInvalidDeposit(t *testing.T) {
|
||||
dep[0].Data.Signature = make([]byte, 96)
|
||||
trie, _, err := util.DepositTrieFromDeposits(dep)
|
||||
require.NoError(t, err)
|
||||
root := trie.HashTreeRoot()
|
||||
root, err := trie.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
eth1Data := ðpb.Eth1Data{
|
||||
DepositRoot: root[:],
|
||||
DepositCount: 1,
|
||||
@@ -289,7 +291,9 @@ func TestPreGenesisDeposits_SkipInvalidDeposit(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
dep[i].Proof = proof
|
||||
}
|
||||
root := trie.HashTreeRoot()
|
||||
root, err := trie.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
eth1Data := ðpb.Eth1Data{
|
||||
DepositRoot: root[:],
|
||||
DepositCount: 1,
|
||||
@@ -376,7 +380,9 @@ func TestProcessDeposit_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T)
|
||||
},
|
||||
}
|
||||
balances := []uint64{0, 50}
|
||||
root := depositTrie.HashTreeRoot()
|
||||
root, err := depositTrie.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
beaconState, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Validators: registry,
|
||||
Balances: balances,
|
||||
|
||||
@@ -41,7 +41,10 @@ func UpdateGenesisEth1Data(state state.BeaconState, deposits []*ethpb.Deposit, e
|
||||
}
|
||||
}
|
||||
|
||||
depositRoot := t.HashTreeRoot()
|
||||
depositRoot, err := t.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
eth1Data.DepositRoot = depositRoot[:]
|
||||
err = state.SetEth1Data(eth1Data)
|
||||
if err != nil {
|
||||
|
||||
@@ -20,7 +20,9 @@ import (
|
||||
var runAmount = 25
|
||||
|
||||
func BenchmarkExecuteStateTransition_FullBlock(b *testing.B) {
|
||||
benchmark.SetBenchmarkConfig()
|
||||
undo, err := benchmark.SetBenchmarkConfig()
|
||||
require.NoError(b, err)
|
||||
defer undo()
|
||||
beaconState, err := benchmark.PreGenState1Epoch()
|
||||
require.NoError(b, err)
|
||||
cleanStates := clonedStates(beaconState)
|
||||
@@ -37,7 +39,9 @@ func BenchmarkExecuteStateTransition_FullBlock(b *testing.B) {
|
||||
}
|
||||
|
||||
func BenchmarkExecuteStateTransition_WithCache(b *testing.B) {
|
||||
benchmark.SetBenchmarkConfig()
|
||||
undo, err := benchmark.SetBenchmarkConfig()
|
||||
require.NoError(b, err)
|
||||
defer undo()
|
||||
|
||||
beaconState, err := benchmark.PreGenState1Epoch()
|
||||
require.NoError(b, err)
|
||||
@@ -67,7 +71,9 @@ func BenchmarkExecuteStateTransition_WithCache(b *testing.B) {
|
||||
}
|
||||
|
||||
func BenchmarkProcessEpoch_2FullEpochs(b *testing.B) {
|
||||
benchmark.SetBenchmarkConfig()
|
||||
undo, err := benchmark.SetBenchmarkConfig()
|
||||
require.NoError(b, err)
|
||||
defer undo()
|
||||
beaconState, err := benchmark.PreGenstateFullEpochs()
|
||||
require.NoError(b, err)
|
||||
|
||||
|
||||
@@ -579,10 +579,10 @@ func TestProcessSlots_OnlyAltairEpoch(t *testing.T) {
|
||||
|
||||
func TestProcessSlots_OnlyBellatrixEpoch(t *testing.T) {
|
||||
transition.SkipSlotCache.Disable()
|
||||
conf := params.BeaconConfig()
|
||||
params.SetupTestConfigCleanup(t)
|
||||
conf := params.BeaconConfig().Copy()
|
||||
conf.BellatrixForkEpoch = 5
|
||||
params.OverrideBeaconConfig(conf)
|
||||
defer params.UseMainnetConfig()
|
||||
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch*6))
|
||||
|
||||
@@ -14,7 +14,7 @@ var ErrNotFoundState = kv.ErrNotFoundState
|
||||
// ErrNotFoundOriginBlockRoot wraps ErrNotFound for an error specific to the origin block root.
|
||||
var ErrNotFoundOriginBlockRoot = kv.ErrNotFoundOriginBlockRoot
|
||||
|
||||
// ErrNotFoundOriginBlockRoot wraps ErrNotFound for an error specific to the origin block root.
|
||||
// ErrNotFoundBackfillBlockRoot wraps ErrNotFound for an error specific to the backfill block root.
|
||||
var ErrNotFoundBackfillBlockRoot = kv.ErrNotFoundBackfillBlockRoot
|
||||
|
||||
// ErrNotFoundGenesisBlockRoot means no genesis block root was found, indicating the db was not initialized with genesis
|
||||
|
||||
@@ -16,7 +16,7 @@ var ErrNotFoundOriginBlockRoot = errors.Wrap(ErrNotFound, "OriginBlockRoot")
|
||||
// ErrNotFoundGenesisBlockRoot means no genesis block root was found, indicating the db was not initialized with genesis
|
||||
var ErrNotFoundGenesisBlockRoot = errors.Wrap(ErrNotFound, "OriginGenesisRoot")
|
||||
|
||||
// ErrNotFoundOriginBlockRoot is an error specifically for the origin block root getter
|
||||
// ErrNotFoundBackfillBlockRoot is an error specifically for the origin block root getter
|
||||
var ErrNotFoundBackfillBlockRoot = errors.Wrap(ErrNotFound, "BackfillBlockRoot")
|
||||
|
||||
// ErrNotFoundFeeRecipient is a not found error specifically for the fee recipient getter
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db/iface"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
)
|
||||
|
||||
@@ -28,41 +29,70 @@ func testGenesisDataSaved(t *testing.T, db iface.Database) {
|
||||
ctx := context.Background()
|
||||
|
||||
gb, err := db.GenesisBlock(ctx)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, gb)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, gb)
|
||||
|
||||
gbHTR, err := gb.Block().HashTreeRoot()
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
gss, err := db.StateSummary(ctx, gbHTR)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, gss)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, gss)
|
||||
|
||||
head, err := db.HeadBlock(ctx)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, head)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, head)
|
||||
|
||||
headHTR, err := head.Block().HashTreeRoot()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, gbHTR, headHTR, "head block does not match genesis block")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, gbHTR, headHTR, "head block does not match genesis block")
|
||||
}
|
||||
|
||||
func TestLoadGenesisFromFile(t *testing.T) {
|
||||
// for this test to work, we need the active config to have these properties:
|
||||
// - fork version schedule that matches mainnnet.genesis.ssz
|
||||
// - name that does not match params.MainnetName - otherwise we'll trigger the codepath that loads the state
|
||||
// from the compiled binary.
|
||||
// to do that, first we need to rewrite the mainnet fork schedule so it won't conflict with a renamed config that
|
||||
// uses the mainnet fork schedule. construct the differently named mainnet config and set it active.
|
||||
// finally, revert all this at the end of the test.
|
||||
|
||||
// first get the real mainnet out of the way by overwriting it schedule.
|
||||
cfg, err := params.ByName(params.MainnetName)
|
||||
require.NoError(t, err)
|
||||
cfg = cfg.Copy()
|
||||
reversioned := cfg.Copy()
|
||||
params.FillTestVersions(reversioned, 127)
|
||||
undo, err := params.SetActiveWithUndo(reversioned)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
require.NoError(t, undo())
|
||||
}()
|
||||
|
||||
// then set up a new config, which uses the real mainnet schedule, and activate it
|
||||
cfg.ConfigName = "genesis-test"
|
||||
undo2, err := params.SetActiveWithUndo(cfg)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
require.NoError(t, undo2())
|
||||
}()
|
||||
|
||||
fp := "testdata/mainnet.genesis.ssz"
|
||||
rfp, err := bazel.Runfile(fp)
|
||||
if err == nil {
|
||||
fp = rfp
|
||||
}
|
||||
sb, err := os.ReadFile(fp)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
|
||||
db := setupDB(t)
|
||||
assert.NoError(t, db.LoadGenesis(context.Background(), sb))
|
||||
require.NoError(t, db.LoadGenesis(context.Background(), sb))
|
||||
testGenesisDataSaved(t, db)
|
||||
|
||||
// Loading the same genesis again should not throw an error
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, db.LoadGenesis(context.Background(), sb))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.LoadGenesis(context.Background(), sb))
|
||||
testGenesisDataSaved(t, db)
|
||||
}
|
||||
|
||||
func TestLoadGenesisFromFile_mismatchedForkVersion(t *testing.T) {
|
||||
@@ -80,11 +110,15 @@ func TestLoadGenesisFromFile_mismatchedForkVersion(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestEnsureEmbeddedGenesis(t *testing.T) {
|
||||
// Embedded Genesis works with Mainnet config
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.ConfigName = params.MainnetName
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
// Embedded Genesis works with Mainnet config
|
||||
cfg := params.MainnetConfig().Copy()
|
||||
cfg.SecondsPerSlot = 1
|
||||
undo, err := params.SetActiveWithUndo(cfg)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
require.NoError(t, undo())
|
||||
}()
|
||||
|
||||
ctx := context.Background()
|
||||
db := setupDB(t)
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
|
||||
func init() {
|
||||
// Override network name so that hardcoded genesis files are not loaded.
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.ConfigName = "test"
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
if err := params.SetActive(params.MainnetTestConfig()); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,11 +12,9 @@ import (
|
||||
)
|
||||
|
||||
func TestSaveOrigin(t *testing.T) {
|
||||
// Embedded Genesis works with Mainnet config
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.ConfigName = params.MainnetName
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
// Embedded Genesis works with Mainnet config
|
||||
params.OverrideBeaconConfig(params.MainnetConfig().Copy())
|
||||
|
||||
ctx := context.Background()
|
||||
db := setupDB(t)
|
||||
|
||||
@@ -81,7 +81,7 @@ func (s *Store) removeNode(ctx context.Context, node *Node) ([][32]byte, error)
|
||||
if i != len(children)-1 {
|
||||
children[i] = children[len(children)-1]
|
||||
}
|
||||
node.parent.children = children[:len(children)-2]
|
||||
node.parent.children = children[:len(children)-1]
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
@@ -203,3 +203,26 @@ func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) {
|
||||
require.DeepEqual(t, params.BeaconConfig().ZeroHash, f.store.previousProposerBoostRoot)
|
||||
f.store.proposerBoostLock.RUnlock()
|
||||
}
|
||||
|
||||
// This is a regression test (10565)
|
||||
// ----- C
|
||||
// /
|
||||
// A <- B
|
||||
// \
|
||||
// ----------D
|
||||
// D is invalid
|
||||
|
||||
func TestSetOptimisticToInvalid_CorrectChildren(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'D'}, 1, 1))
|
||||
|
||||
_, err := f.store.setOptimisticToInvalid(ctx, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'A'})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(f.store.nodeByRoot[[32]byte{'a'}].children))
|
||||
|
||||
}
|
||||
|
||||
@@ -51,7 +51,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
slot,
|
||||
newRoot,
|
||||
headRoot,
|
||||
params.BeaconConfig().ZeroHash,
|
||||
zeroHash,
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
),
|
||||
@@ -75,7 +75,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
slot,
|
||||
newRoot,
|
||||
headRoot,
|
||||
params.BeaconConfig().ZeroHash,
|
||||
zeroHash,
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
),
|
||||
@@ -101,7 +101,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
slot,
|
||||
newRoot,
|
||||
headRoot,
|
||||
params.BeaconConfig().ZeroHash,
|
||||
zeroHash,
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
),
|
||||
@@ -111,35 +111,37 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 3")
|
||||
|
||||
// Insert a second block at slot 3 into the tree and boost its score.
|
||||
// Insert a second block at slot 4 into the tree and boost its score.
|
||||
// 0
|
||||
// |
|
||||
// 1
|
||||
// |
|
||||
// 2
|
||||
// / \
|
||||
// 3 4 <- HEAD
|
||||
slot = types.Slot(3)
|
||||
// 3 |
|
||||
// 4 <- HEAD
|
||||
slot = types.Slot(4)
|
||||
newRoot = indexToHash(4)
|
||||
require.NoError(t,
|
||||
f.InsertOptimisticBlock(
|
||||
ctx,
|
||||
slot,
|
||||
newRoot,
|
||||
headRoot,
|
||||
params.BeaconConfig().ZeroHash,
|
||||
indexToHash(2),
|
||||
zeroHash,
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
),
|
||||
)
|
||||
f.ProcessAttestation(ctx, []uint64{3}, newRoot, fEpoch)
|
||||
clockSlot := types.Slot(3)
|
||||
clockSlot := types.Slot(4)
|
||||
args := &forkchoicetypes.ProposerBoostRootArgs{
|
||||
BlockRoot: newRoot,
|
||||
BlockSlot: slot,
|
||||
CurrentSlot: clockSlot,
|
||||
SecondsIntoSlot: 0,
|
||||
}
|
||||
|
||||
require.NoError(t, f.BoostProposerRoot(ctx, args))
|
||||
headRoot, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
|
||||
require.NoError(t, err)
|
||||
@@ -166,17 +168,27 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
//
|
||||
// In this case, we have a small fork:
|
||||
//
|
||||
// (A: 54) -> (B: 44) -> (C: 34)
|
||||
// (A: 54) -> (B: 44) -> (C: 10)
|
||||
// \_->(D: 24)
|
||||
//
|
||||
// So B has its own weight, 10, and the sum of both C and D. That's why we see weight 54 in the
|
||||
// middle instead of the normal progression of (44 -> 34 -> 24).
|
||||
// middle instead of the normal progression of (54 -> 44 -> 24).
|
||||
node1 := f.store.nodeByRoot[indexToHash(1)]
|
||||
require.Equal(t, node1.weight, uint64(54))
|
||||
node2 := f.store.nodeByRoot[indexToHash(2)]
|
||||
require.Equal(t, node2.weight, uint64(44))
|
||||
node3 := f.store.nodeByRoot[indexToHash(4)]
|
||||
require.Equal(t, node3.weight, uint64(24))
|
||||
node3 := f.store.nodeByRoot[indexToHash(3)]
|
||||
require.Equal(t, node3.weight, uint64(10))
|
||||
node4 := f.store.nodeByRoot[indexToHash(4)]
|
||||
require.Equal(t, node4.weight, uint64(24))
|
||||
|
||||
// Regression: process attestations for C, check that it
|
||||
// becomes head, we need two attestations to have C.weight = 30 > 24 = D.weight
|
||||
f.ProcessAttestation(ctx, []uint64{4, 5}, indexToHash(3), fEpoch)
|
||||
headRoot, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(3), headRoot, "Incorrect head for justified epoch at slot 4")
|
||||
|
||||
})
|
||||
t.Run("vanilla ex ante attack", func(t *testing.T) {
|
||||
f := setup(jEpoch, fEpoch)
|
||||
|
||||
@@ -24,6 +24,7 @@ func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
|
||||
|
||||
// SetOptimisticToValid is called with the root of a block that was returned as
|
||||
// VALID by the EL.
|
||||
//
|
||||
// WARNING: This method returns an error if the root is not found in forkchoice
|
||||
func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [32]byte) error {
|
||||
f.store.nodesLock.Lock()
|
||||
|
||||
@@ -51,7 +51,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
slot,
|
||||
newRoot,
|
||||
headRoot,
|
||||
params.BeaconConfig().ZeroHash,
|
||||
zeroHash,
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
),
|
||||
@@ -75,7 +75,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
slot,
|
||||
newRoot,
|
||||
headRoot,
|
||||
params.BeaconConfig().ZeroHash,
|
||||
zeroHash,
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
),
|
||||
@@ -101,7 +101,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
slot,
|
||||
newRoot,
|
||||
headRoot,
|
||||
params.BeaconConfig().ZeroHash,
|
||||
zeroHash,
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
),
|
||||
@@ -111,29 +111,30 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 3")
|
||||
|
||||
// Insert a second block at slot 3 into the tree and boost its score.
|
||||
// Insert a second block at slot 4 into the tree and boost its score.
|
||||
// 0
|
||||
// |
|
||||
// 1
|
||||
// |
|
||||
// 2
|
||||
// / \
|
||||
// 3 4 <- HEAD
|
||||
slot = types.Slot(3)
|
||||
// 3 |
|
||||
// 4 <- HEAD
|
||||
slot = types.Slot(4)
|
||||
newRoot = indexToHash(4)
|
||||
require.NoError(t,
|
||||
f.InsertOptimisticBlock(
|
||||
ctx,
|
||||
slot,
|
||||
newRoot,
|
||||
headRoot,
|
||||
params.BeaconConfig().ZeroHash,
|
||||
indexToHash(2),
|
||||
zeroHash,
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
),
|
||||
)
|
||||
f.ProcessAttestation(ctx, []uint64{3}, newRoot, fEpoch)
|
||||
clockSlot := types.Slot(3)
|
||||
clockSlot := types.Slot(4)
|
||||
args := &forkchoicetypes.ProposerBoostRootArgs{
|
||||
BlockRoot: newRoot,
|
||||
BlockSlot: slot,
|
||||
@@ -166,14 +167,22 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
//
|
||||
// In this case, we have a small fork:
|
||||
//
|
||||
// (A: 54) -> (B: 44) -> (C: 24)
|
||||
// \_->(D: 10)
|
||||
// (A: 54) -> (B: 44) -> (C: 10)
|
||||
// \_->(D: 24)
|
||||
//
|
||||
// So B has its own weight, 10, and the sum of both C and D. That's why we see weight 54 in the
|
||||
// middle instead of the normal progression of (44 -> 34 -> 24).
|
||||
// middle instead of the normal progression of (54 -> 44 -> 24).
|
||||
require.Equal(t, f.store.nodes[1].weight, uint64(54))
|
||||
require.Equal(t, f.store.nodes[2].weight, uint64(44))
|
||||
require.Equal(t, f.store.nodes[3].weight, uint64(34))
|
||||
require.Equal(t, f.store.nodes[3].weight, uint64(10))
|
||||
require.Equal(t, f.store.nodes[4].weight, uint64(24))
|
||||
|
||||
// Regression: process attestations for C, check that it
|
||||
// becomes head, we need two attestations to have C.weight = 30 > 24 = D.weight
|
||||
f.ProcessAttestation(ctx, []uint64{4, 5}, indexToHash(3), fEpoch)
|
||||
headRoot, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(3), headRoot, "Incorrect head for justified epoch at slot 4")
|
||||
})
|
||||
t.Run("vanilla ex ante attack", func(t *testing.T) {
|
||||
f := setup(jEpoch, fEpoch)
|
||||
|
||||
@@ -130,7 +130,7 @@ func TestUpdateSyncCommitteeTrackedVals(t *testing.T) {
|
||||
|
||||
func TestNewService(t *testing.T) {
|
||||
config := &ValidatorMonitorConfig{}
|
||||
tracked := []types.ValidatorIndex{}
|
||||
var tracked []types.ValidatorIndex
|
||||
ctx := context.Background()
|
||||
_, err := NewService(ctx, config, tracked)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -18,6 +18,7 @@ go_library(
|
||||
"//api/gateway:go_default_library",
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/builder:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/cmd"
|
||||
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
@@ -22,19 +23,22 @@ func configureTracing(cliCtx *cli.Context) error {
|
||||
)
|
||||
}
|
||||
|
||||
func configureChainConfig(cliCtx *cli.Context) {
|
||||
func configureChainConfig(cliCtx *cli.Context) error {
|
||||
if cliCtx.IsSet(cmd.ChainConfigFileFlag.Name) {
|
||||
chainConfigFileName := cliCtx.String(cmd.ChainConfigFileFlag.Name)
|
||||
params.LoadChainConfigFile(chainConfigFileName, nil)
|
||||
return params.LoadChainConfigFile(chainConfigFileName, nil)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func configureHistoricalSlasher(cliCtx *cli.Context) {
|
||||
func configureHistoricalSlasher(cliCtx *cli.Context) error {
|
||||
if cliCtx.Bool(flags.HistoricalSlasherNode.Name) {
|
||||
c := params.BeaconConfig()
|
||||
c := params.BeaconConfig().Copy()
|
||||
// Save a state every 4 epochs.
|
||||
c.SlotsPerArchivedPoint = params.BeaconConfig().SlotsPerEpoch * 4
|
||||
params.OverrideBeaconConfig(c)
|
||||
if err := params.SetActive(c); err != nil {
|
||||
return err
|
||||
}
|
||||
cmdConfig := cmd.Get()
|
||||
// Allow up to 4096 attestations at a time to be requested from the beacon nde.
|
||||
cmdConfig.MaxRPCPageSize = int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxAttestations)) // lint:ignore uintcast -- Page size should not exceed int64 with these constants.
|
||||
@@ -45,40 +49,52 @@ func configureHistoricalSlasher(cliCtx *cli.Context) {
|
||||
cmdConfig.MaxRPCPageSize,
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func configureSafeSlotsToImportOptimistically(cliCtx *cli.Context) {
|
||||
func configureSafeSlotsToImportOptimistically(cliCtx *cli.Context) error {
|
||||
if cliCtx.IsSet(flags.SafeSlotsToImportOptimistically.Name) {
|
||||
c := params.BeaconConfig()
|
||||
c := params.BeaconConfig().Copy()
|
||||
c.SafeSlotsToImportOptimistically = types.Slot(cliCtx.Int(flags.SafeSlotsToImportOptimistically.Name))
|
||||
params.OverrideBeaconConfig(c)
|
||||
if err := params.SetActive(c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func configureSlotsPerArchivedPoint(cliCtx *cli.Context) {
|
||||
func configureSlotsPerArchivedPoint(cliCtx *cli.Context) error {
|
||||
if cliCtx.IsSet(flags.SlotsPerArchivedPoint.Name) {
|
||||
c := params.BeaconConfig()
|
||||
c := params.BeaconConfig().Copy()
|
||||
c.SlotsPerArchivedPoint = types.Slot(cliCtx.Int(flags.SlotsPerArchivedPoint.Name))
|
||||
params.OverrideBeaconConfig(c)
|
||||
if err := params.SetActive(c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func configureEth1Config(cliCtx *cli.Context) {
|
||||
func configureEth1Config(cliCtx *cli.Context) error {
|
||||
c := params.BeaconConfig().Copy()
|
||||
if cliCtx.IsSet(flags.ChainID.Name) {
|
||||
c := params.BeaconConfig()
|
||||
c.DepositChainID = cliCtx.Uint64(flags.ChainID.Name)
|
||||
params.OverrideBeaconConfig(c)
|
||||
if err := params.SetActive(c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if cliCtx.IsSet(flags.NetworkID.Name) {
|
||||
c := params.BeaconConfig()
|
||||
c.DepositNetworkID = cliCtx.Uint64(flags.NetworkID.Name)
|
||||
params.OverrideBeaconConfig(c)
|
||||
if err := params.SetActive(c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if cliCtx.IsSet(flags.DepositContractFlag.Name) {
|
||||
c := params.BeaconConfig()
|
||||
c.DepositContractAddress = cliCtx.String(flags.DepositContractFlag.Name)
|
||||
params.OverrideBeaconConfig(c)
|
||||
if err := params.SetActive(c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func configureNetwork(cliCtx *cli.Context) {
|
||||
@@ -94,17 +110,22 @@ func configureNetwork(cliCtx *cli.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
func configureInteropConfig(cliCtx *cli.Context) {
|
||||
func configureInteropConfig(cliCtx *cli.Context) error {
|
||||
// an explicit chain config was specified, don't mess with it
|
||||
if cliCtx.IsSet(cmd.ChainConfigFileFlag.Name) {
|
||||
return nil
|
||||
}
|
||||
genStateIsSet := cliCtx.IsSet(flags.InteropGenesisStateFlag.Name)
|
||||
genTimeIsSet := cliCtx.IsSet(flags.InteropGenesisTimeFlag.Name)
|
||||
numValsIsSet := cliCtx.IsSet(flags.InteropNumValidatorsFlag.Name)
|
||||
votesIsSet := cliCtx.IsSet(flags.InteropMockEth1DataVotesFlag.Name)
|
||||
|
||||
if genStateIsSet || genTimeIsSet || numValsIsSet || votesIsSet {
|
||||
bCfg := params.BeaconConfig()
|
||||
bCfg.ConfigName = "interop"
|
||||
params.OverrideBeaconConfig(bCfg)
|
||||
if err := params.SetActive(params.InteropConfig().Copy()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func configureExecutionSetting(cliCtx *cli.Context) error {
|
||||
@@ -112,12 +133,22 @@ func configureExecutionSetting(cliCtx *cli.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
c := params.BeaconConfig()
|
||||
c := params.BeaconConfig().Copy()
|
||||
ha := cliCtx.String(flags.SuggestedFeeRecipient.Name)
|
||||
if !common.IsHexAddress(ha) {
|
||||
return fmt.Errorf("%s is not a valid fee recipient address", ha)
|
||||
}
|
||||
c.DefaultFeeRecipient = common.HexToAddress(ha)
|
||||
params.OverrideBeaconConfig(c)
|
||||
return nil
|
||||
mixedcaseAddress, err := common.NewMixedcaseAddressFromString(ha)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not decode fee recipient %s", ha)
|
||||
}
|
||||
checksumAddress := common.HexToAddress(ha)
|
||||
if !mixedcaseAddress.ValidChecksum() {
|
||||
log.Warnf("Fee recipient %s is not a checksum Ethereum address. "+
|
||||
"The checksummed address is %s and will be used as the fee recipient. "+
|
||||
"We recommend using a mixed-case address (checksum) "+
|
||||
"to prevent spelling mistakes in your fee recipient Ethereum address", ha, checksumAddress.Hex())
|
||||
}
|
||||
c.DefaultFeeRecipient = checksumAddress
|
||||
return params.SetActive(c)
|
||||
}
|
||||
|
||||
@@ -26,7 +26,7 @@ func TestConfigureHistoricalSlasher(t *testing.T) {
|
||||
set.Bool(flags.HistoricalSlasherNode.Name, true, "")
|
||||
cliCtx := cli.NewContext(&app, set, nil)
|
||||
|
||||
configureHistoricalSlasher(cliCtx)
|
||||
require.NoError(t, configureHistoricalSlasher(cliCtx))
|
||||
|
||||
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch*4, params.BeaconConfig().SlotsPerArchivedPoint)
|
||||
assert.LogsContain(t, hook,
|
||||
@@ -46,7 +46,7 @@ func TestConfigureSafeSlotsToImportOptimistically(t *testing.T) {
|
||||
require.NoError(t, set.Set(flags.SafeSlotsToImportOptimistically.Name, strconv.Itoa(128)))
|
||||
cliCtx := cli.NewContext(&app, set, nil)
|
||||
|
||||
configureSafeSlotsToImportOptimistically(cliCtx)
|
||||
require.NoError(t, configureSafeSlotsToImportOptimistically(cliCtx))
|
||||
|
||||
assert.Equal(t, types.Slot(128), params.BeaconConfig().SafeSlotsToImportOptimistically)
|
||||
}
|
||||
@@ -60,7 +60,7 @@ func TestConfigureSlotsPerArchivedPoint(t *testing.T) {
|
||||
require.NoError(t, set.Set(flags.SlotsPerArchivedPoint.Name, strconv.Itoa(100)))
|
||||
cliCtx := cli.NewContext(&app, set, nil)
|
||||
|
||||
configureSlotsPerArchivedPoint(cliCtx)
|
||||
require.NoError(t, configureSlotsPerArchivedPoint(cliCtx))
|
||||
|
||||
assert.Equal(t, types.Slot(100), params.BeaconConfig().SlotsPerArchivedPoint)
|
||||
}
|
||||
@@ -78,7 +78,7 @@ func TestConfigureProofOfWork(t *testing.T) {
|
||||
require.NoError(t, set.Set(flags.DepositContractFlag.Name, "deposit-contract"))
|
||||
cliCtx := cli.NewContext(&app, set, nil)
|
||||
|
||||
configureEth1Config(cliCtx)
|
||||
require.NoError(t, configureEth1Config(cliCtx))
|
||||
|
||||
assert.Equal(t, uint64(100), params.BeaconConfig().DepositChainID)
|
||||
assert.Equal(t, uint64(200), params.BeaconConfig().DepositNetworkID)
|
||||
@@ -87,6 +87,7 @@ func TestConfigureProofOfWork(t *testing.T) {
|
||||
|
||||
func TestConfigureExecutionSetting(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
hook := logTest.NewGlobal()
|
||||
|
||||
app := cli.App{}
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
@@ -102,11 +103,15 @@ func TestConfigureExecutionSetting(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, common.HexToAddress("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"), params.BeaconConfig().DefaultFeeRecipient)
|
||||
|
||||
require.NoError(t, set.Set(flags.SuggestedFeeRecipient.Name, "0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"))
|
||||
assert.LogsContain(t, hook,
|
||||
"is not a checksum Ethereum address",
|
||||
)
|
||||
require.NoError(t, set.Set(flags.SuggestedFeeRecipient.Name, "0xaAaAaAaaAaAaAaaAaAAAAAAAAaaaAaAaAaaAaaAa"))
|
||||
cliCtx = cli.NewContext(&app, set, nil)
|
||||
err = configureExecutionSetting(cliCtx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, common.HexToAddress("0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"), params.BeaconConfig().DefaultFeeRecipient)
|
||||
assert.Equal(t, common.HexToAddress("0xaAaAaAaaAaAaAaaAaAAAAAAAAaaaAaAaAaaAaaAa"), params.BeaconConfig().DefaultFeeRecipient)
|
||||
|
||||
}
|
||||
|
||||
func TestConfigureNetwork(t *testing.T) {
|
||||
@@ -193,7 +198,7 @@ func TestConfigureInterop(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
configureInteropConfig(tt.flagSetter())
|
||||
require.NoError(t, configureInteropConfig(tt.flagSetter()))
|
||||
assert.DeepEqual(t, tt.configName, params.BeaconConfig().ConfigName)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -20,6 +20,7 @@ import (
|
||||
apigateway "github.com/prysmaticlabs/prysm/api/gateway"
|
||||
"github.com/prysmaticlabs/prysm/async/event"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/builder"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
@@ -76,6 +77,7 @@ const debugGrpcMaxMsgSize = 1 << 27
|
||||
type serviceFlagOpts struct {
|
||||
blockchainFlagOpts []blockchain.Option
|
||||
powchainFlagOpts []powchain.Option
|
||||
builderOpts []builder.Option
|
||||
}
|
||||
|
||||
// BeaconNode defines a struct that handles the services running a random beacon chain
|
||||
@@ -118,16 +120,32 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
|
||||
return nil, err
|
||||
}
|
||||
prereqs.WarnIfPlatformNotSupported(cliCtx.Context)
|
||||
features.ConfigureBeaconChain(cliCtx)
|
||||
cmd.ConfigureBeaconChain(cliCtx)
|
||||
if err := features.ConfigureBeaconChain(cliCtx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := cmd.ConfigureBeaconChain(cliCtx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
flags.ConfigureGlobalFlags(cliCtx)
|
||||
configureChainConfig(cliCtx)
|
||||
configureHistoricalSlasher(cliCtx)
|
||||
configureSafeSlotsToImportOptimistically(cliCtx)
|
||||
configureSlotsPerArchivedPoint(cliCtx)
|
||||
configureEth1Config(cliCtx)
|
||||
if err := configureChainConfig(cliCtx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := configureHistoricalSlasher(cliCtx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := configureSafeSlotsToImportOptimistically(cliCtx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := configureSlotsPerArchivedPoint(cliCtx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := configureEth1Config(cliCtx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
configureNetwork(cliCtx)
|
||||
configureInteropConfig(cliCtx)
|
||||
if err := configureInteropConfig(cliCtx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := configureExecutionSetting(cliCtx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -210,6 +228,11 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
|
||||
log.Debugln("Starting Fork Choice")
|
||||
beacon.startForkChoice()
|
||||
|
||||
log.Debugln("Registering builder service")
|
||||
if err := beacon.registerBuilderService(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debugln("Registering Blockchain Service")
|
||||
if err := beacon.registerBlockchainService(); err != nil {
|
||||
return nil, err
|
||||
@@ -550,6 +573,14 @@ func (b *BeaconNode) fetchP2P() p2p.P2P {
|
||||
return p
|
||||
}
|
||||
|
||||
func (b *BeaconNode) fetchBuilderService() *builder.Service {
|
||||
var s *builder.Service
|
||||
if err := b.services.FetchService(&s); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (b *BeaconNode) registerAttestationPool() error {
|
||||
s, err := attestations.NewService(b.ctx, &attestations.Config{
|
||||
Pool: b.attestationPool,
|
||||
@@ -779,6 +810,7 @@ func (b *BeaconNode) registerRPCService() error {
|
||||
PeerManager: p2pService,
|
||||
MetadataProvider: p2pService,
|
||||
ChainInfoFetcher: chainService,
|
||||
HeadUpdater: chainService,
|
||||
HeadFetcher: chainService,
|
||||
CanonicalFetcher: chainService,
|
||||
ForkFetcher: chainService,
|
||||
@@ -787,6 +819,8 @@ func (b *BeaconNode) registerRPCService() error {
|
||||
AttestationReceiver: chainService,
|
||||
GenesisTimeFetcher: chainService,
|
||||
GenesisFetcher: chainService,
|
||||
BlockBuilder: b.fetchBuilderService(),
|
||||
OptimisticModeFetcher: chainService,
|
||||
AttestationsPool: b.attestationPool,
|
||||
ExitPool: b.exitPool,
|
||||
SlashingsPool: b.slashingsPool,
|
||||
@@ -950,3 +984,12 @@ func (b *BeaconNode) registerValidatorMonitorService() error {
|
||||
}
|
||||
return b.services.RegisterService(svc)
|
||||
}
|
||||
|
||||
func (b *BeaconNode) registerBuilderService() error {
|
||||
options := b.serviceFlagOpts.builderOpts
|
||||
svc, err := builder.NewService(b.ctx, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return b.services.RegisterService(svc)
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package node
|
||||
|
||||
import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/builder"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
|
||||
)
|
||||
|
||||
@@ -23,3 +24,11 @@ func WithPowchainFlagOptions(opts []powchain.Option) Option {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithBuilderFlagOptions includes functional options for the builder service related to CLI flags.
|
||||
func WithBuilderFlagOptions(opts []builder.Option) Option {
|
||||
return func(bn *BeaconNode) error {
|
||||
bn.serviceFlagOpts.builderOpts = opts
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -410,7 +410,7 @@ func TestRefreshENR_ForkBoundaries(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Update params
|
||||
cfg := params.BeaconConfig()
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.AltairForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
@@ -441,7 +441,7 @@ func TestRefreshENR_ForkBoundaries(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Update params
|
||||
cfg := params.BeaconConfig()
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.AltairForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
@@ -471,7 +471,7 @@ func TestRefreshENR_ForkBoundaries(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Update params
|
||||
cfg := params.BeaconConfig()
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.AltairForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
@@ -114,6 +114,7 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
hook := logTest.NewGlobal()
|
||||
logrus.SetLevel(logrus.TraceLevel)
|
||||
port := 2000
|
||||
@@ -136,14 +137,13 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
|
||||
UDPPort: uint(port),
|
||||
}
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
var listeners []*discover.UDPv5
|
||||
for i := 1; i <= 5; i++ {
|
||||
port = 3000 + i
|
||||
cfg.UDPPort = uint(port)
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
|
||||
c := params.BeaconConfig()
|
||||
c := params.BeaconConfig().Copy()
|
||||
nextForkEpoch := types.Epoch(i)
|
||||
c.ForkVersionSchedule[[4]byte{'A', 'B', 'C', 'D'}] = nextForkEpoch
|
||||
params.OverrideBeaconConfig(c)
|
||||
@@ -209,7 +209,7 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
|
||||
|
||||
func TestDiscv5_AddRetrieveForkEntryENR(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
c := params.BeaconConfig()
|
||||
c := params.BeaconConfig().Copy()
|
||||
c.ForkVersionSchedule = map[[4]byte]types.Epoch{
|
||||
bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): 0,
|
||||
{0, 0, 0, 1}: 1,
|
||||
@@ -264,7 +264,7 @@ func TestAddForkEntry_Genesis(t *testing.T) {
|
||||
db, err := enode.OpenDB("")
|
||||
require.NoError(t, err)
|
||||
|
||||
bCfg := params.BeaconConfig()
|
||||
bCfg := params.MainnetConfig().Copy()
|
||||
bCfg.ForkVersionSchedule = map[[4]byte]types.Epoch{}
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)] = bCfg.GenesisEpoch
|
||||
params.OverrideBeaconConfig(bCfg)
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
|
||||
func TestCorrect_ActiveValidatorsCount(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.MainnetConfig()
|
||||
cfg := params.MainnetConfig().Copy()
|
||||
cfg.ConfigName = "test"
|
||||
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
@@ -54,7 +54,9 @@ func init() {
|
||||
for k, v := range gossipTopicMappings {
|
||||
GossipTypeMapping[reflect.TypeOf(v)] = k
|
||||
}
|
||||
// Specially handle Altair Objects.
|
||||
// Specially handle Altair objects.
|
||||
GossipTypeMapping[reflect.TypeOf(ðpb.SignedBeaconBlockAltair{})] = BlockSubnetTopicFormat
|
||||
// Specially handle Bellatrix objects.
|
||||
GossipTypeMapping[reflect.TypeOf(ðpb.SignedBeaconBlockBellatrix{})] = BlockSubnetTopicFormat
|
||||
GossipTypeMapping[reflect.TypeOf(ðpb.SignedBlindedBeaconBlockBellatrix{})] = BlockSubnetTopicFormat
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
)
|
||||
|
||||
func TestMappingHasNoDuplicates(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
m := make(map[reflect.Type]bool)
|
||||
for _, v := range gossipTopicMappings {
|
||||
if _, ok := m[reflect.TypeOf(v)]; ok {
|
||||
@@ -23,7 +24,7 @@ func TestMappingHasNoDuplicates(t *testing.T) {
|
||||
|
||||
func TestGossipTopicMappings_CorrectBlockType(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
bCfg := params.BeaconConfig()
|
||||
bCfg := params.BeaconConfig().Copy()
|
||||
altairForkEpoch := eth2types.Epoch(100)
|
||||
BellatrixForkEpoch := eth2types.Epoch(200)
|
||||
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
)
|
||||
|
||||
func TestMsgID_HashesCorrectly(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
|
||||
d, err := forks.CreateForkDigest(time.Now(), genesisValidatorsRoot)
|
||||
assert.NoError(t, err)
|
||||
@@ -36,6 +37,7 @@ func TestMsgID_HashesCorrectly(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMessageIDFunction_HashesCorrectlyAltair(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
|
||||
d, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, genesisValidatorsRoot)
|
||||
assert.NoError(t, err)
|
||||
@@ -65,6 +67,7 @@ func TestMessageIDFunction_HashesCorrectlyAltair(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMessageIDFunction_HashesCorrectlyBellatrix(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
|
||||
d, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, genesisValidatorsRoot)
|
||||
assert.NoError(t, err)
|
||||
@@ -94,6 +97,7 @@ func TestMessageIDFunction_HashesCorrectlyBellatrix(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMsgID_WithNilTopic(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
msg := &pubsubpb.Message{
|
||||
Data: make([]byte, 32),
|
||||
Topic: nil,
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
)
|
||||
|
||||
func TestPrivateKeyLoading(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
file, err := os.CreateTemp(t.TempDir(), "key")
|
||||
require.NoError(t, err)
|
||||
key, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
|
||||
@@ -44,6 +45,7 @@ func TestPrivateKeyLoading(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestIPV6Support(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
key, err := gethCrypto.GenerateKey()
|
||||
require.NoError(t, err)
|
||||
db, err := enode.OpenDB("")
|
||||
|
||||
@@ -4,10 +4,12 @@ import (
|
||||
"testing"
|
||||
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
)
|
||||
|
||||
func TestOverlayParameters(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
pms := pubsubGossipParam()
|
||||
assert.Equal(t, gossipSubD, pms.D, "gossipSubD")
|
||||
assert.Equal(t, gossipSubDlo, pms.Dlo, "gossipSubDlo")
|
||||
@@ -15,6 +17,7 @@ func TestOverlayParameters(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGossipParameters(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
setPubSubParameters()
|
||||
pms := pubsubGossipParam()
|
||||
assert.Equal(t, gossipSubMcacheLen, pms.HistoryLength, "gossipSubMcacheLen")
|
||||
@@ -23,6 +26,7 @@ func TestGossipParameters(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestFanoutParameters(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
pms := pubsubGossipParam()
|
||||
if pms.FanoutTTL != gossipSubFanoutTTL {
|
||||
t.Errorf("gossipSubFanoutTTL, wanted: %d, got: %d", gossipSubFanoutTTL, pms.FanoutTTL)
|
||||
@@ -30,6 +34,7 @@ func TestFanoutParameters(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestHeartbeatParameters(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
pms := pubsubGossipParam()
|
||||
if pms.HeartbeatInterval != gossipSubHeartbeatInterval {
|
||||
t.Errorf("gossipSubHeartbeatInterval, wanted: %d, got: %d", gossipSubHeartbeatInterval, pms.HeartbeatInterval)
|
||||
@@ -37,6 +42,7 @@ func TestHeartbeatParameters(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMiscParameters(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
setPubSubParameters()
|
||||
assert.Equal(t, randomSubD, pubsub.RandomSubD, "randomSubD")
|
||||
}
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/network/forks"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
@@ -21,6 +22,7 @@ import (
|
||||
)
|
||||
|
||||
func TestService_CanSubscribe(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
currentFork := [4]byte{0x01, 0x02, 0x03, 0x04}
|
||||
validProtocolSuffix := "/" + encoder.ProtocolSuffixSSZSnappy
|
||||
genesisTime := time.Now()
|
||||
@@ -115,11 +117,13 @@ func TestService_CanSubscribe(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_CanSubscribe_uninitialized(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
s := &Service{}
|
||||
require.Equal(t, false, s.CanSubscribe("foo"))
|
||||
}
|
||||
|
||||
func Test_scanfcheck(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
type args struct {
|
||||
input string
|
||||
format string
|
||||
@@ -191,6 +195,7 @@ func Test_scanfcheck(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGossipTopicMapping_scanfcheck_GossipTopicFormattingSanityCheck(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
// scanfcheck only supports integer based substitutions at the moment. Any others will
|
||||
// inaccurately fail validation.
|
||||
for _, topic := range AllTopics() {
|
||||
@@ -208,6 +213,7 @@ func TestGossipTopicMapping_scanfcheck_GossipTopicFormattingSanityCheck(t *testi
|
||||
}
|
||||
|
||||
func TestService_FilterIncomingSubscriptions(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
validProtocolSuffix := "/" + encoder.ProtocolSuffixSSZSnappy
|
||||
genesisTime := time.Now()
|
||||
valRoot := [32]byte{}
|
||||
@@ -328,6 +334,7 @@ func TestService_FilterIncomingSubscriptions(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_MonitorsStateForkUpdates(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
defer cancel()
|
||||
notifier := &mock.MockStateNotifier{}
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
)
|
||||
|
||||
func TestVerifyRPCMappings(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
assert.NoError(t, VerifyTopicMapping(RPCStatusTopicV1, &pb.Status{}), "Failed to verify status rpc topic")
|
||||
assert.NotNil(t, VerifyTopicMapping(RPCStatusTopicV1, new([]byte)), "Incorrect message type verified for status rpc topic")
|
||||
|
||||
@@ -25,6 +26,7 @@ func TestVerifyRPCMappings(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestTopicDeconstructor(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
tt := []struct {
|
||||
name string
|
||||
topic string
|
||||
@@ -81,7 +83,7 @@ func TestTopicDeconstructor(t *testing.T) {
|
||||
|
||||
func TestTopicFromMessage_CorrectType(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
bCfg := params.BeaconConfig()
|
||||
bCfg := params.BeaconConfig().Copy()
|
||||
forkEpoch := eth2types.Epoch(100)
|
||||
bCfg.AltairForkEpoch = forkEpoch
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.AltairForkVersion)] = eth2types.Epoch(100)
|
||||
|
||||
@@ -6,17 +6,18 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
|
||||
"github.com/libp2p/go-libp2p-core/network"
|
||||
testp2p "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func TestService_Send(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
p1 := testp2p.NewTestP2P(t)
|
||||
p2 := testp2p.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/scorers"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/network/forks"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
@@ -81,6 +82,7 @@ func createHost(t *testing.T, port int) (host.Host, *ecdsa.PrivateKey, net.IP) {
|
||||
}
|
||||
|
||||
func TestService_Stop_SetsStartedToFalse(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
s, err := NewService(context.Background(), &Config{StateNotifier: &mock.MockStateNotifier{}})
|
||||
require.NoError(t, err)
|
||||
s.started = true
|
||||
@@ -90,12 +92,14 @@ func TestService_Stop_SetsStartedToFalse(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_Stop_DontPanicIfDv5ListenerIsNotInited(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
s, err := NewService(context.Background(), &Config{StateNotifier: &mock.MockStateNotifier{}})
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, s.Stop())
|
||||
}
|
||||
|
||||
func TestService_Start_OnlyStartsOnce(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
hook := logTest.NewGlobal()
|
||||
|
||||
cfg := &Config{
|
||||
@@ -131,12 +135,14 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_Status_NotRunning(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
s := &Service{started: false}
|
||||
s.dv5Listener = &mockListener{}
|
||||
assert.ErrorContains(t, "not running", s.Status(), "Status returned wrong error")
|
||||
}
|
||||
|
||||
func TestService_Status_NoGenesisTimeSet(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
s := &Service{started: true}
|
||||
s.dv5Listener = &mockListener{}
|
||||
assert.ErrorContains(t, "no genesis time set", s.Status(), "Status returned wrong error")
|
||||
@@ -147,6 +153,7 @@ func TestService_Status_NoGenesisTimeSet(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestListenForNewNodes(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
// Setup bootnode.
|
||||
notifier := &mock.MockStateNotifier{}
|
||||
cfg := &Config{StateNotifier: notifier}
|
||||
@@ -241,6 +248,7 @@ func TestListenForNewNodes(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPeer_Disconnect(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
h1, _, _ := createHost(t, 5000)
|
||||
defer func() {
|
||||
if err := h1.Close(); err != nil {
|
||||
@@ -271,6 +279,7 @@ func TestPeer_Disconnect(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_JoinLeaveTopic(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
defer cancel()
|
||||
s, err := NewService(ctx, &Config{StateNotifier: &mock.MockStateNotifier{}})
|
||||
@@ -329,6 +338,7 @@ func initializeStateWithForkDigest(ctx context.Context, t *testing.T, ef *event.
|
||||
}
|
||||
|
||||
func TestService_connectWithPeer(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
tests := []struct {
|
||||
name string
|
||||
peers *peers.Status
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
@@ -24,6 +25,7 @@ import (
|
||||
)
|
||||
|
||||
func TestStartDiscV5_DiscoverPeersWithSubnets(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
// This test needs to be entirely rewritten and should be done in a follow up PR from #7885.
|
||||
t.Skip("This test is now failing after PR 7885 due to false positive")
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
@@ -146,6 +148,7 @@ func TestStartDiscV5_DiscoverPeersWithSubnets(t *testing.T) {
|
||||
}
|
||||
|
||||
func Test_AttSubnets(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
tests := []struct {
|
||||
name string
|
||||
record func(t *testing.T) *enr.Record
|
||||
@@ -330,6 +333,7 @@ func Test_AttSubnets(t *testing.T) {
|
||||
}
|
||||
|
||||
func Test_SyncSubnets(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
tests := []struct {
|
||||
name string
|
||||
record func(t *testing.T) *enr.Record
|
||||
|
||||
@@ -24,7 +24,7 @@ func TestInitializeDataMaps(t *testing.T) {
|
||||
{
|
||||
name: "fork version changes",
|
||||
action: func() {
|
||||
cfg := params.BeaconConfig()
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.GenesisForkVersion = []byte{0x01, 0x02, 0x00, 0x00}
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
},
|
||||
@@ -33,7 +33,7 @@ func TestInitializeDataMaps(t *testing.T) {
|
||||
{
|
||||
name: "fork version changes with reset",
|
||||
action: func() {
|
||||
cfg := params.BeaconConfig()
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.GenesisForkVersion = []byte{0x01, 0x02, 0x00, 0x00}
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
InitializeDataMaps()
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
@@ -14,6 +15,7 @@ import (
|
||||
// Test `verifyConnectivity` function by trying to connect to google.com (successfully)
|
||||
// and then by connecting to an unreachable IP and ensuring that a log is emitted
|
||||
func TestVerifyConnectivity(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
hook := logTest.NewGlobal()
|
||||
cases := []struct {
|
||||
address string
|
||||
@@ -39,6 +41,7 @@ func TestVerifyConnectivity(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSerializeENR(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
t.Run("Ok", func(t *testing.T) {
|
||||
key, err := crypto.GenerateKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -22,7 +22,7 @@ const repeatedSearches = 2 * searchThreshold

 // BlockExists returns true if the block exists, its height and any possible error encountered.
 func (s *Service) BlockExists(ctx context.Context, hash common.Hash) (bool, *big.Int, error) {
-	ctx, span := trace.StartSpan(ctx, "beacon-chain.web3service.BlockExists")
+	ctx, span := trace.StartSpan(ctx, "powchain.BlockExists")
 	defer span.End()

 	if exists, hdrInfo, err := s.headerCache.HeaderInfoByHash(hash); exists || err != nil {
@@ -45,24 +45,9 @@ func (s *Service) BlockExists(ctx context.Context, hash common.Hash) (bool, *big.Int, error) {
 	return true, new(big.Int).Set(header.Number), nil
 }

-// BlockExistsWithCache returns true if the block exists in cache, its height and any possible error encountered.
-func (s *Service) BlockExistsWithCache(ctx context.Context, hash common.Hash) (bool, *big.Int, error) {
-	_, span := trace.StartSpan(ctx, "beacon-chain.web3service.BlockExistsWithCache")
-	defer span.End()
-	if exists, hdrInfo, err := s.headerCache.HeaderInfoByHash(hash); exists || err != nil {
-		if err != nil {
-			return false, nil, err
-		}
-		span.AddAttributes(trace.BoolAttribute("blockCacheHit", true))
-		return true, hdrInfo.Number, nil
-	}
-	span.AddAttributes(trace.BoolAttribute("blockCacheHit", false))
-	return false, nil, nil
-}
-
 // BlockHashByHeight returns the block hash of the block at the given height.
 func (s *Service) BlockHashByHeight(ctx context.Context, height *big.Int) (common.Hash, error) {
-	ctx, span := trace.StartSpan(ctx, "beacon-chain.web3service.BlockHashByHeight")
+	ctx, span := trace.StartSpan(ctx, "powchain.BlockHashByHeight")
 	defer span.End()

 	if exists, hInfo, err := s.headerCache.HeaderInfoByHeight(height); exists || err != nil {
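With `BlockExistsWithCache` removed, `BlockExists` is the single entry point, and it already answers from the header cache before touching the RPC client. The shape of that cache-first lookup is sketched below with hypothetical cache and fetcher types standing in for the service's real fields; only the control flow mirrors the diff.

```go
import (
	"context"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	gethTypes "github.com/ethereum/go-ethereum/core/types"
)

// headerInfo is a minimal stand-in for the cached header metadata used here.
type headerInfo struct {
	Number *big.Int
}

// headerCache is a hypothetical cache interface mirroring the calls in the diff.
type headerCache interface {
	HeaderInfoByHash(hash common.Hash) (bool, *headerInfo, error)
}

// blockExists answers from the header cache when possible and only falls back
// to an RPC header fetch on a cache miss. Sketch only.
func blockExists(ctx context.Context, cache headerCache, fetch func(context.Context, common.Hash) (*gethTypes.Header, error), hash common.Hash) (bool, *big.Int, error) {
	if exists, info, err := cache.HeaderInfoByHash(hash); exists || err != nil {
		if err != nil {
			return false, nil, err
		}
		return true, info.Number, nil
	}
	header, err := fetch(ctx, hash)
	if err != nil {
		return false, nil, err
	}
	return true, new(big.Int).Set(header.Number), nil
}
```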
@@ -90,9 +75,9 @@ func (s *Service) BlockHashByHeight(ctx context.Context, height *big.Int) (common.Hash, error) {
 	return header.Hash(), nil
 }

-// BlockTimeByHeight fetches an eth1.0 block timestamp by its height.
+// BlockTimeByHeight fetches an eth1 block timestamp by its height.
 func (s *Service) BlockTimeByHeight(ctx context.Context, height *big.Int) (uint64, error) {
-	ctx, span := trace.StartSpan(ctx, "beacon-chain.web3service.BlockTimeByHeight")
+	ctx, span := trace.StartSpan(ctx, "powchain.BlockTimeByHeight")
 	defer span.End()
 	if s.eth1DataFetcher == nil {
 		err := errors.New("nil eth1DataFetcher")
@@ -111,7 +96,7 @@ func (s *Service) BlockTimeByHeight(ctx context.Context, height *big.Int) (uint64, error) {
 // This is an optimized version with the worst case being O(2*repeatedSearches) number of calls
 // while in best case search for the block is performed in O(1).
 func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.HeaderInfo, error) {
-	ctx, span := trace.StartSpan(ctx, "beacon-chain.web3service.BlockByTimestamp")
+	ctx, span := trace.StartSpan(ctx, "powchain.BlockByTimestamp")
 	defer span.End()

 	s.latestEth1DataLock.RLock()
@@ -122,15 +107,14 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.HeaderInfo, error) {
 	if time > latestBlkTime {
 		return nil, errors.Errorf("provided time is later than the current eth1 head. %d > %d", time, latestBlkTime)
 	}
-	// Initialize a pointer to eth1 chain's history to start our search
-	// from.
+	// Initialize a pointer to eth1 chain's history to start our search from.
 	cursorNum := big.NewInt(0).SetUint64(latestBlkHeight)
 	cursorTime := latestBlkTime

 	numOfBlocks := uint64(0)
 	estimatedBlk := cursorNum.Uint64()
 	maxTimeBuffer := searchThreshold * params.BeaconConfig().SecondsPerETH1Block
-	// Terminate if we cant find an acceptable block after
+	// Terminate if we can't find an acceptable block after
 	// repeated searches.
 	for i := 0; i < repeatedSearches; i++ {
 		if ctx.Err() != nil {
@@ -157,12 +141,12 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.HeaderInfo, error) {
 			// time - buffer <= head.time <= time + buffer
 			break
 		}
-		hinfo, err := s.retrieveHeaderInfo(ctx, estimatedBlk)
+		hInfo, err := s.retrieveHeaderInfo(ctx, estimatedBlk)
 		if err != nil {
 			return nil, err
 		}
-		cursorNum = hinfo.Number
-		cursorTime = hinfo.Time
+		cursorNum = hInfo.Number
+		cursorTime = hInfo.Time
 	}

 	// Exit early if we get the desired block.
@@ -170,15 +154,15 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.HeaderInfo, error) {
 		return s.retrieveHeaderInfo(ctx, cursorNum.Uint64())
 	}
 	if cursorTime > time {
-		return s.findLessTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
+		return s.findMaxTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
 	}
-	return s.findMoreTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
+	return s.findMinTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
 }

 // Performs a search to find a target eth1 block which is earlier than or equal to the
 // target time. This method is used when head.time > targetTime
-func (s *Service) findLessTargetEth1Block(ctx context.Context, startBlk *big.Int, targetTime uint64) (*types.HeaderInfo, error) {
-	for bn := startBlk; ; bn = big.NewInt(0).Sub(bn, big.NewInt(1)) {
+func (s *Service) findMaxTargetEth1Block(ctx context.Context, upperBoundBlk *big.Int, targetTime uint64) (*types.HeaderInfo, error) {
+	for bn := upperBoundBlk; ; bn = big.NewInt(0).Sub(bn, big.NewInt(1)) {
 		if ctx.Err() != nil {
 			return nil, ctx.Err()
 		}
@@ -194,8 +178,8 @@ func (s *Service) findLessTargetEth1Block(ctx context.Context, startBlk *big.Int, targetTime uint64) (*types.HeaderInfo, error) {

 // Performs a search to find a target eth1 block which is just earlier than or equal to the
 // target time. This method is used when head.time < targetTime
-func (s *Service) findMoreTargetEth1Block(ctx context.Context, startBlk *big.Int, targetTime uint64) (*types.HeaderInfo, error) {
-	for bn := startBlk; ; bn = big.NewInt(0).Add(bn, big.NewInt(1)) {
+func (s *Service) findMinTargetEth1Block(ctx context.Context, lowerBoundBlk *big.Int, targetTime uint64) (*types.HeaderInfo, error) {
+	for bn := lowerBoundBlk; ; bn = big.NewInt(0).Add(bn, big.NewInt(1)) {
 		if ctx.Err() != nil {
 			return nil, ctx.Err()
 		}
@@ -203,8 +187,7 @@ func (s *Service) findMoreTargetEth1Block(ctx context.Context, startBlk *big.Int, targetTime uint64) (*types.HeaderInfo, error) {
 		if err != nil {
 			return nil, err
 		}
-		// Return the last block before we hit the threshold
-		// time.
+		// Return the last block before we hit the threshold time.
 		if info.Time > targetTime {
 			return s.retrieveHeaderInfo(ctx, info.Number.Uint64()-1)
 		}
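Taken together, `BlockByTimestamp` first jumps to an estimated height using the configured seconds-per-block, then the renamed `findMaxTargetEth1Block` / `findMinTargetEth1Block` helpers walk one block at a time toward the target. The sketch below compresses that estimate-then-refine idea into a single hypothetical function; `headerAt` stands in for `retrieveHeaderInfo`, and the code assumes monotonically increasing block timestamps and a target no later than the head, as the real code does.

```go
// blockNearTimestamp estimates a block height from the head, then walks toward
// the target time and returns the last block whose timestamp is <= target.
func blockNearTimestamp(headNum, headTime, target, secsPerBlock uint64, headerAt func(uint64) (uint64, uint64)) uint64 {
	var est uint64
	if target < headTime {
		back := (headTime - target) / secsPerBlock
		if back > headNum {
			back = headNum
		}
		est = headNum - back // jump backwards toward the target
	} else {
		est = headNum + (target-headTime)/secsPerBlock // jump forwards
	}
	num, ts := headerAt(est)
	for ts > target && num > 0 { // overshot: walk down to the last block at or before target
		num--
		_, ts = headerAt(num)
	}
	for { // undershot or exact: walk up while the next block still does not pass the target
		nextNum, nextTs := headerAt(num + 1)
		if nextTs > target {
			return num
		}
		num = nextNum
	}
}
```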
@@ -61,7 +61,7 @@ func TestLatestMainchainInfo_OK(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
tickerChan := make(chan time.Time)
|
||||
web3Service.headTicker = &time.Ticker{C: tickerChan}
|
||||
web3Service.eth1HeadTicker = &time.Ticker{C: tickerChan}
|
||||
tickerChan <- time.Now()
|
||||
web3Service.cancel()
|
||||
exitRoutine <- true
|
||||
@@ -207,51 +207,6 @@ func TestBlockExists_UsesCachedBlockInfo(t *testing.T) {
|
||||
require.Equal(t, 0, height.Cmp(header.Number))
|
||||
}
|
||||
|
||||
func TestBlockExistsWithCache_UsesCachedHeaderInfo(t *testing.T) {
|
||||
beaconDB := dbutil.SetupDB(t)
|
||||
server, endpoint, err := mockPOW.SetupRPCServer()
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
server.Stop()
|
||||
})
|
||||
web3Service, err := NewService(context.Background(),
|
||||
WithHttpEndpoints([]string{endpoint}),
|
||||
WithDatabase(beaconDB),
|
||||
)
|
||||
require.NoError(t, err, "unable to setup web3 ETH1.0 chain service")
|
||||
|
||||
header := &gethTypes.Header{
|
||||
Number: big.NewInt(0),
|
||||
}
|
||||
|
||||
err = web3Service.headerCache.AddHeader(header)
|
||||
require.NoError(t, err)
|
||||
|
||||
exists, height, err := web3Service.BlockExistsWithCache(context.Background(), header.Hash())
|
||||
require.NoError(t, err, "Could not get block hash with given height")
|
||||
require.Equal(t, true, exists)
|
||||
require.Equal(t, 0, height.Cmp(header.Number))
|
||||
}
|
||||
|
||||
func TestBlockExistsWithCache_HeaderNotCached(t *testing.T) {
|
||||
beaconDB := dbutil.SetupDB(t)
|
||||
server, endpoint, err := mockPOW.SetupRPCServer()
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
server.Stop()
|
||||
})
|
||||
web3Service, err := NewService(context.Background(),
|
||||
WithHttpEndpoints([]string{endpoint}),
|
||||
WithDatabase(beaconDB),
|
||||
)
|
||||
require.NoError(t, err, "unable to setup web3 ETH1.0 chain service")
|
||||
|
||||
exists, height, err := web3Service.BlockExistsWithCache(context.Background(), common.BytesToHash([]byte("hash")))
|
||||
require.NoError(t, err, "Could not get block hash with given height")
|
||||
require.Equal(t, false, exists)
|
||||
require.Equal(t, (*big.Int)(nil), height)
|
||||
}
|
||||
|
||||
func TestService_BlockNumberByTimestamp(t *testing.T) {
|
||||
beaconDB := dbutil.SetupDB(t)
|
||||
testAcc, err := mock.Setup()
|
||||
@@ -313,11 +268,11 @@ func TestService_BlockNumberByTimestampLessTargetTime(t *testing.T) {
|
||||
defer cancel()
|
||||
|
||||
// Provide an unattainable target time
|
||||
_, err = web3Service.findLessTargetEth1Block(ctx, hd.Number, hd.Time/2)
|
||||
_, err = web3Service.findMaxTargetEth1Block(ctx, hd.Number, hd.Time/2)
|
||||
require.ErrorContains(t, context.DeadlineExceeded.Error(), err)
|
||||
|
||||
// Provide an attainable target time
|
||||
blk, err := web3Service.findLessTargetEth1Block(context.Background(), hd.Number, hd.Time-5)
|
||||
blk, err := web3Service.findMaxTargetEth1Block(context.Background(), hd.Number, hd.Time-5)
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, hd.Number.Uint64(), blk.Number.Uint64(), "retrieved block is not less than the head")
|
||||
}
|
||||
@@ -351,11 +306,11 @@ func TestService_BlockNumberByTimestampMoreTargetTime(t *testing.T) {
|
||||
defer cancel()
|
||||
|
||||
// Provide an unattainable target time with respect to head
|
||||
_, err = web3Service.findMoreTargetEth1Block(ctx, big.NewInt(0).Div(hd.Number, big.NewInt(2)), hd.Time)
|
||||
_, err = web3Service.findMinTargetEth1Block(ctx, big.NewInt(0).Div(hd.Number, big.NewInt(2)), hd.Time)
|
||||
require.ErrorContains(t, context.DeadlineExceeded.Error(), err)
|
||||
|
||||
// Provide an attainable target time with respect to head
|
||||
blk, err := web3Service.findMoreTargetEth1Block(context.Background(), big.NewInt(0).Sub(hd.Number, big.NewInt(5)), hd.Time)
|
||||
blk, err := web3Service.findMinTargetEth1Block(context.Background(), big.NewInt(0).Sub(hd.Number, big.NewInt(5)), hd.Time)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, hd.Number.Uint64(), blk.Number.Uint64(), "retrieved block is not equal to the head")
|
||||
}
|
||||
|
||||
@@ -26,7 +26,7 @@ const pubKeyErr = "could not convert bytes to public key"
|
||||
|
||||
func TestDepositContractAddress_EmptyAddress(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.DepositContractAddress = ""
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
@@ -36,7 +36,7 @@ func TestDepositContractAddress_EmptyAddress(t *testing.T) {
|
||||
|
||||
func TestDepositContractAddress_NotHexAddress(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.DepositContractAddress = "abc?!"
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
@@ -138,7 +138,8 @@ func TestProcessDeposit_InvalidPublicKey(t *testing.T) {
|
||||
deposits[0].Proof, err = trie.MerkleProof(0)
|
||||
require.NoError(t, err)
|
||||
|
||||
root := trie.HashTreeRoot()
|
||||
root, err := trie.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
eth1Data := ðpb.Eth1Data{
|
||||
DepositCount: 1,
|
||||
@@ -178,7 +179,8 @@ func TestProcessDeposit_InvalidSignature(t *testing.T) {
|
||||
trie, err := trie.GenerateTrieFromItems([][]byte{leaf[:]}, params.BeaconConfig().DepositContractTreeDepth)
|
||||
require.NoError(t, err)
|
||||
|
||||
root := trie.HashTreeRoot()
|
||||
root, err := trie.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
eth1Data := ðpb.Eth1Data{
|
||||
DepositCount: 1,
|
||||
@@ -213,7 +215,8 @@ func TestProcessDeposit_UnableToVerify(t *testing.T) {
|
||||
|
||||
trie, _, err := util.DepositTrieFromDeposits(deposits)
|
||||
require.NoError(t, err)
|
||||
root := trie.HashTreeRoot()
|
||||
root, err := trie.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
eth1Data := ðpb.Eth1Data{
|
||||
DepositCount: 1,
|
||||
DepositRoot: root[:],
|
||||
@@ -265,7 +268,8 @@ func TestProcessDeposit_IncompleteDeposit(t *testing.T) {
|
||||
|
||||
trie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
|
||||
require.NoError(t, err)
|
||||
root := trie.HashTreeRoot()
|
||||
root, err := trie.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
eth1Data := ðpb.Eth1Data{
|
||||
DepositCount: 1,
|
||||
DepositRoot: root[:],
|
||||
@@ -281,7 +285,8 @@ func TestProcessDeposit_IncompleteDeposit(t *testing.T) {
|
||||
for i := 0; i < int(factor-1); i++ {
|
||||
assert.NoError(t, trie.Insert(dataRoot[:], i))
|
||||
|
||||
trieRoot := trie.HashTreeRoot()
|
||||
trieRoot, err := trie.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
eth1Data.DepositRoot = trieRoot[:]
|
||||
eth1Data.DepositCount = uint64(i + 1)
|
||||
|
||||
|
||||
@@ -33,6 +33,10 @@ const (
|
||||
ExecutionBlockByHashMethod = "eth_getBlockByHash"
|
||||
// ExecutionBlockByNumberMethod request string for JSON-RPC.
|
||||
ExecutionBlockByNumberMethod = "eth_getBlockByNumber"
|
||||
// Defines the seconds to wait before timing out engine endpoints with block execution semantics (newPayload, forkchoiceUpdated).
|
||||
payloadAndForkchoiceUpdatedTimeout = 8 * time.Second
|
||||
// Defines the seconds before timing out engine endpoints with non-block execution semantics.
|
||||
defaultEngineTimeout = time.Second
|
||||
)
|
||||
|
||||
// ForkchoiceUpdatedResponse is the response kind received by the
|
||||
@@ -65,7 +69,9 @@ func (s *Service) NewPayload(ctx context.Context, payload *pb.ExecutionPayload)
|
||||
defer func() {
|
||||
newPayloadLatency.Observe(float64(time.Since(start).Milliseconds()))
|
||||
}()
|
||||
|
||||
d := time.Now().Add(payloadAndForkchoiceUpdatedTimeout)
|
||||
ctx, cancel := context.WithDeadline(ctx, d)
|
||||
defer cancel()
|
||||
result := &pb.PayloadStatus{}
|
||||
err := s.rpcClient.CallContext(ctx, result, NewPayloadMethod, payload)
|
||||
if err != nil {
|
||||
@@ -75,8 +81,6 @@ func (s *Service) NewPayload(ctx context.Context, payload *pb.ExecutionPayload)
|
||||
switch result.Status {
|
||||
case pb.PayloadStatus_INVALID_BLOCK_HASH:
|
||||
return nil, fmt.Errorf("could not validate block hash: %v", result.ValidationError)
|
||||
case pb.PayloadStatus_INVALID_TERMINAL_BLOCK:
|
||||
return nil, fmt.Errorf("could not satisfy terminal block condition: %v", result.ValidationError)
|
||||
case pb.PayloadStatus_ACCEPTED, pb.PayloadStatus_SYNCING:
|
||||
return nil, ErrAcceptedSyncingPayloadStatus
|
||||
case pb.PayloadStatus_INVALID:
|
||||
@@ -99,6 +103,9 @@ func (s *Service) ForkchoiceUpdated(
|
||||
forkchoiceUpdatedLatency.Observe(float64(time.Since(start).Milliseconds()))
|
||||
}()
|
||||
|
||||
d := time.Now().Add(payloadAndForkchoiceUpdatedTimeout)
|
||||
ctx, cancel := context.WithDeadline(ctx, d)
|
||||
defer cancel()
|
||||
result := &ForkchoiceUpdatedResponse{}
|
||||
err := s.rpcClient.CallContext(ctx, result, ForkchoiceUpdatedMethod, state, attrs)
|
||||
if err != nil {
|
||||
@@ -110,8 +117,6 @@ func (s *Service) ForkchoiceUpdated(
|
||||
}
|
||||
resp := result.Status
|
||||
switch resp.Status {
|
||||
case pb.PayloadStatus_INVALID_TERMINAL_BLOCK:
|
||||
return nil, nil, fmt.Errorf("could not satisfy terminal block condition: %v", resp.ValidationError)
|
||||
case pb.PayloadStatus_SYNCING:
|
||||
return nil, nil, ErrAcceptedSyncingPayloadStatus
|
||||
case pb.PayloadStatus_INVALID:
|
||||
@@ -132,6 +137,9 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte) (*pb.Execut
|
||||
getPayloadLatency.Observe(float64(time.Since(start).Milliseconds()))
|
||||
}()
|
||||
|
||||
d := time.Now().Add(defaultEngineTimeout)
|
||||
ctx, cancel := context.WithDeadline(ctx, d)
|
||||
defer cancel()
|
||||
result := &pb.ExecutionPayload{}
|
||||
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethod, pb.PayloadIDBytes(payloadId))
|
||||
return result, handleRPCError(err)
|
||||
@@ -147,10 +155,14 @@ func (s *Service) ExchangeTransitionConfiguration(
|
||||
// We set terminal block number to 0 as the parameter is not set on the consensus layer.
|
||||
zeroBigNum := big.NewInt(0)
|
||||
cfg.TerminalBlockNumber = zeroBigNum.Bytes()
|
||||
d := time.Now().Add(defaultEngineTimeout)
|
||||
ctx, cancel := context.WithDeadline(ctx, d)
|
||||
defer cancel()
|
||||
result := &pb.TransitionConfiguration{}
|
||||
if err := s.rpcClient.CallContext(ctx, result, ExchangeTransitionConfigurationMethod, cfg); err != nil {
|
||||
return handleRPCError(err)
|
||||
}
|
||||
|
||||
// We surface an error to the user if local configuration settings mismatch
|
||||
// according to the response from the execution node.
|
||||
cfgTerminalHash := params.BeaconConfig().TerminalBlockHash[:]
|
||||
|
||||
@@ -217,27 +217,6 @@ func TestClient_HTTP(t *testing.T) {
|
||||
require.DeepEqual(t, (*pb.PayloadIDBytes)(nil), payloadID)
|
||||
require.DeepEqual(t, []byte(nil), validHash)
|
||||
})
|
||||
t.Run(ForkchoiceUpdatedMethod+" INVALID_TERMINAL_BLOCK status", func(t *testing.T) {
|
||||
forkChoiceState := &pb.ForkchoiceState{
|
||||
HeadBlockHash: []byte("head"),
|
||||
SafeBlockHash: []byte("safe"),
|
||||
FinalizedBlockHash: []byte("finalized"),
|
||||
}
|
||||
payloadAttributes := &pb.PayloadAttributes{
|
||||
Timestamp: 1,
|
||||
PrevRandao: []byte("random"),
|
||||
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
|
||||
}
|
||||
want, ok := fix["ForkchoiceUpdatedInvalidTerminalBlockResponse"].(*ForkchoiceUpdatedResponse)
|
||||
require.Equal(t, true, ok)
|
||||
client := forkchoiceUpdateSetup(t, forkChoiceState, payloadAttributes, want)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, forkChoiceState, payloadAttributes)
|
||||
require.ErrorContains(t, "could not satisfy terminal block condition", err)
|
||||
require.DeepEqual(t, (*pb.PayloadIDBytes)(nil), payloadID)
|
||||
require.DeepEqual(t, []byte(nil), validHash)
|
||||
})
|
||||
t.Run(NewPayloadMethod+" VALID status", func(t *testing.T) {
|
||||
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
|
||||
require.Equal(t, true, ok)
|
||||
@@ -274,18 +253,6 @@ func TestClient_HTTP(t *testing.T) {
|
||||
require.ErrorContains(t, "could not validate block hash", err)
|
||||
require.DeepEqual(t, []uint8(nil), resp)
|
||||
})
|
||||
t.Run(NewPayloadMethod+" INVALID_TERMINAL_BLOCK status", func(t *testing.T) {
|
||||
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
|
||||
require.Equal(t, true, ok)
|
||||
want, ok := fix["InvalidTerminalBlockStatus"].(*pb.PayloadStatus)
|
||||
require.Equal(t, true, ok)
|
||||
client := newPayloadSetup(t, want, execPayload)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
resp, err := client.NewPayload(ctx, execPayload)
|
||||
require.ErrorContains(t, "could not satisfy terminal block condition", err)
|
||||
require.DeepEqual(t, []uint8(nil), resp)
|
||||
})
|
||||
t.Run(NewPayloadMethod+" INVALID status", func(t *testing.T) {
|
||||
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
|
||||
require.Equal(t, true, ok)
|
||||
@@ -521,7 +488,7 @@ func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := params.BeaconConfig()
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.TerminalTotalDifficulty = tt.paramsTd
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
var m map[[32]byte]*pb.ExecutionBlock
|
||||
@@ -819,13 +786,6 @@ func fixtures() map[string]interface{} {
|
||||
},
|
||||
PayloadId: &id,
|
||||
}
|
||||
forkChoiceInvalidTerminalBlockResp := &ForkchoiceUpdatedResponse{
|
||||
Status: &pb.PayloadStatus{
|
||||
Status: pb.PayloadStatus_INVALID_TERMINAL_BLOCK,
|
||||
LatestValidHash: nil,
|
||||
},
|
||||
PayloadId: &id,
|
||||
}
|
||||
forkChoiceAcceptedResp := &ForkchoiceUpdatedResponse{
|
||||
Status: &pb.PayloadStatus{
|
||||
Status: pb.PayloadStatus_ACCEPTED,
|
||||
@@ -856,10 +816,6 @@ func fixtures() map[string]interface{} {
|
||||
Status: pb.PayloadStatus_INVALID_BLOCK_HASH,
|
||||
LatestValidHash: nil,
|
||||
}
|
||||
inValidTerminalBlockStatus := &pb.PayloadStatus{
|
||||
Status: pb.PayloadStatus_INVALID_TERMINAL_BLOCK,
|
||||
LatestValidHash: nil,
|
||||
}
|
||||
acceptedStatus := &pb.PayloadStatus{
|
||||
Status: pb.PayloadStatus_ACCEPTED,
|
||||
LatestValidHash: nil,
|
||||
@@ -877,21 +833,19 @@ func fixtures() map[string]interface{} {
|
||||
LatestValidHash: foo[:],
|
||||
}
|
||||
return map[string]interface{}{
|
||||
"ExecutionBlock": executionBlock,
|
||||
"ExecutionPayload": executionPayloadFixture,
|
||||
"ValidPayloadStatus": validStatus,
|
||||
"InvalidBlockHashStatus": inValidBlockHashStatus,
|
||||
"InvalidTerminalBlockStatus": inValidTerminalBlockStatus,
|
||||
"AcceptedStatus": acceptedStatus,
|
||||
"SyncingStatus": syncingStatus,
|
||||
"InvalidStatus": invalidStatus,
|
||||
"UnknownStatus": unknownStatus,
|
||||
"ForkchoiceUpdatedResponse": forkChoiceResp,
|
||||
"ForkchoiceUpdatedSyncingResponse": forkChoiceSyncingResp,
|
||||
"ForkchoiceUpdatedInvalidTerminalBlockResponse": forkChoiceInvalidTerminalBlockResp,
|
||||
"ForkchoiceUpdatedAcceptedResponse": forkChoiceAcceptedResp,
|
||||
"ForkchoiceUpdatedInvalidResponse": forkChoiceInvalidResp,
|
||||
"TransitionConfiguration": transitionCfg,
|
||||
"ExecutionBlock": executionBlock,
|
||||
"ExecutionPayload": executionPayloadFixture,
|
||||
"ValidPayloadStatus": validStatus,
|
||||
"InvalidBlockHashStatus": inValidBlockHashStatus,
|
||||
"AcceptedStatus": acceptedStatus,
|
||||
"SyncingStatus": syncingStatus,
|
||||
"InvalidStatus": invalidStatus,
|
||||
"UnknownStatus": unknownStatus,
|
||||
"ForkchoiceUpdatedResponse": forkChoiceResp,
|
||||
"ForkchoiceUpdatedSyncingResponse": forkChoiceSyncingResp,
|
||||
"ForkchoiceUpdatedAcceptedResponse": forkChoiceAcceptedResp,
|
||||
"ForkchoiceUpdatedInvalidResponse": forkChoiceInvalidResp,
|
||||
"TransitionConfiguration": transitionCfg,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -19,8 +19,6 @@ var (
|
||||
ErrUnknownPayload = errors.New("payload does not exist or is not available")
|
||||
// ErrUnknownPayloadStatus when the payload status is unknown.
|
||||
ErrUnknownPayloadStatus = errors.New("unknown payload status")
|
||||
// ErrUnsupportedScheme for unsupported URL schemes.
|
||||
ErrUnsupportedScheme = errors.New("unsupported url scheme, only http(s) and ipc are supported")
|
||||
// ErrConfigMismatch when the execution node's terminal total difficulty or
|
||||
// terminal block hash received via the API mismatches Prysm's configuration value.
|
||||
ErrConfigMismatch = errors.New("execution client configuration mismatch")
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
|
||||
func init() {
|
||||
// Override network name so that hardcoded genesis files are not loaded.
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.ConfigName = "test"
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
if err := params.SetActive(params.MainnetTestConfig()); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -36,7 +36,7 @@ var (
 const eth1DataSavingInterval = 1000
 const maxTolerableDifference = 50
 const defaultEth1HeaderReqLimit = uint64(1000)
-const depositlogRequestLimit = 10000
+const depositLogRequestLimit = 10000
 const additiveFactorMultiplier = 0.10
 const multiplicativeDecreaseDivisor = 2
|
||||
@@ -57,7 +57,7 @@ func (s *Service) Eth2GenesisPowchainInfo() (uint64, *big.Int) {
|
||||
return s.chainStartData.GenesisTime, big.NewInt(int64(s.chainStartData.GenesisBlock))
|
||||
}
|
||||
|
||||
// ProcessETH1Block processes the logs from the provided eth1Block.
|
||||
// ProcessETH1Block processes logs from the provided eth1 block.
|
||||
func (s *Service) ProcessETH1Block(ctx context.Context, blkNum *big.Int) error {
|
||||
query := ethereum.FilterQuery{
|
||||
Addresses: []common.Address{
|
||||
@@ -80,7 +80,7 @@ func (s *Service) ProcessETH1Block(ctx context.Context, blkNum *big.Int) error {
|
||||
}
|
||||
}
|
||||
if !s.chainStartData.Chainstarted {
|
||||
if err := s.checkBlockNumberForChainStart(ctx, blkNum); err != nil {
|
||||
if err := s.processChainStartFromBlockNum(ctx, blkNum); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -88,7 +88,7 @@ func (s *Service) ProcessETH1Block(ctx context.Context, blkNum *big.Int) error {
|
||||
}
|
||||
|
||||
// ProcessLog is the main method which handles the processing of all
|
||||
// logs from the deposit contract on the ETH1.0 chain.
|
||||
// logs from the deposit contract on the eth1 chain.
|
||||
func (s *Service) ProcessLog(ctx context.Context, depositLog gethTypes.Log) error {
|
||||
s.processingLock.RLock()
|
||||
defer s.processingLock.RUnlock()
|
||||
@@ -107,7 +107,7 @@ func (s *Service) ProcessLog(ctx context.Context, depositLog gethTypes.Log) erro
|
||||
}
|
||||
|
||||
// ProcessDepositLog processes the log which had been received from
|
||||
// the ETH1.0 chain by trying to ascertain which participant deposited
|
||||
// the eth1 chain by trying to ascertain which participant deposited
|
||||
// in the contract.
|
||||
func (s *Service) ProcessDepositLog(ctx context.Context, depositLog gethTypes.Log) error {
|
||||
pubkey, withdrawalCredentials, amount, signature, merkleTreeIndex, err := contracts.UnpackDepositLogData(depositLog.Data)
|
||||
@@ -140,7 +140,7 @@ func (s *Service) ProcessDepositLog(ctx context.Context, depositLog gethTypes.Lo
|
||||
|
||||
depositHash, err := depositData.HashTreeRoot()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Unable to determine hashed value of deposit")
|
||||
return errors.Wrap(err, "unable to determine hashed value of deposit")
|
||||
}
|
||||
|
||||
// Defensive check to validate incoming index.
|
||||
@@ -158,20 +158,27 @@ func (s *Service) ProcessDepositLog(ctx context.Context, depositLog gethTypes.Log) error {
 	if !s.chainStartData.Chainstarted {
 		proof, err := s.depositTrie.MerkleProof(int(index))
 		if err != nil {
-			return errors.Wrap(err, "Unable to generate merkle proof for deposit")
+			return errors.Wrap(err, "unable to generate merkle proof for deposit")
 		}
 		deposit.Proof = proof
 	}

 	// We always store all historical deposits in the DB.
-	err = s.cfg.depositCache.InsertDeposit(ctx, deposit, depositLog.BlockNumber, index, s.depositTrie.HashTreeRoot())
+	root, err := s.depositTrie.HashTreeRoot()
+	if err != nil {
+		return errors.Wrap(err, "unable to determine root of deposit trie")
+	}
+	err = s.cfg.depositCache.InsertDeposit(ctx, deposit, depositLog.BlockNumber, index, root)
 	if err != nil {
 		return errors.Wrap(err, "unable to insert deposit into cache")
 	}
 	validData := true
 	if !s.chainStartData.Chainstarted {
 		s.chainStartData.ChainstartDeposits = append(s.chainStartData.ChainstartDeposits, deposit)
-		root := s.depositTrie.HashTreeRoot()
+		root, err := s.depositTrie.HashTreeRoot()
+		if err != nil {
+			return errors.Wrap(err, "unable to determine root of deposit trie")
+		}
 		eth1Data := &ethpb.Eth1Data{
 			DepositRoot:  root[:],
 			DepositCount: uint64(len(s.chainStartData.ChainstartDeposits)),
@@ -181,7 +188,11 @@ func (s *Service) ProcessDepositLog(ctx context.Context, depositLog gethTypes.Log) error {
 			validData = false
 		}
 	} else {
-		s.cfg.depositCache.InsertPendingDeposit(ctx, deposit, depositLog.BlockNumber, index, s.depositTrie.HashTreeRoot())
+		root, err := s.depositTrie.HashTreeRoot()
+		if err != nil {
+			return errors.Wrap(err, "unable to determine root of deposit trie")
+		}
+		s.cfg.depositCache.InsertPendingDeposit(ctx, deposit, depositLog.BlockNumber, index, root)
 	}
 	if validData {
 		log.WithFields(logrus.Fields{
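The churn in this hunk follows from the deposit trie's `HashTreeRoot` now returning `([32]byte, error)` rather than just the root, so every call site has to check the error before using the root. The resulting call-site pattern looks roughly like the sketch below; the anonymous interface is a stand-in for the concrete trie type, and the `errors`/`ethpb` packages are the ones already imported in this file.

```go
// eth1DataFromTrie builds an Eth1Data snapshot from a deposit trie, propagating
// the error that HashTreeRoot can now return. Sketch only.
func eth1DataFromTrie(t interface{ HashTreeRoot() ([32]byte, error) }, count uint64) (*ethpb.Eth1Data, error) {
	root, err := t.HashTreeRoot()
	if err != nil {
		return nil, errors.Wrap(err, "unable to determine root of deposit trie")
	}
	return &ethpb.Eth1Data{DepositRoot: root[:], DepositCount: count}, nil
}
```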
|
||||
@@ -215,7 +226,7 @@ func (s *Service) ProcessDepositLog(ctx context.Context, depositLog gethTypes.Lo
|
||||
}
|
||||
|
||||
// ProcessChainStart processes the log which had been received from
|
||||
// the ETH1.0 chain by trying to determine when to start the beacon chain.
|
||||
// the eth1 chain by trying to determine when to start the beacon chain.
|
||||
func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte, blockNumber *big.Int) {
|
||||
s.chainStartData.Chainstarted = true
|
||||
s.chainStartData.GenesisBlock = blockNumber.Uint64()
|
||||
@@ -225,12 +236,16 @@ func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte,
|
||||
for i := range s.chainStartData.ChainstartDeposits {
|
||||
proof, err := s.depositTrie.MerkleProof(i)
|
||||
if err != nil {
|
||||
log.Errorf("Unable to generate deposit proof %v", err)
|
||||
log.Errorf("unable to generate deposit proof %v", err)
|
||||
}
|
||||
s.chainStartData.ChainstartDeposits[i].Proof = proof
|
||||
}
|
||||
|
||||
root := s.depositTrie.HashTreeRoot()
|
||||
root, err := s.depositTrie.HashTreeRoot()
|
||||
if err != nil { // This should never happen.
|
||||
log.WithError(err).Error("unable to determine root of deposit trie, aborting chain start")
|
||||
return
|
||||
}
|
||||
s.chainStartData.Eth1Data = ðpb.Eth1Data{
|
||||
DepositCount: uint64(len(s.chainStartData.ChainstartDeposits)),
|
||||
DepositRoot: root[:],
|
||||
@@ -247,15 +262,15 @@ func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte, blockNumber *big.Int) {
 		},
 	})
 	if err := s.savePowchainData(s.ctx); err != nil {
-		// continue on, if the save fails as this will get re-saved
+		// continue on if the save fails as this will get re-saved
 		// in the next interval.
 		log.Error(err)
 	}
 }

+// createGenesisTime adds in the genesis delay to the eth1 block time
+// on which it was triggered.
 func createGenesisTime(timeStamp uint64) uint64 {
-	// adds in the genesis delay to the eth1 block time
-	// on which it was triggered.
 	return timeStamp + params.BeaconConfig().GenesisDelay
 }
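The comment move only clarifies what `createGenesisTime` computes: the genesis time is the triggering eth1 block's timestamp plus the configured `GenesisDelay`. As a worked example under an assumed delay of 604800 seconds (one week), a trigger block at unix time 1600000000 would yield a genesis time of 1600604800.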
@@ -292,7 +307,7 @@ func (s *Service) processPastLogs(ctx context.Context) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
latestFollowHeight, err := s.followBlockHeight(ctx)
|
||||
latestFollowHeight, err := s.followedBlockHeight(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -318,7 +333,7 @@ func (s *Service) processPastLogs(ctx context.Context) error {
 	remainingLogs := logCount - uint64(s.lastReceivedMerkleIndex+1)
 	// only change the end block if the remaining logs are below the required log limit.
 	// reset our query and end block in this case.
-	withinLimit := remainingLogs < depositlogRequestLimit
+	withinLimit := remainingLogs < depositLogRequestLimit
 	aboveFollowHeight := end >= latestFollowHeight
 	if withinLimit && aboveFollowHeight {
 		query.ToBlock = big.NewInt(0).SetUint64(latestFollowHeight)
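`processPastLogs` pages through historical deposit logs in bounded block ranges so that no single `FilterLogs` call has to return more than `depositLogRequestLimit` entries. A rough sketch of that style of batched scan is shown below, using go-ethereum's `FilterQuery` and `bind.ContractFilterer` as in the surrounding code; the window size and the `handle` callback are illustrative, not Prysm's actual query construction.

```go
import (
	"context"
	"math/big"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	gethTypes "github.com/ethereum/go-ethereum/core/types"
)

// scanDepositLogs walks [start, end] in fixed-size block windows, querying the
// deposit contract's logs one window at a time. Sketch only.
func scanDepositLogs(ctx context.Context, filterer bind.ContractFilterer, contract common.Address, start, end, batch uint64, handle func(gethTypes.Log) error) error {
	for from := start; from <= end; from += batch {
		to := from + batch - 1
		if to > end {
			to = end
		}
		query := ethereum.FilterQuery{
			Addresses: []common.Address{contract},
			FromBlock: new(big.Int).SetUint64(from),
			ToBlock:   new(big.Int).SetUint64(to),
		}
		logs, err := filterer.FilterLogs(ctx, query)
		if err != nil {
			return err
		}
		for _, l := range logs {
			if err := handle(l); err != nil {
				return err
			}
		}
	}
	return nil
}
```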
@@ -413,9 +428,9 @@ func (s *Service) processPastLogs(ctx context.Context) error {
|
||||
// logs from the period last polled to now.
|
||||
func (s *Service) requestBatchedHeadersAndLogs(ctx context.Context) error {
|
||||
// We request for the nth block behind the current head, in order to have
|
||||
// stabilized logs when we retrieve it from the 1.0 chain.
|
||||
// stabilized logs when we retrieve it from the eth1 chain.
|
||||
|
||||
requestedBlock, err := s.followBlockHeight(ctx)
|
||||
requestedBlock, err := s.followedBlockHeight(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -457,18 +472,17 @@ func (s *Service) retrieveBlockHashAndTime(ctx context.Context, blkNum *big.Int)
|
||||
return bHash, timeStamp, nil
|
||||
}
|
||||
|
||||
// checkBlockNumberForChainStart checks the given block number for if chainstart has occurred.
|
||||
func (s *Service) checkBlockNumberForChainStart(ctx context.Context, blkNum *big.Int) error {
|
||||
func (s *Service) processChainStartFromBlockNum(ctx context.Context, blkNum *big.Int) error {
|
||||
bHash, timeStamp, err := s.retrieveBlockHashAndTime(ctx, blkNum)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.checkForChainstart(ctx, bHash, blkNum, timeStamp)
|
||||
s.processChainStartIfReady(ctx, bHash, blkNum, timeStamp)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) checkHeaderForChainstart(ctx context.Context, header *gethTypes.Header) {
|
||||
s.checkForChainstart(ctx, header.Hash(), header.Number, header.Time)
|
||||
func (s *Service) processChainStartFromHeader(ctx context.Context, header *gethTypes.Header) {
|
||||
s.processChainStartIfReady(ctx, header.Hash(), header.Number, header.Time)
|
||||
}
|
||||
|
||||
func (s *Service) checkHeaderRange(ctx context.Context, start, end uint64, headersMap map[uint64]*gethTypes.Header,
|
||||
@@ -484,7 +498,7 @@ func (s *Service) checkHeaderRange(ctx context.Context, start, end uint64, heade
|
||||
i--
|
||||
continue
|
||||
}
|
||||
s.checkHeaderForChainstart(ctx, h)
|
||||
s.processChainStartFromHeader(ctx, h)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@@ -504,7 +518,7 @@ func (s *Service) currentCountAndTime(ctx context.Context, blockTime uint64) (ui
|
||||
return valCount, createGenesisTime(blockTime)
|
||||
}
|
||||
|
||||
func (s *Service) checkForChainstart(ctx context.Context, blockHash [32]byte, blockNumber *big.Int, blockTime uint64) {
|
||||
func (s *Service) processChainStartIfReady(ctx context.Context, blockHash [32]byte, blockNumber *big.Int, blockTime uint64) {
|
||||
valCount, genesisTime := s.currentCountAndTime(ctx, blockTime)
|
||||
if valCount == 0 {
|
||||
return
|
||||
@@ -516,7 +530,7 @@ func (s *Service) checkForChainstart(ctx context.Context, blockHash [32]byte, bl
|
||||
}
|
||||
}
|
||||
|
||||
// save all powchain related metadata to disk.
|
||||
// savePowchainData saves all powchain related metadata to disk.
|
||||
func (s *Service) savePowchainData(ctx context.Context) error {
|
||||
var pbState *ethpb.BeaconState
|
||||
var err error
|
||||
|
||||
@@ -236,7 +236,7 @@ func TestProcessETH2GenesisLog_8DuplicatePubkeys(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
bConfig := params.MinimalSpecConfig()
|
||||
bConfig := params.MinimalSpecConfig().Copy()
|
||||
bConfig.MinGenesisTime = 0
|
||||
params.OverrideBeaconConfig(bConfig)
|
||||
|
||||
@@ -284,7 +284,7 @@ func TestProcessETH2GenesisLog_8DuplicatePubkeys(t *testing.T) {
|
||||
|
||||
func TestProcessETH2GenesisLog(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.GenesisDelay = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
hook := logTest.NewGlobal()
|
||||
@@ -310,7 +310,7 @@ func TestProcessETH2GenesisLog(t *testing.T) {
|
||||
web3Service.depositContractCaller, err = contracts.NewDepositContractCaller(testAcc.ContractAddr, testAcc.Backend)
|
||||
require.NoError(t, err)
|
||||
params.SetupTestConfigCleanup(t)
|
||||
bConfig := params.MinimalSpecConfig()
|
||||
bConfig := params.MinimalSpecConfig().Copy()
|
||||
bConfig.MinGenesisTime = 0
|
||||
params.OverrideBeaconConfig(bConfig)
|
||||
|
||||
@@ -378,6 +378,7 @@ func TestProcessETH2GenesisLog(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProcessETH2GenesisLog_CorrectNumOfDeposits(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
hook := logTest.NewGlobal()
|
||||
testAcc, err := mock.Setup()
|
||||
require.NoError(t, err, "Unable to set up simulated backend")
|
||||
@@ -406,8 +407,7 @@ func TestProcessETH2GenesisLog_CorrectNumOfDeposits(t *testing.T) {
|
||||
web3Service.latestEth1Data.LastRequestedBlock = 0
|
||||
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
|
||||
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time()
|
||||
params.SetupTestConfigCleanup(t)
|
||||
bConfig := params.MinimalSpecConfig()
|
||||
bConfig := params.MinimalSpecConfig().Copy()
|
||||
bConfig.MinGenesisTime = 0
|
||||
bConfig.SecondsPerETH1Block = 10
|
||||
params.OverrideBeaconConfig(bConfig)
|
||||
@@ -476,6 +476,7 @@ func TestProcessETH2GenesisLog_CorrectNumOfDeposits(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProcessETH2GenesisLog_LargePeriodOfNoLogs(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
hook := logTest.NewGlobal()
|
||||
testAcc, err := mock.Setup()
|
||||
require.NoError(t, err, "Unable to set up simulated backend")
|
||||
@@ -504,8 +505,7 @@ func TestProcessETH2GenesisLog_LargePeriodOfNoLogs(t *testing.T) {
|
||||
web3Service.latestEth1Data.LastRequestedBlock = 0
|
||||
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
|
||||
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time()
|
||||
params.SetupTestConfigCleanup(t)
|
||||
bConfig := params.MinimalSpecConfig()
|
||||
bConfig := params.MinimalSpecConfig().Copy()
|
||||
bConfig.SecondsPerETH1Block = 10
|
||||
params.OverrideBeaconConfig(bConfig)
|
||||
nConfig := params.BeaconNetworkConfig()
|
||||
@@ -552,7 +552,7 @@ func TestProcessETH2GenesisLog_LargePeriodOfNoLogs(t *testing.T) {
|
||||
|
||||
// Set the genesis time 500 blocks ahead of the last
|
||||
// deposit log.
|
||||
bConfig = params.MinimalSpecConfig()
|
||||
bConfig = params.MinimalSpecConfig().Copy()
|
||||
bConfig.MinGenesisTime = wantedGenesisTime - 10
|
||||
params.OverrideBeaconConfig(bConfig)
|
||||
|
||||
@@ -589,7 +589,7 @@ func TestCheckForChainstart_NoValidator(t *testing.T) {
|
||||
require.NoError(t, err, "Unable to set up simulated backend")
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := newPowchainService(t, testAcc, beaconDB)
|
||||
s.checkForChainstart(context.Background(), [32]byte{}, nil, 0)
|
||||
s.processChainStartIfReady(context.Background(), [32]byte{}, nil, 0)
|
||||
require.LogsDoNotContain(t, hook, "Could not determine active validator count from pre genesis state")
|
||||
}
|
||||
|
||||
@@ -616,7 +616,7 @@ func newPowchainService(t *testing.T, eth1Backend *mock.TestAccount, beaconDB db
|
||||
web3Service.eth1DataFetcher = &goodFetcher{backend: eth1Backend.Backend}
|
||||
web3Service.httpLogger = &goodLogger{backend: eth1Backend.Backend}
|
||||
params.SetupTestConfigCleanup(t)
|
||||
bConfig := params.MinimalSpecConfig()
|
||||
bConfig := params.MinimalSpecConfig().Copy()
|
||||
bConfig.MinGenesisTime = 0
|
||||
params.OverrideBeaconConfig(bConfig)
|
||||
return web3Service
|
||||
|
||||
@@ -70,7 +70,9 @@ func (s *Service) pollConnectionStatus(ctx context.Context) {
 				continue
 			}
 			// Close previous client, if connection was successful.
-			currClient.Close()
+			if currClient != nil {
+				currClient.Close()
+			}
 			log.Infof("Connected to new endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
 			return
 		case <-s.ctx.Done():
@@ -92,7 +94,9 @@ func (s *Service) retryExecutionClientConnection(ctx context.Context, err error) {
 		return
 	}
 	// Close previous client, if connection was successful.
-	currClient.Close()
+	if currClient != nil {
+		currClient.Close()
+	}
 	// Reset run error in the event of a successful connection.
 	s.runError = nil
 }
@@ -114,7 +118,9 @@ func (s *Service) checkDefaultEndpoint(ctx context.Context) {
 		return
 	}
 	// Close previous client, if connection was successful.
-	currClient.Close()
+	if currClient != nil {
+		currClient.Close()
+	}
 	s.updateCurrHttpEndpoint(primaryEndpoint)
 }
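The repeated change in these hunks guards `currClient.Close()` with a nil check, since on the very first successful connection there is no previous client to tear down. The guarded swap, sketched generically with a small closer interface standing in for the concrete RPC client type:

```go
// closer stands in for any client exposing Close.
type closer interface{ Close() }

// swapClient closes the previous client, if any, and returns the new one.
func swapClient(curr, next closer) closer {
	if curr != nil {
		curr.Close() // only close when a previous connection actually exists
	}
	return next
}
```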
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
 "sync"
 "time"

 "github.com/ethereum/go-ethereum"
 "github.com/ethereum/go-ethereum/accounts/abi/bind"
 "github.com/ethereum/go-ethereum/common"
 "github.com/ethereum/go-ethereum/common/hexutil"
@@ -98,7 +97,6 @@ type POWBlockFetcher interface {
 BlockByTimestamp(ctx context.Context, time uint64) (*types.HeaderInfo, error)
 BlockHashByHeight(ctx context.Context, height *big.Int) (common.Hash, error)
 BlockExists(ctx context.Context, hash common.Hash) (bool, *big.Int, error)
 BlockExistsWithCache(ctx context.Context, hash common.Hash) (bool, *big.Int, error)
 }

 // Chain defines a standard interface for the powchain service in Prysm.
@@ -114,7 +112,6 @@ type RPCDataFetcher interface {
 Close()
 HeaderByNumber(ctx context.Context, number *big.Int) (*gethTypes.Header, error)
 HeaderByHash(ctx context.Context, hash common.Hash) (*gethTypes.Header, error)
 SyncProgress(ctx context.Context) (*ethereum.SyncProgress, error)
 }

 // RPCClient defines the rpc methods required to interact with the eth1 node.
@@ -139,10 +136,10 @@ type config struct {
 }

 // Service fetches important information about the canonical
-// Ethereum ETH1.0 chain via a web3 endpoint using an ethclient. The Random
-// Beacon Chain requires synchronization with the ETH1.0 chain's current
-// blockhash, block number, and access to logs within the
-// Validator Registration Contract on the ETH1.0 chain to kick off the beacon
+// eth1 chain via a web3 endpoint using an ethclient.
+// The beacon chain requires synchronization with the eth1 chain's current
+// block hash, block number, and access to logs within the
+// Validator Registration Contract on the eth1 chain to kick off the beacon
 // chain's validator registration process.
 type Service struct {
 connectedETH1 bool
@@ -152,7 +149,7 @@ type Service struct {
 cfg *config
 ctx context.Context
 cancel context.CancelFunc
-headTicker *time.Ticker
+eth1HeadTicker *time.Ticker
 httpLogger bind.ContractFilterer
 eth1DataFetcher RPCDataFetcher
 rpcClient RPCClient
@@ -173,11 +170,11 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
 depositTrie, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
 if err != nil {
 cancel()
-return nil, errors.Wrap(err, "could not setup deposit trie")
+return nil, errors.Wrap(err, "could not set up deposit trie")
 }
 genState, err := transition.EmptyGenesisState()
 if err != nil {
-return nil, errors.Wrap(err, "could not setup genesis state")
+return nil, errors.Wrap(err, "could not set up genesis state")
 }

 s := &Service{
@@ -201,7 +198,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
 },
 lastReceivedMerkleIndex: -1,
 preGenesisState: genState,
-headTicker: time.NewTicker(time.Duration(params.BeaconConfig().SecondsPerETH1Block) * time.Second),
+eth1HeadTicker: time.NewTicker(time.Duration(params.BeaconConfig().SecondsPerETH1Block) * time.Second),
 }

 for _, opt := range opts {
@@ -225,7 +222,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
 return s, nil
 }

-// Start a web3 service's main event loop.
+// Start the powchain service's main event loop.
 func (s *Service) Start() {
 if err := s.setupExecutionClientConnections(s.ctx, s.cfg.currHttpEndpoint); err != nil {
 log.WithError(err).Error("Could not connect to execution endpoint")
@@ -372,7 +369,7 @@ func (s *Service) ETH1ConnectionErrors() []error {

 // refers to the latest eth1 block which follows the condition: eth1_timestamp +
 // SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE <= current_unix_time
-func (s *Service) followBlockHeight(_ context.Context) (uint64, error) {
+func (s *Service) followedBlockHeight(_ context.Context) (uint64, error) {
 latestValidBlock := uint64(0)
 if s.latestEth1Data.BlockHeight > params.BeaconConfig().Eth1FollowDistance {
 latestValidBlock = s.latestEth1Data.BlockHeight - params.BeaconConfig().Eth1FollowDistance
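The comment above encodes the follow-distance rule (eth1_timestamp + SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE <= current_unix_time); the method approximates it by stepping ETH1_FOLLOW_DISTANCE blocks back from the current eth1 head, guarding against underflow when the chain is younger than the follow distance. A stand-alone sketch of that computation, with illustrative names and values:

```
package main

import "fmt"

// followedBlockHeight backs off followDistance blocks from the current
// eth1 head, but never underflows when the chain is shorter than that.
func followedBlockHeight(blockHeight, followDistance uint64) uint64 {
	latestValidBlock := uint64(0)
	if blockHeight > followDistance {
		latestValidBlock = blockHeight - followDistance
	}
	return latestValidBlock
}

func main() {
	fmt.Println(followedBlockHeight(100000, 2048)) // 97952
	fmt.Println(followedBlockHeight(1000, 2048))   // 0: head is within the follow distance
}
```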
@@ -386,8 +383,7 @@ func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositCo
 }
 s.cfg.depositCache.InsertDepositContainers(ctx, ctrs)
 if !s.chainStartData.Chainstarted {
-// do not add to pending cache
-// if no genesis state exists.
+// Do not add to pending cache if no genesis state exists.
 validDepositsCount.Add(float64(s.preGenesisState.Eth1DepositIndex()))
 return nil
 }
@@ -395,7 +391,7 @@ func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositCo
 if err != nil {
 return err
 }
-// Default to all deposits post-genesis deposits in
+// Default to all post-genesis deposits in
 // the event we cannot find a finalized state.
 currIndex := genesisState.Eth1DepositIndex()
 chkPt, err := s.cfg.beaconDB.FinalizedCheckpoint(ctx)
@@ -411,17 +407,17 @@ func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositCo
 // Set deposit index to the one in the current archived state.
 currIndex = fState.Eth1DepositIndex()

-// when a node pauses for some time and starts again, the deposits to finalize
-// accumulates. we finalize them here before we are ready to receive a block.
+// When a node pauses for some time and starts again, the deposits to finalize
+// accumulates. We finalize them here before we are ready to receive a block.
 // Otherwise, the first few blocks will be slower to compute as we will
 // hold the lock and be busy finalizing the deposits.
 // The deposit index in the state is always the index of the next deposit
-// to be included(rather than the last one to be processed). This was most likely
+// to be included (rather than the last one to be processed). This was most likely
 // done as the state cannot represent signed integers.
 actualIndex := int64(currIndex) - 1 // lint:ignore uintcast -- deposit index will not exceed int64 in your lifetime.
 s.cfg.depositCache.InsertFinalizedDeposits(ctx, actualIndex)
-// Deposit proofs are only used during state transition and can be safely removed to save space.
+
+// Deposit proofs are only used during state transition and can be safely removed to save space.
 if err = s.cfg.depositCache.PruneProofs(ctx, actualIndex); err != nil {
 return errors.Wrap(err, "could not prune deposit proofs")
 }
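As the comment notes, the state stores the index of the next deposit to include, so the last processed deposit is that value minus one, and the subtraction is done on a signed integer so an untouched state maps cleanly to -1. A tiny sketch of that conversion:

```
package main

import "fmt"

// lastProcessedDepositIndex converts the state's "next deposit to include"
// counter into the index of the last processed deposit, using -1 to mean
// "nothing processed yet" since the unsigned state field cannot.
func lastProcessedDepositIndex(eth1DepositIndex uint64) int64 {
	return int64(eth1DepositIndex) - 1
}

func main() {
	fmt.Println(lastProcessedDepositIndex(0))  // -1: no deposits processed yet
	fmt.Println(lastProcessedDepositIndex(42)) // 41: deposit #41 was the last one included
}
```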
@@ -498,8 +494,7 @@ func (s *Service) batchRequestHeaders(startBlock, endBlock uint64) ([]*gethTypes
 return headers, nil
 }

-// safelyHandleHeader will recover and log any panic that occurs from the
-// block
+// safelyHandleHeader will recover and log any panic that occurs from the block
 func safelyHandlePanic() {
 if r := recover(); r != nil {
 log.WithFields(logrus.Fields{
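safelyHandlePanic is the usual deferred-recover idiom: a panic raised while handling a header is caught and logged instead of crashing the service. A self-contained sketch of the pattern using the standard library logger (function and message names are illustrative):

```
package main

import "log"

// safelyHandlePanic catches any panic in the surrounding goroutine and
// logs it rather than taking the whole process down.
func safelyHandlePanic() {
	if r := recover(); r != nil {
		log.Printf("recovered from panic while handling header: %v", r)
	}
}

func handleHeader(height uint64) {
	defer safelyHandlePanic()
	if height == 0 {
		panic("unexpected zero height")
	}
	log.Printf("processed header at height %d", height)
}

func main() {
	handleHeader(0) // logs the recovered panic instead of crashing
	handleHeader(1) // processed normally
}
```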
@@ -522,7 +517,7 @@ func (s *Service) handleETH1FollowDistance() {
 log.Warn("Execution client is not syncing")
 }
 if !s.chainStartData.Chainstarted {
-if err := s.checkBlockNumberForChainStart(ctx, big.NewInt(int64(s.latestEth1Data.LastRequestedBlock))); err != nil {
+if err := s.processChainStartFromBlockNum(ctx, big.NewInt(int64(s.latestEth1Data.LastRequestedBlock))); err != nil {
 s.runError = err
 log.Error(err)
 return
@@ -530,9 +525,8 @@ func (s *Service) handleETH1FollowDistance() {
 }
 // If the last requested block has not changed,
 // we do not request batched logs as this means there are no new
-// logs for the powchain service to process. Also is a potential
-// failure condition as would mean we have not respected the protocol
-// threshold.
+// logs for the powchain service to process. Also it is a potential
+// failure condition as would mean we have not respected the protocol threshold.
 if s.latestEth1Data.LastRequestedBlock == s.latestEth1Data.BlockHeight {
 log.Error("Beacon node is not respecting the follow distance")
 return
@@ -595,7 +589,7 @@ func (s *Service) initPOWService() {
 if s.chainStartData.Chainstarted && s.chainStartData.GenesisBlock == 0 {
 genHash := common.BytesToHash(s.chainStartData.Eth1Data.BlockHash)
 genBlock := s.chainStartData.GenesisBlock
-// In the event our provided chainstart data references a non-existent blockhash
+// In the event our provided chainstart data references a non-existent block hash,
 // we assume the genesis block to be 0.
 if genHash != [32]byte{} {
 genHeader, err := s.eth1DataFetcher.HeaderByHash(ctx, genHash)
@@ -618,7 +612,7 @@ func (s *Service) initPOWService() {
 }
 }

-// run subscribes to all the services for the ETH1.0 chain.
+// run subscribes to all the services for the eth1 chain.
 func (s *Service) run(done <-chan struct{}) {
 s.runError = nil

@@ -636,7 +630,7 @@ func (s *Service) run(done <-chan struct{}) {
 s.updateConnectedETH1(false)
 log.Debug("Context closed, exiting goroutine")
 return
-case <-s.headTicker.C:
+case <-s.eth1HeadTicker.C:
 head, err := s.eth1DataFetcher.HeaderByNumber(s.ctx, nil)
 if err != nil {
 s.pollConnectionStatus(s.ctx)
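The renamed eth1HeadTicker drives the loop above: once per SecondsPerETH1Block the service asks for the latest header and, if the request fails, falls back to re-checking the connection rather than exiting. A stand-alone sketch of such a ticker-driven poll loop, with a stand-in fetchHead callback and illustrative values in place of the real RPC fetcher:

```
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// pollHeads wakes on each tick, asks for the latest head, and on failure
// would fall back to re-checking the connection instead of exiting.
func pollHeads(ctx context.Context, secondsPerBlock uint64, fetchHead func() (uint64, error)) {
	ticker := time.NewTicker(time.Duration(secondsPerBlock) * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			fmt.Println("context closed, exiting goroutine")
			return
		case <-ticker.C:
			head, err := fetchHead()
			if err != nil {
				fmt.Println("fetch failed, would re-poll connection:", err)
				continue
			}
			fmt.Println("new eth1 head:", head)
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3500*time.Millisecond)
	defer cancel()
	n := uint64(0)
	pollHeads(ctx, 1, func() (uint64, error) {
		n++
		if n == 2 {
			return 0, errors.New("endpoint hiccup")
		}
		return 15537393 + n, nil
	})
}
```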
@@ -692,10 +686,10 @@ func (s *Service) logTillChainStart(ctx context.Context) {
 }

 // cacheHeadersForEth1DataVote makes sure that voting for eth1data after startup utilizes cached headers
-// instead of making multiple RPC requests to the ETH1 endpoint.
+// instead of making multiple RPC requests to the eth1 endpoint.
 func (s *Service) cacheHeadersForEth1DataVote(ctx context.Context) error {
 // Find the end block to request from.
-end, err := s.followBlockHeight(ctx)
+end, err := s.followedBlockHeight(ctx)
 if err != nil {
 return err
 }
@@ -739,12 +733,12 @@ func (s *Service) cacheBlockHeaders(start, end uint64) error {
 return nil
 }

-// determines the earliest voting block from which to start caching all our previous headers from.
+// Determines the earliest voting block from which to start caching all our previous headers from.
 func (s *Service) determineEarliestVotingBlock(ctx context.Context, followBlock uint64) (uint64, error) {
 genesisTime := s.chainStartData.GenesisTime
 currSlot := slots.CurrentSlot(genesisTime)

-// In the event genesis has not occurred yet, we just request go back follow_distance blocks.
+// In the event genesis has not occurred yet, we just request to go back follow_distance blocks.
 if genesisTime == 0 || currSlot == 0 {
 earliestBlk := uint64(0)
 if followBlock > params.BeaconConfig().Eth1FollowDistance {
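determineEarliestVotingBlock starts from slots.CurrentSlot(genesisTime), i.e. how many SECONDS_PER_SLOT intervals have elapsed since genesis. A rough stand-alone sketch of that slot arithmetic; the 12-second slot duration and mainnet genesis timestamp are only example inputs, and the helper below is an approximation rather than Prysm's implementation:

```
package main

import (
	"fmt"
	"time"
)

// currentSlot is a rough stand-in for a CurrentSlot helper: slots elapsed
// since genesis at secondsPerSlot seconds each, and 0 before genesis.
func currentSlot(genesisTime, now, secondsPerSlot uint64) uint64 {
	if genesisTime == 0 || now < genesisTime {
		return 0
	}
	return (now - genesisTime) / secondsPerSlot
}

func main() {
	genesis := uint64(1606824023) // mainnet beacon chain genesis timestamp
	now := uint64(time.Now().Unix())
	fmt.Println("current mainnet slot (approx):", currentSlot(genesis, now, 12))
}
```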
@@ -773,9 +767,12 @@ func (s *Service) initializeEth1Data(ctx context.Context, eth1DataInDB *ethpb.ET
 if eth1DataInDB == nil {
 return nil
 }
-s.depositTrie = trie.CreateTrieFromProto(eth1DataInDB.Trie)
-s.chainStartData = eth1DataInDB.ChainstartData
+var err error
+s.depositTrie, err = trie.CreateTrieFromProto(eth1DataInDB.Trie)
+if err != nil {
+return err
+}
+s.chainStartData = eth1DataInDB.ChainstartData
 if !reflect.ValueOf(eth1DataInDB.BeaconState).IsZero() {
 s.preGenesisState, err = v1.InitializeFromProto(eth1DataInDB.BeaconState)
 if err != nil {
@@ -791,7 +788,7 @@ func (s *Service) initializeEth1Data(ctx context.Context, eth1DataInDB *ethpb.ET
 return nil
 }

-// validates that all deposit containers are valid and have their relevant indices
+// Validates that all deposit containers are valid and have their relevant indices
 // in order.
 func validateDepositContainers(ctrs []*ethpb.DepositContainer) bool {
 ctrLen := len(ctrs)
@@ -814,7 +811,7 @@ func validateDepositContainers(ctrs []*ethpb.DepositContainer) bool {
 return true
 }

-// validates the current powchain data saved and makes sure that any
+// Validates the current powchain data is saved and makes sure that any
 // embedded genesis state is correctly accounted for.
 func (s *Service) ensureValidPowchainData(ctx context.Context) error {
 genState, err := s.cfg.beaconDB.GenesisState(ctx)