Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 05:47:59 -05:00)

Compare commits: test_appro...v2.1.4-rc. (135 commits)
SHA1 of each commit in the comparison (the author and date columns were not captured):

a7c9c76b18, 5a4edf897f, 2d6b157eea, 32745b5484, bfdaf2ec5a, 39c343bcab, de1ecf2d60, 7aee67af90, 9830ce43d6, 63a8690140,
7978a0269b, 021df67fdc, f20e6351f5, a2e834a683, da25624b1d, 1a5dd879c5, 65a9ede2d3, 4b083c2ca9, 82ae4caca8, e770f0fe1f,
30b8fba2ac, 5d94fd48ca, 176d763091, 97dc9ebbc8, 6a74dcf35b, a775798d89, e2442bb0a6, 2669006694, 5722a5793c, 96aba590b5,
2162ffb05f, d41805a39c, 819632dfc5, 10fffa6e7c, 5cda86bb93, f2dcc9a570, 63354b5bb7, e48f0aef41, ab1defc5de, b0e15f3c8f,
e01a898264, 7c30533870, 7654ffdcfc, d6031ac386, f7d3b5c510, c0f3946f58, c33acde64e, 8310d22a05, 3060096233, 5d06c14cec,
57abf02e34, 44218a9c5b, d53f2c7661, 5e53d6976e, 7fd2c08be3, 6695e7c756, aeede2165d, 67a15183ca, 208dc80702, e80806d455,
80d0a82f9b, f9b3cde005, d8f9750387, 5e8b9dde5b, c2caff4230, b67c885995, 60ed488428, 2f0e2886b4, 2d53ae79df, ce277cb388,
c9a366c36a, 3a957c567f, 77a63f871d, 8dd8ccc147, 3c48bce3a3, 0ed5007d2e, 65900115fc, 379bed9268, 2dd2e74369, 73237826d3,
af4d0c84c8, c68f1883d6, ae1685d937, 2a5f05bc29, 49e5e73ec0, 2ecb905ae5, d4e7da8200, 4b042a7103, e59859c78f, 6bcc7d3a5e,
7b597bb130, 2c7b273260, 8bedaaf0a8, 69350a6a80, 93e8c749f8, 96fecf8c57, 5d29ca4984, 43523c0266, 8ebbde7836, 44c39a0b40,
f376f3fb9b, 8510743406, b82e2e7d40, acfafd3f0d, 13001cd000, 5291b9d85b, 7747471624, 31f96a05b3, dfe8e54a42, 3a841a8467,
7f56ac6355, 9216be7d43, 7c489199bf, f8b4d8c57a, b7463d0070, 2b6e86ec1b, 4e604ee22b, 3576d2ccfe, 4b009ed813, 7faad27f5f,
2d3966bf4f, 58d10e3ace, 88becdc114, 20b4c60bcb, 0b50ab7832, 6b55d33ea2, 7e64a3963d, 55b4af9f92, 546bb5ed53, 29a25b3f09,
56af079aea, 385f101b2b, b3c3b207c9, e0cd10ed3c, 2d0fdf8b4a
@@ -19,7 +19,7 @@ linters:
linters-settings:
gocognit:
# TODO: We should target for < 50
min-complexity: 97
min-complexity: 65

output:
print-issued-lines: true
@@ -5,6 +5,7 @@
[](https://github.com/ethereum/consensus-specs/tree/v1.2.0-rc.1)
[](https://github.com/ethereum/execution-apis/tree/v1.0.0-alpha.9/src/engine)
[](https://discord.gg/CTYGPUJ)
[](https://www.gitpoap.io/gh/prysmaticlabs/prysm)

This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum Consensus](https://ethereum.org/en/eth2/) specification, developed by [Prysmatic Labs](https://prysmaticlabs.com). See the [Changelog](https://github.com/prysmaticlabs/prysm/releases) for details of the latest releases and upcoming breaking changes.
WORKSPACE
@@ -309,9 +309,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "4e8a18b21d056c4032605621b1a6632198eabab57cb90c61e273f344c287f1b2",
strip_prefix = "eth2-networks-791a5369c5981e829698b17fbcdcdacbdaba97c8",
url = "https://github.com/eth-clients/eth2-networks/archive/791a5369c5981e829698b17fbcdcdacbdaba97c8.tar.gz",
sha256 = "126b615e3853e29b61f082f6c89c8bc1c38cd92fb84b0004396fc49e7acc8d9f",
strip_prefix = "eth2-networks-f3ccbe0cf5798d5cd23e4e6e7119aefa043c0935",
url = "https://github.com/eth-clients/eth2-networks/archive/f3ccbe0cf5798d5cd23e4e6e7119aefa043c0935.tar.gz",
)

http_archive(
@@ -342,9 +342,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "4797a7e594a5b1f4c1c8080701613f3ee451b01ec0861499ea7d9b60877a6b23",
sha256 = "98013b40922e54a64996da49b939e0a88fe2456f68eedc5aee4ceba0f8623f71",
urls = [
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.3/prysm-web-ui.tar.gz",
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v2.0.0/prysm-web-ui.tar.gz",
],
)
@@ -12,11 +12,13 @@ go_library(
deps = [
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//monitoring/tracing:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)

@@ -36,6 +38,7 @@ go_test(
"//proto/prysm/v1alpha1:go_default_library",
"//testing/require:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_golang_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
],
)
@@ -12,12 +12,13 @@ import (
"text/template"
"time"

v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"

"github.com/pkg/errors"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/monitoring/tracing"
v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)

const (
@@ -128,37 +129,51 @@ func (c *Client) NodeURL() string {

type reqOption func(*http.Request)

// do is a generic, opinionated GET function to reduce boilerplate amongst the getters in this packageapi/client/builder/types.go.
func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, opts ...reqOption) ([]byte, error) {
// do is a generic, opinionated request function to reduce boilerplate amongst the methods in this package api/client/builder/types.go.
func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, opts ...reqOption) (res []byte, err error) {
ctx, span := trace.StartSpan(ctx, "builder.client.do")
defer func() {
tracing.AnnotateError(span, err)
span.End()
}()

u := c.baseURL.ResolveReference(&url.URL{Path: path})
log.Printf("requesting %s", u.String())

span.AddAttributes(trace.StringAttribute("url", u.String()),
trace.StringAttribute("method", method))

req, err := http.NewRequestWithContext(ctx, method, u.String(), body)
if err != nil {
return nil, err
return
}
for _, o := range opts {
o(req)
}
for _, o := range c.obvs {
if err := o.observe(req); err != nil {
return nil, err
if err = o.observe(req); err != nil {
return
}
}
r, err := c.hc.Do(req)
if err != nil {
return nil, err
return
}
defer func() {
err = r.Body.Close()
closeErr := r.Body.Close()
if closeErr != nil {
log.WithError(closeErr).Error("Failed to close response body")
}
}()
if r.StatusCode != http.StatusOK {
return nil, non200Err(r)
err = non200Err(r)
return
}
b, err := io.ReadAll(r.Body)
res, err = io.ReadAll(r.Body)
if err != nil {
return nil, errors.Wrap(err, "error reading http response body from GetBlock")
err = errors.Wrap(err, "error reading http response body from builder server")
return
}
return b, nil
return
}

var execHeaderTemplate = template.Must(template.New("").Parse(getExecHeaderPath))
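The reworked do above switches to named return values (res []byte, err error) so that a single deferred call to tracing.AnnotateError records whichever error the function ultimately returns, regardless of which exit path fires. A minimal, self-contained sketch of that pattern; the annotate helper and fetch function below are illustrative stand-ins, not Prysm code:

```go
package main

import (
	"errors"
	"fmt"
)

// annotate stands in for tracing.AnnotateError: it records err on a span.
func annotate(span string, err error) {
	if err != nil {
		fmt.Printf("span %q recorded error: %v\n", span, err)
	}
}

// fetch demonstrates the named-return pattern: every early return still
// passes through the single deferred annotate call with the final err value.
func fetch(fail bool) (res []byte, err error) {
	defer func() {
		annotate("client.fetch", err)
	}()

	if fail {
		err = errors.New("upstream returned non-200")
		return // res stays nil; err is annotated by the deferred func
	}
	res = []byte("ok")
	return
}

func main() {
	if _, err := fetch(true); err != nil {
		fmt.Println("caller saw:", err)
	}
}
```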
@@ -201,8 +216,14 @@ func (c *Client) GetHeader(ctx context.Context, slot types.Slot, parentHash [32]
// RegisterValidator encodes the SignedValidatorRegistrationV1 message to json (including hex-encoding the byte
// fields with 0x prefixes) and posts to the builder validator registration endpoint.
func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error {
ctx, span := trace.StartSpan(ctx, "builder.client.RegisterValidator")
defer span.End()
span.AddAttributes(trace.Int64Attribute("num_reqs", int64(len(svr))))

if len(svr) == 0 {
return errors.Wrap(errMalformedRequest, "empty validator registration list")
err := errors.Wrap(errMalformedRequest, "empty validator registration list")
tracing.AnnotateError(span, err)
return err
}
vs := make([]*SignedValidatorRegistration, len(svr))
for i := 0; i < len(svr); i++ {
@@ -210,8 +231,11 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
}
body, err := json.Marshal(vs)
if err != nil {
return errors.Wrap(err, "error encoding the SignedValidatorRegistration value body in RegisterValidator")
err := errors.Wrap(err, "error encoding the SignedValidatorRegistration value body in RegisterValidator")
tracing.AnnotateError(span, err)
return err
}

_, err = c.do(ctx, http.MethodPost, postRegisterValidatorPath, bytes.NewBuffer(body))
return err
}
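RegisterValidator's comment notes that byte fields are hex-encoded with 0x prefixes when the registrations are marshaled to JSON; that behavior comes from go-ethereum's hexutil.Bytes type used in the wrapper structs. A small stand-alone illustration — the registration struct here is a simplified stand-in, not Prysm's wrapper type:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

// registration mirrors the shape of the JSON wrapper used above:
// byte slices serialize as 0x-prefixed hex, uint64s travel as decimal strings.
type registration struct {
	FeeRecipient hexutil.Bytes `json:"fee_recipient"`
	GasLimit     string        `json:"gas_limit"`
	Pubkey       hexutil.Bytes `json:"pubkey"`
}

func main() {
	r := registration{
		FeeRecipient: make(hexutil.Bytes, 20), // zero execution address
		GasLimit:     "30000000",
		Pubkey:       make(hexutil.Bytes, 48), // zero BLS pubkey
	}
	out, err := json.Marshal(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // fee_recipient and pubkey appear as "0x00..." strings
}
```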
@@ -14,6 +14,7 @@ import (
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
)
@@ -300,7 +301,7 @@ func testSignedBlindedBeaconBlockBellatrix(t *testing.T) *eth.SignedBlindedBeaco
SyncCommitteeSignature: make([]byte, 48),
SyncCommitteeBits: bitfield.Bitvector512{0x01},
},
ExecutionPayloadHeader: &eth.ExecutionPayloadHeader{
ExecutionPayloadHeader: &v1.ExecutionPayloadHeader{
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
@@ -31,6 +31,22 @@ func (r *SignedValidatorRegistration) MarshalJSON() ([]byte, error) {
})
}

func (r *SignedValidatorRegistration) UnmarshalJSON(b []byte) error {
if r.SignedValidatorRegistrationV1 == nil {
r.SignedValidatorRegistrationV1 = &eth.SignedValidatorRegistrationV1{}
}
o := struct {
Message *ValidatorRegistration `json:"message,omitempty"`
Signature hexutil.Bytes `json:"signature,omitempty"`
}{}
if err := json.Unmarshal(b, &o); err != nil {
return err
}
r.Message = o.Message.ValidatorRegistrationV1
r.Signature = o.Signature
return nil
}

func (r *ValidatorRegistration) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`
@@ -45,6 +61,33 @@ func (r *ValidatorRegistration) MarshalJSON() ([]byte, error) {
})
}

func (r *ValidatorRegistration) UnmarshalJSON(b []byte) error {
if r.ValidatorRegistrationV1 == nil {
r.ValidatorRegistrationV1 = &eth.ValidatorRegistrationV1{}
}
o := struct {
FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`
GasLimit string `json:"gas_limit,omitempty"`
Timestamp string `json:"timestamp,omitempty"`
Pubkey hexutil.Bytes `json:"pubkey,omitempty"`
}{}
if err := json.Unmarshal(b, &o); err != nil {
return err
}

r.FeeRecipient = o.FeeRecipient
r.Pubkey = o.Pubkey
var err error
if r.GasLimit, err = strconv.ParseUint(o.GasLimit, 10, 64); err != nil {
return errors.Wrap(err, "failed to parse gas limit")
}
if r.Timestamp, err = strconv.ParseUint(o.Timestamp, 10, 64); err != nil {
return errors.Wrap(err, "failed to parse timestamp")
}

return nil
}

type Uint256 struct {
*big.Int
}
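The new ValidatorRegistration.UnmarshalJSON above accepts gas_limit and timestamp as quoted decimal strings and converts them with strconv.ParseUint. A stdlib-only sketch of that same decode step, using hypothetical wire and registration types rather than the real builder structs:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// wire is the JSON shape: uint64 fields travel as decimal strings.
type wire struct {
	GasLimit  string `json:"gas_limit"`
	Timestamp string `json:"timestamp"`
}

// registration is the in-memory shape with real integer types.
type registration struct {
	GasLimit  uint64
	Timestamp uint64
}

func (r *registration) UnmarshalJSON(b []byte) error {
	var w wire
	if err := json.Unmarshal(b, &w); err != nil {
		return err
	}
	var err error
	if r.GasLimit, err = strconv.ParseUint(w.GasLimit, 10, 64); err != nil {
		return fmt.Errorf("failed to parse gas limit: %w", err)
	}
	if r.Timestamp, err = strconv.ParseUint(w.Timestamp, 10, 64); err != nil {
		return fmt.Errorf("failed to parse timestamp: %w", err)
	}
	return nil
}

func main() {
	var r registration
	if err := json.Unmarshal([]byte(`{"gas_limit":"30000000","timestamp":"1652902500"}`), &r); err != nil {
		panic(err)
	}
	fmt.Println(r.GasLimit, r.Timestamp) // 30000000 1652902500
}
```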
@@ -149,8 +192,8 @@ func (bb *BuilderBid) ToProto() (*eth.BuilderBid, error) {
}, nil
}

func (h *ExecutionPayloadHeader) ToProto() (*eth.ExecutionPayloadHeader, error) {
return &eth.ExecutionPayloadHeader{
func (h *ExecutionPayloadHeader) ToProto() (*v1.ExecutionPayloadHeader, error) {
return &v1.ExecutionPayloadHeader{
ParentHash: h.ParentHash,
FeeRecipient: h.FeeRecipient,
StateRoot: h.StateRoot,
@@ -189,7 +232,7 @@ type ExecutionPayloadHeader struct {
BaseFeePerGas Uint256 `json:"base_fee_per_gas,omitempty"`
BlockHash hexutil.Bytes `json:"block_hash,omitempty"`
TransactionsRoot hexutil.Bytes `json:"transactions_root,omitempty"`
*eth.ExecutionPayloadHeader
*v1.ExecutionPayloadHeader
}

func (h *ExecutionPayloadHeader) MarshalJSON() ([]byte, error) {
@@ -9,6 +9,7 @@ import (
"testing"

"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/golang/protobuf/proto"
"github.com/prysmaticlabs/go-bitfield"
v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
@@ -31,7 +32,8 @@ func TestSignedValidatorRegistration_MarshalJSON(t *testing.T) {
},
Signature: make([]byte, 96),
}
je, err := json.Marshal(&SignedValidatorRegistration{SignedValidatorRegistrationV1: svr})
a := &SignedValidatorRegistration{SignedValidatorRegistrationV1: svr}
je, err := json.Marshal(a)
require.NoError(t, err)
// decode with a struct w/ plain strings so we can check the string encoding of the hex fields
un := struct {
@@ -45,6 +47,14 @@ func TestSignedValidatorRegistration_MarshalJSON(t *testing.T) {
require.Equal(t, "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", un.Signature)
require.Equal(t, "0x0000000000000000000000000000000000000000", un.Message.FeeRecipient)
require.Equal(t, "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", un.Message.Pubkey)

t.Run("roundtrip", func(t *testing.T) {
b := &SignedValidatorRegistration{}
if err := json.Unmarshal(je, b); err != nil {
require.NoError(t, err)
}
require.Equal(t, proto.Equal(a.SignedValidatorRegistrationV1, b.SignedValidatorRegistrationV1), true)
})
}

var testExampleHeaderResponse = `{
@@ -205,7 +215,7 @@ func TestExecutionHeaderResponseToProto(t *testing.T) {

expected := &eth.SignedBuilderBid{
Message: &eth.BuilderBid{
Header: &eth.ExecutionPayloadHeader{
Header: &v1.ExecutionPayloadHeader{
ParentHash: parentHash,
FeeRecipient: feeRecipient,
StateRoot: stateRoot,
@@ -567,9 +577,9 @@ func TestProposerSlashings(t *testing.T) {
require.Equal(t, expected, string(b))
}

func pbExecutionPayloadHeader(t *testing.T) *eth.ExecutionPayloadHeader {
func pbExecutionPayloadHeader(t *testing.T) *v1.ExecutionPayloadHeader {
bfpg := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
return &eth.ExecutionPayloadHeader{
return &v1.ExecutionPayloadHeader{
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
@@ -9,7 +9,7 @@ import (
"google.golang.org/protobuf/proto"
)

func HttpResponseModifier(ctx context.Context, w http.ResponseWriter, resp proto.Message) error {
func HttpResponseModifier(ctx context.Context, w http.ResponseWriter, _ proto.Message) error {
md, ok := gwruntime.ServerMetadataFromContext(ctx)
if !ok {
return nil
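The only change to HttpResponseModifier is renaming the unused resp parameter to the blank identifier: the signature required by the gateway hook is preserved while it becomes explicit (and lint-clean) that the value is ignored. A tiny stand-alone illustration of the convention:

```go
package main

import "fmt"

// handler matches a callback signature where the second argument is required
// by the interface but not needed by this particular implementation.
type handler func(path string, _ int) string

func logOnly(path string, _ int) string {
	// The blank identifier documents that the status code is intentionally unused.
	return "handled " + path
}

func main() {
	var h handler = logOnly
	fmt.Println(h("/eth/v1/node/health", 200))
}
```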
@@ -3,6 +3,7 @@ package async_test
import (
"context"
"sync"
"sync/atomic"
"testing"
"time"

@@ -16,7 +17,7 @@ func TestDebounce_NoEvents(t *testing.T) {
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(context.Background())
interval := time.Second
timesHandled := 0
timesHandled := int32(0)
wg := &sync.WaitGroup{}
wg.Add(1)
go func() {
@@ -26,21 +27,21 @@ func TestDebounce_NoEvents(t *testing.T) {
}()
go func() {
async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
timesHandled++
atomic.AddInt32(&timesHandled, 1)
})
wg.Done()
}()
if util.WaitTimeout(wg, interval*2) {
t.Fatalf("Test should have exited by now, timed out")
}
assert.Equal(t, 0, timesHandled, "Wrong number of handled calls")
assert.Equal(t, int32(0), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")
}

func TestDebounce_CtxClosing(t *testing.T) {
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(context.Background())
interval := time.Second
timesHandled := 0
timesHandled := int32(0)
wg := &sync.WaitGroup{}
wg.Add(1)
go func() {
@@ -62,23 +63,23 @@ func TestDebounce_CtxClosing(t *testing.T) {
}()
go func() {
async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
timesHandled++
atomic.AddInt32(&timesHandled, 1)
})
wg.Done()
}()
if util.WaitTimeout(wg, interval*2) {
t.Fatalf("Test should have exited by now, timed out")
}
assert.Equal(t, 0, timesHandled, "Wrong number of handled calls")
assert.Equal(t, int32(0), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")
}

func TestDebounce_SingleHandlerInvocation(t *testing.T) {
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(context.Background())
interval := time.Second
timesHandled := 0
timesHandled := int32(0)
go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
timesHandled++
atomic.AddInt32(&timesHandled, 1)
})
for i := 0; i < 100; i++ {
eventsChan <- struct{}{}
@@ -86,7 +87,7 @@ func TestDebounce_SingleHandlerInvocation(t *testing.T) {
// We should expect 100 rapid fire changes to only have caused
// 1 handler to trigger after the debouncing period.
time.Sleep(interval * 2)
assert.Equal(t, 1, timesHandled, "Wrong number of handled calls")
assert.Equal(t, int32(1), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")
cancel()
}

@@ -94,23 +95,23 @@ func TestDebounce_MultipleHandlerInvocation(t *testing.T) {
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(context.Background())
interval := time.Second
timesHandled := 0
timesHandled := int32(0)
go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
timesHandled++
atomic.AddInt32(&timesHandled, 1)
})
for i := 0; i < 100; i++ {
eventsChan <- struct{}{}
}
require.Equal(t, 0, timesHandled, "Events must prevent from handler execution")
require.Equal(t, int32(0), atomic.LoadInt32(&timesHandled), "Events must prevent from handler execution")

// By this time the first event should be triggered.
time.Sleep(2 * time.Second)
assert.Equal(t, 1, timesHandled, "Wrong number of handled calls")
assert.Equal(t, int32(1), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")

// Second event.
eventsChan <- struct{}{}
time.Sleep(2 * time.Second)
assert.Equal(t, 2, timesHandled, "Wrong number of handled calls")
assert.Equal(t, int32(2), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")

cancel()
}
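The debounce tests switch their timesHandled counters from plain ints to int32 values accessed through sync/atomic because the handler increments the counter on one goroutine while the test reads it on another; the unsynchronized version is a data race that go test -race flags. A minimal, stand-alone sketch of the fixed pattern (not the Prysm tests themselves):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var handled int32 // shared between the worker goroutine and main

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < 100; i++ {
			// atomic.AddInt32 replaces handled++ so the write is race-free.
			atomic.AddInt32(&handled, 1)
		}
	}()
	wg.Wait()

	// atomic.LoadInt32 replaces a bare read of handled in the assertion.
	fmt.Println("handled:", atomic.LoadInt32(&handled))
}
```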
@@ -2,6 +2,7 @@ package async_test

import (
"context"
"sync/atomic"
"testing"
"time"

@@ -11,15 +12,15 @@ import (
func TestEveryRuns(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())

i := 0
i := int32(0)
async.RunEvery(ctx, 100*time.Millisecond, func() {
i++
atomic.AddInt32(&i, 1)
})

// Sleep for a bit and ensure the value has increased.
time.Sleep(200 * time.Millisecond)

if i == 0 {
if atomic.LoadInt32(&i) == 0 {
t.Error("Counter failed to increment with ticker")
}

@@ -28,12 +29,12 @@ func TestEveryRuns(t *testing.T) {
// Sleep for a bit to let the cancel take place.
time.Sleep(100 * time.Millisecond)

last := i
last := atomic.LoadInt32(&i)

// Sleep for a bit and ensure the value has not increased.
time.Sleep(200 * time.Millisecond)

if i != last {
if atomic.LoadInt32(&i) != last {
t.Error("Counter incremented after stop")
}
}
@@ -49,7 +49,7 @@ func (lk *Lock) Lock() {
lk.unlock <- 1
}

// Unlocks this lock. Must be called after Lock.
// Unlock unlocks this lock. Must be called after Lock.
// Can only be invoked if there is a previous call to Lock.
func (lk *Lock) Unlock() {
<-lk.unlock
@@ -65,14 +65,14 @@ func (lk *Lock) Unlock() {
<-lk.lock
}

// Temporarily unlocks, gives up the cpu time to other goroutine, and attempts to lock again.
// Yield temporarily unlocks, gives up the cpu time to other goroutine, and attempts to lock again.
func (lk *Lock) Yield() {
lk.Unlock()
runtime.Gosched()
lk.Lock()
}

// Creates a new multilock for the specified keys
// NewMultilock creates a new multilock for the specified keys
func NewMultilock(locks ...string) *Lock {
if len(locks) == 0 {
return nil
@@ -87,7 +87,7 @@ func NewMultilock(locks ...string) *Lock {
}
}

// Cleans old unused locks. Returns removed keys.
// Clean cleans old unused locks. Returns removed keys.
func Clean() []string {
locks.lock <- 1
defer func() { <-locks.lock }()
@@ -22,22 +22,22 @@ import (

func TestUnique(t *testing.T) {
var arr []string
assert := assert.New(t)
a := assert.New(t)

arr = []string{"a", "b", "c"}
assert.Equal(arr, unique(arr))
a.Equal(arr, unique(arr))

arr = []string{"a", "a", "a"}
assert.Equal([]string{"a"}, unique(arr))
a.Equal([]string{"a"}, unique(arr))

arr = []string{"a", "a", "b"}
assert.Equal([]string{"a", "b"}, unique(arr))
a.Equal([]string{"a", "b"}, unique(arr))

arr = []string{"a", "b", "a"}
assert.Equal([]string{"a", "b"}, unique(arr))
a.Equal([]string{"a", "b"}, unique(arr))

arr = []string{"a", "b", "c", "b", "d"}
assert.Equal([]string{"a", "b", "c", "d"}, unique(arr))
a.Equal([]string{"a", "b", "c", "d"}, unique(arr))
}

func TestGetChan(t *testing.T) {
@@ -45,9 +45,9 @@ func TestGetChan(t *testing.T) {
ch2 := getChan("aa")
ch3 := getChan("a")

assert := assert.New(t)
assert.NotEqual(ch1, ch2)
assert.Equal(ch1, ch3)
a := assert.New(t)
a.NotEqual(ch1, ch2)
a.Equal(ch1, ch3)
}

func TestLockUnlock(_ *testing.T) {
@@ -12,7 +12,6 @@ go_library(
"log.go",
"merge_ascii_art.go",
"metrics.go",
"new_slot.go",
"options.go",
"pow_block.go",
"process_attestation.go",
@@ -35,7 +34,6 @@ go_library(
deps = [
"//async:go_default_library",
"//async/event:go_default_library",
"//beacon-chain/blockchain/store:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/core/altair:go_default_library",
@@ -65,7 +63,6 @@ go_library(
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/forks/bellatrix:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",
@@ -113,7 +110,6 @@ go_test(
"log_test.go",
"metrics_test.go",
"mock_test.go",
"new_slot_test.go",
"pow_block_test.go",
"process_attestation_test.go",
"process_block_test.go",
@@ -133,6 +129,7 @@ go_test(
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/powchain/testing:go_default_library",
@@ -189,6 +186,7 @@ go_test(
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/powchain/testing:go_default_library",
@@ -4,8 +4,6 @@ import (
"context"
"time"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/store"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
@@ -81,10 +79,11 @@ type CanonicalFetcher interface {
// FinalizationFetcher defines a common interface for methods in blockchain service which
// directly retrieve finalization and justification related data.
type FinalizationFetcher interface {
FinalizedCheckpt() (*ethpb.Checkpoint, error)
CurrentJustifiedCheckpt() (*ethpb.Checkpoint, error)
PreviousJustifiedCheckpt() (*ethpb.Checkpoint, error)
FinalizedCheckpt() *ethpb.Checkpoint
CurrentJustifiedCheckpt() *ethpb.Checkpoint
PreviousJustifiedCheckpt() *ethpb.Checkpoint
VerifyFinalizedBlkDescendant(ctx context.Context, blockRoot [32]byte) error
IsFinalized(ctx context.Context, blockRoot [32]byte) bool
}

// OptimisticModeFetcher retrieves information about optimistic status of the node.
@@ -94,47 +93,27 @@ type OptimisticModeFetcher interface {
}

// FinalizedCheckpt returns the latest finalized checkpoint from chain store.
func (s *Service) FinalizedCheckpt() (*ethpb.Checkpoint, error) {
cp, err := s.store.FinalizedCheckpt()
if err != nil {
return nil, err
}
func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
cp := s.ForkChoicer().FinalizedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
}

return ethpb.CopyCheckpoint(cp), nil
// PreviousJustifiedCheckpt returns the current justified checkpoint from chain store.
func (s *Service) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
cp := s.ForkChoicer().PreviousJustifiedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
}

// CurrentJustifiedCheckpt returns the current justified checkpoint from chain store.
func (s *Service) CurrentJustifiedCheckpt() (*ethpb.Checkpoint, error) {
cp, err := s.store.JustifiedCheckpt()
if err != nil {
return nil, err
}

return ethpb.CopyCheckpoint(cp), nil
}

// PreviousJustifiedCheckpt returns the previous justified checkpoint from chain store.
func (s *Service) PreviousJustifiedCheckpt() (*ethpb.Checkpoint, error) {
cp, err := s.store.PrevJustifiedCheckpt()
if err != nil {
return nil, err
}

return ethpb.CopyCheckpoint(cp), nil
func (s *Service) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
cp := s.ForkChoicer().JustifiedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
}

// BestJustifiedCheckpt returns the best justified checkpoint from store.
func (s *Service) BestJustifiedCheckpt() (*ethpb.Checkpoint, error) {
cp, err := s.store.BestJustifiedCheckpt()
if err != nil {
// If there is no best justified checkpoint, return the checkpoint with root as zeros to be used for genesis cases.
if errors.Is(err, store.ErrNilCheckpoint) {
return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}, nil
}
return nil, err
}

return ethpb.CopyCheckpoint(cp), nil
func (s *Service) BestJustifiedCheckpt() *ethpb.Checkpoint {
cp := s.ForkChoicer().BestJustifiedCheckpoint()
return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
}

// HeadSlot returns the slot of the head of the chain.
@@ -154,9 +133,8 @@ func (s *Service) HeadRoot(ctx context.Context) ([]byte, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()

if s.headRoot() != params.BeaconConfig().ZeroHash {
r := s.headRoot()
return r[:], nil
if s.head != nil && s.head.root != params.BeaconConfig().ZeroHash {
return bytesutil.SafeCopyBytes(s.head.root[:]), nil
}

b, err := s.cfg.BeaconDB.HeadBlock(ctx)
@@ -330,6 +308,15 @@ func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
return s.IsOptimisticForRoot(ctx, s.head.root)
}

// IsFinalized returns true if the input root is finalized.
// It first checks latest finalized root then checks finalized root index in DB.
func (s *Service) IsFinalized(ctx context.Context, root [32]byte) bool {
if s.ForkChoicer().FinalizedCheckpoint().Root == root {
return true
}
return s.cfg.BeaconDB.IsFinalizedBlock(ctx, root)
}

// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
@@ -356,8 +343,14 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
return true, nil
}

// Historical non-canonical blocks here are returned as optimistic for safety.
isCanonical, err := s.IsCanonical(ctx, root)
if err != nil {
return false, err
}

if slots.ToEpoch(ss.Slot)+1 < validatedCheckpoint.Epoch {
return false, nil
return !isCanonical, nil
}

// Checkpoint root could be zeros before the first finalized epoch. Use genesis root if the case.
@@ -372,13 +365,6 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
if ss.Slot > lastValidated.Slot {
return true, nil
}

isCanonical, err := s.IsCanonical(ctx, root)
if err != nil {
return false, err
}

// Historical non-canonical blocks here are returned as optimistic for safety.
return !isCanonical, nil
}
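With this change the checkpoint getters read directly from the fork-choice store and can no longer fail, so FinalizationFetcher callers drop their error handling, and the new IsFinalized checks the in-memory finalized root before falling back to the database index. A hedged sketch of the resulting caller shape, using trimmed stand-in types rather than Prysm's real interfaces:

```go
package main

import "fmt"

// checkpoint is a stand-in for ethpb.Checkpoint.
type checkpoint struct {
	Epoch uint64
	Root  [32]byte
}

// finalizationFetcher mirrors the new, error-free interface shape.
type finalizationFetcher interface {
	FinalizedCheckpt() *checkpoint
	IsFinalized(root [32]byte) bool
}

// fakeChain is a toy implementation backed by an in-memory checkpoint.
type fakeChain struct{ finalized checkpoint }

func (f *fakeChain) FinalizedCheckpt() *checkpoint { return &f.finalized }

func (f *fakeChain) IsFinalized(root [32]byte) bool {
	// Check the latest finalized root first; a real node would then
	// consult a DB-backed finalized-block index as a fallback.
	return root == f.finalized.Root
}

func main() {
	var chain finalizationFetcher = &fakeChain{finalized: checkpoint{Epoch: 7, Root: [32]byte{'a'}}}

	// Before: cp, err := chain.FinalizedCheckpt(); if err != nil { ... }
	// After: the getter cannot fail, so the caller uses the value directly.
	cp := chain.FinalizedCheckpt()
	fmt.Println("finalized epoch:", cp.Epoch, "is 'a' finalized:", chain.IsFinalized([32]byte{'a'}))
}
```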
@@ -5,11 +5,12 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/store"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
v3 "github.com/prysmaticlabs/prysm/beacon-chain/state/v3"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
@@ -17,6 +18,7 @@ import (
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
@@ -44,7 +46,7 @@ func prepareForkchoiceState(
|
||||
ParentRoot: parentRoot[:],
|
||||
}
|
||||
|
||||
executionHeader := ðpb.ExecutionPayloadHeader{
|
||||
executionHeader := &enginev1.ExecutionPayloadHeader{
|
||||
BlockHash: payloadHash[:],
|
||||
}
|
||||
|
||||
@@ -77,81 +79,48 @@ func TestService_ForkChoiceStore(t *testing.T) {
|
||||
require.Equal(t, types.Epoch(0), p.FinalizedCheckpoint().Epoch)
|
||||
}
|
||||
|
||||
func TestFinalizedCheckpt_CanRetrieve(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cp := ðpb.Checkpoint{Epoch: 5, Root: bytesutil.PadTo([]byte("foo"), 32)}
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
c.store.SetFinalizedCheckptAndPayloadHash(cp, [32]byte{'a'})
|
||||
|
||||
cp, err := c.FinalizedCheckpt()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, cp.Epoch, cp.Epoch, "Unexpected finalized epoch")
|
||||
}
|
||||
|
||||
func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
genesisRoot := [32]byte{'A'}
|
||||
cp := ðpb.Checkpoint{Root: genesisRoot[:]}
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
c.store.SetFinalizedCheckptAndPayloadHash(cp, [32]byte{'a'})
|
||||
c.originBlockRoot = genesisRoot
|
||||
cp, err := c.FinalizedCheckpt()
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithForkChoiceStore(fcs),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, c.originBlockRoot[:], cp.Root)
|
||||
|
||||
gs, _ := util.DeterministicGenesisState(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
cp := service.FinalizedCheckpt()
|
||||
assert.DeepEqual(t, [32]byte{}, bytesutil.ToBytes32(cp.Root))
|
||||
cp = service.CurrentJustifiedCheckpt()
|
||||
assert.DeepEqual(t, [32]byte{}, bytesutil.ToBytes32(cp.Root))
|
||||
// check that forkchoice has the right genesis root as the node root
|
||||
root, err := fcs.Head(ctx, []uint64{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, service.originBlockRoot, root)
|
||||
|
||||
}
|
||||
|
||||
func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
_, err := c.CurrentJustifiedCheckpt()
|
||||
require.ErrorIs(t, err, store.ErrNilCheckpoint)
|
||||
cp := ðpb.Checkpoint{Epoch: 6, Root: bytesutil.PadTo([]byte("foo"), 32)}
|
||||
c.store.SetJustifiedCheckptAndPayloadHash(cp, [32]byte{})
|
||||
jp, err := c.CurrentJustifiedCheckpt()
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithForkChoiceStore(fcs),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
cp := &forkchoicetypes.Checkpoint{Epoch: 6, Root: [32]byte{'j'}}
|
||||
require.NoError(t, fcs.UpdateJustifiedCheckpoint(cp))
|
||||
jp := service.CurrentJustifiedCheckpt()
|
||||
assert.Equal(t, cp.Epoch, jp.Epoch, "Unexpected justified epoch")
|
||||
}
|
||||
|
||||
func TestJustifiedCheckpt_GenesisRootOk(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
genesisRoot := [32]byte{'B'}
|
||||
cp := ðpb.Checkpoint{Root: genesisRoot[:]}
|
||||
c.store.SetJustifiedCheckptAndPayloadHash(cp, [32]byte{})
|
||||
c.originBlockRoot = genesisRoot
|
||||
cp, err := c.CurrentJustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, c.originBlockRoot[:], cp.Root)
|
||||
}
|
||||
|
||||
func TestPreviousJustifiedCheckpt_CanRetrieve(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cp := ðpb.Checkpoint{Epoch: 7, Root: bytesutil.PadTo([]byte("foo"), 32)}
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
_, err := c.PreviousJustifiedCheckpt()
|
||||
require.ErrorIs(t, err, store.ErrNilCheckpoint)
|
||||
c.store.SetPrevJustifiedCheckpt(cp)
|
||||
pcp, err := c.PreviousJustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, cp.Epoch, pcp.Epoch, "Unexpected previous justified epoch")
|
||||
}
|
||||
|
||||
func TestPrevJustifiedCheckpt_GenesisRootOk(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
genesisRoot := [32]byte{'C'}
|
||||
cp := ðpb.Checkpoint{Root: genesisRoot[:]}
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
c.store.SetPrevJustifiedCheckpt(cp)
|
||||
c.originBlockRoot = genesisRoot
|
||||
pcp, err := c.PreviousJustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, c.originBlockRoot[:], pcp.Root)
|
||||
require.Equal(t, cp.Root, bytesutil.ToBytes32(jp.Root))
|
||||
}
|
||||
|
||||
func TestHeadSlot_CanRetrieve(t *testing.T) {
|
||||
@@ -163,26 +132,46 @@ func TestHeadSlot_CanRetrieve(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestHeadRoot_CanRetrieve(t *testing.T) {
|
||||
c := &Service{}
|
||||
c.head = &head{root: [32]byte{'A'}}
|
||||
r, err := c.HeadRoot(context.Background())
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithForkChoiceStore(fcs),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, [32]byte{'A'}, bytesutil.ToBytes32(r))
|
||||
gs, _ := util.DeterministicGenesisState(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
|
||||
r, err := service.HeadRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, service.originBlockRoot, bytesutil.ToBytes32(r))
|
||||
}
|
||||
|
||||
func TestHeadRoot_UseDB(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
c := &Service{cfg: &config{BeaconDB: beaconDB}}
|
||||
c.head = &head{root: params.BeaconConfig().ZeroHash}
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithForkChoiceStore(fcs),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
service.head = &head{root: params.BeaconConfig().ZeroHash}
|
||||
b := util.NewBeaconBlock()
|
||||
br, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
|
||||
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Root: br[:]}))
|
||||
require.NoError(t, beaconDB.SaveHeadBlockRoot(context.Background(), br))
|
||||
r, err := c.HeadRoot(context.Background())
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: br[:]}))
|
||||
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, br))
|
||||
r, err := service.HeadRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, br, bytesutil.ToBytes32(r))
|
||||
}
|
||||
@@ -277,9 +266,7 @@ func TestIsCanonical_Ok(t *testing.T) {
|
||||
blk.Block.Slot = 0
|
||||
root, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
util.SaveBlock(t, ctx, beaconDB, blk)
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, root))
|
||||
can, err := c.IsCanonical(ctx, root)
|
||||
require.NoError(t, err)
|
||||
@@ -322,21 +309,21 @@ func TestService_ChainHeads_ProtoArray(t *testing.T) {
|
||||
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New()}}
|
||||
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
|
||||
roots, slots := c.ChainHeads()
|
||||
require.DeepEqual(t, [][32]byte{{'c'}, {'d'}, {'e'}}, roots)
|
||||
@@ -354,24 +341,24 @@ func TestService_ChainHeads_DoublyLinkedTree(t *testing.T) {
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
|
||||
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
|
||||
roots, slots := c.ChainHeads()
|
||||
require.Equal(t, 3, len(roots))
|
||||
@@ -451,12 +438,12 @@ func TestService_IsOptimistic_ProtoArray(t *testing.T) {
|
||||
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
|
||||
opt, err := c.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -473,12 +460,12 @@ func TestService_IsOptimistic_DoublyLinkedTree(t *testing.T) {
|
||||
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
|
||||
opt, err := c.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -498,12 +485,12 @@ func TestService_IsOptimisticForRoot_ProtoArray(t *testing.T) {
|
||||
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
|
||||
opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
|
||||
require.NoError(t, err)
|
||||
@@ -515,12 +502,12 @@ func TestService_IsOptimisticForRoot_DoublyLinkedTree(t *testing.T) {
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
|
||||
opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
|
||||
require.NoError(t, err)
|
||||
@@ -536,26 +523,20 @@ func TestService_IsOptimisticForRoot_DB_ProtoArray(t *testing.T) {
|
||||
b.Block.Slot = 10
|
||||
br, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
|
||||
util.SaveBlock(t, context.Background(), beaconDB, b)
|
||||
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Root: br[:], Slot: 10}))
|
||||
|
||||
optimisticBlock := util.NewBeaconBlock()
|
||||
optimisticBlock.Block.Slot = 97
|
||||
optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(optimisticBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
|
||||
util.SaveBlock(t, context.Background(), beaconDB, optimisticBlock)
|
||||
|
||||
validatedBlock := util.NewBeaconBlock()
|
||||
validatedBlock.Block.Slot = 9
|
||||
validatedRoot, err := validatedBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(validatedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
|
||||
util.SaveBlock(t, context.Background(), beaconDB, validatedBlock)
|
||||
|
||||
validatedCheckpoint := ðpb.Checkpoint{Root: br[:]}
|
||||
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
|
||||
@@ -601,26 +582,20 @@ func TestService_IsOptimisticForRoot_DB_DoublyLinkedTree(t *testing.T) {
|
||||
b.Block.Slot = 10
|
||||
br, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
|
||||
util.SaveBlock(t, context.Background(), beaconDB, b)
|
||||
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Root: br[:], Slot: 10}))
|
||||
|
||||
optimisticBlock := util.NewBeaconBlock()
|
||||
optimisticBlock.Block.Slot = 97
|
||||
optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(optimisticBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
|
||||
util.SaveBlock(t, context.Background(), beaconDB, optimisticBlock)
|
||||
|
||||
validatedBlock := util.NewBeaconBlock()
|
||||
validatedBlock.Block.Slot = 9
|
||||
validatedRoot, err := validatedBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(validatedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
|
||||
util.SaveBlock(t, context.Background(), beaconDB, validatedBlock)
|
||||
|
||||
validatedCheckpoint := ðpb.Checkpoint{Root: br[:]}
|
||||
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
|
||||
@@ -665,26 +640,20 @@ func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
|
||||
b.Block.Slot = 10
|
||||
br, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
|
||||
util.SaveBlock(t, context.Background(), beaconDB, b)
|
||||
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Root: br[:], Slot: 10}))
|
||||
|
||||
optimisticBlock := util.NewBeaconBlock()
|
||||
optimisticBlock.Block.Slot = 97
|
||||
optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(optimisticBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
|
||||
util.SaveBlock(t, context.Background(), beaconDB, optimisticBlock)
|
||||
|
||||
validatedBlock := util.NewBeaconBlock()
|
||||
validatedBlock.Block.Slot = 9
|
||||
validatedRoot, err := validatedBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(validatedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
|
||||
util.SaveBlock(t, context.Background(), beaconDB, validatedBlock)
|
||||
|
||||
validatedCheckpoint := ðpb.Checkpoint{Root: br[:]}
|
||||
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
|
||||
@@ -700,3 +669,25 @@ func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
require.Equal(t, true, validated)

}

func TestService_IsFinalized(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}}
r1 := [32]byte{'a'}
require.NoError(t, c.ForkChoiceStore().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
Root: r1,
}))
b := util.NewBeaconBlock()
br, err := b.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, b)
require.NoError(t, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: br[:], Slot: 10}))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, br))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{
Root: br[:],
}))
require.Equal(t, true, c.IsFinalized(ctx, r1))
require.Equal(t, true, c.IsFinalized(ctx, br))
require.Equal(t, false, c.IsFinalized(ctx, [32]byte{'c'}))
}
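
TestService_IsFinalized above accepts two kinds of roots: one known only to the fork choice store's finalized checkpoint (r1) and one recorded as finalized in the database (br). A hedged sketch of the two-tier check this behavior implies; the dependency functions are placeholders, not the real Service accessors:

package blockchain

import "context"

// isFinalizedSketch mirrors only what the test asserts: a root counts as finalized
// when fork choice reports it as the finalized checkpoint root, or when the beacon
// DB has it recorded as finalized. Both checks are injected as functions because the
// actual Service implementation is not part of this diff.
func isFinalizedSketch(
	ctx context.Context,
	root [32]byte,
	forkchoiceFinalizedRoot func() [32]byte,
	dbIsFinalized func(context.Context, [32]byte) bool,
) bool {
	if root == forkchoiceFinalizedRoot() {
		return true
	}
	return dbIsFinalized(ctx, root)
}
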
@@ -4,7 +4,9 @@ import "github.com/pkg/errors"
var (
// ErrInvalidPayload is returned when the payload is invalid
ErrInvalidPayload = errors.New("recevied an INVALID payload from execution engine")
ErrInvalidPayload = invalidBlock{error: errors.New("received an INVALID payload from execution engine")}
// ErrInvalidBlockHashPayloadStatus is returned when the payload has invalid block hash.
ErrInvalidBlockHashPayloadStatus = invalidBlock{error: errors.New("received an INVALID_BLOCK_HASH payload from execution engine")}
// ErrUndefinedExecutionEngineError is returned when the execution engine returns an error that is not defined
ErrUndefinedExecutionEngineError = errors.New("received an undefined ee error")
// errNilFinalizedInStore is returned when a nil finalized checkpt is returned from store.
@@ -28,7 +30,7 @@ var (
// errWSBlockNotFoundInEpoch is returned when a block is not found in the WS cache or DB within epoch.
errWSBlockNotFoundInEpoch = errors.New("weak subjectivity root not found in db within epoch")
// errNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
errNotDescendantOfFinalized = invalidBlock{errors.New("not descendant of finalized checkpoint")}
errNotDescendantOfFinalized = invalidBlock{error: errors.New("not descendant of finalized checkpoint")}
)

// An invalid block is the block that fails state transition based on the core protocol rules.
@@ -38,17 +40,25 @@ var (
// The block is deemed invalid according to execution layer client.
// The block violates certain fork choice rules (before finalized slot, not finalized ancestor)
type invalidBlock struct {
invalidAncestorRoots [][32]byte
error
root [32]byte
}

type invalidBlockError interface {
Error() string
InvalidBlock() bool
InvalidAncestorRoots() [][32]byte
BlockRoot() [32]byte
}

// InvalidBlock returns true for `invalidBlock`.
func (e invalidBlock) InvalidBlock() bool {
return true
// BlockRoot returns the invalid block root.
func (e invalidBlock) BlockRoot() [32]byte {
return e.root
}

// InvalidAncestorRoots returns an optional list of invalid roots of the invalid block which leads up last valid root.
func (e invalidBlock) InvalidAncestorRoots() [][32]byte {
return e.invalidAncestorRoots
}

// IsInvalidBlock returns true if the error has `invalidBlock`.
@@ -56,9 +66,34 @@ func IsInvalidBlock(e error) bool {
if e == nil {
return false
}
d, ok := e.(invalidBlockError)
_, ok := e.(invalidBlockError)
if !ok {
return IsInvalidBlock(errors.Unwrap(e))
}
return d.InvalidBlock()
return true
}

// InvalidBlockRoot returns the invalid block root. If the error
// doesn't have an invalid blockroot. [32]byte{} is returned.
func InvalidBlockRoot(e error) [32]byte {
if e == nil {
return [32]byte{}
}
d, ok := e.(invalidBlockError)
if !ok {
return [32]byte{}
}
return d.BlockRoot()
}

// InvalidAncestorRoots returns a list of invalid roots up to last valid root.
func InvalidAncestorRoots(e error) [][32]byte {
if e == nil {
return [][32]byte{}
}
d, ok := e.(invalidBlockError)
if !ok {
return [][32]byte{}
}
return d.InvalidAncestorRoots()
}
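
The helpers above let callers detect an invalid block without exporting the invalidBlock type: IsInvalidBlock follows errors.Unwrap, so wrapping preserves the signal, while the root and ancestor accessors read the error value they are handed directly. A short in-package sketch mirroring the tests that follow; the example function name and the package name are assumptions:

package blockchain

import "github.com/pkg/errors"

// exampleInvalidBlockHandling shows how code in this package can classify a failed
// block. Only IsInvalidBlock walks the wrap chain; the accessors below are called on
// the unwrapped value.
func exampleInvalidBlockHandling() ([32]byte, [][32]byte, bool) {
	err := invalidBlock{
		error:                ErrInvalidPayload,
		root:                 [32]byte{'a'},
		invalidAncestorRoots: [][32]byte{{'b'}, {'c'}},
	}
	wrapped := errors.Wrap(err, "processing block")
	return InvalidBlockRoot(err), InvalidAncestorRoots(err), IsInvalidBlock(wrapped)
}
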
@@ -8,10 +8,29 @@ import (
)

func TestIsInvalidBlock(t *testing.T) {
require.Equal(t, false, IsInvalidBlock(ErrInvalidPayload))
err := invalidBlock{ErrInvalidPayload}
require.Equal(t, true, IsInvalidBlock(ErrInvalidPayload)) // Already wrapped.
err := invalidBlock{error: ErrInvalidPayload}
require.Equal(t, true, IsInvalidBlock(err))

newErr := errors.Wrap(err, "wrap me")
require.Equal(t, true, IsInvalidBlock(newErr))
require.DeepEqual(t, [][32]byte(nil), InvalidAncestorRoots(err))
}

func TestInvalidBlockRoot(t *testing.T) {
require.Equal(t, [32]byte{}, InvalidBlockRoot(ErrUndefinedExecutionEngineError))
require.Equal(t, [32]byte{}, InvalidBlockRoot(ErrInvalidPayload))

err := invalidBlock{error: ErrInvalidPayload, root: [32]byte{'a'}}
require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(err))
require.DeepEqual(t, [][32]byte(nil), InvalidAncestorRoots(err))
}

func TestInvalidRoots(t *testing.T) {
roots := [][32]byte{{'d'}, {'b'}, {'c'}}
err := invalidBlock{error: ErrInvalidPayload, root: [32]byte{'a'}, invalidAncestorRoots: roots}

require.Equal(t, true, IsInvalidBlock(err))
require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(err))
require.DeepEqual(t, roots, InvalidAncestorRoots(err))
}

@@ -18,7 +18,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
@@ -40,24 +39,27 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
|
||||
|
||||
headBlk := arg.headBlock
|
||||
if headBlk == nil || headBlk.IsNil() || headBlk.Body().IsNil() {
|
||||
return nil, errors.New("nil head block")
|
||||
log.Error("Head block is nil")
|
||||
return nil, nil
|
||||
}
|
||||
// Must not call fork choice updated until the transition conditions are met on the Pow network.
|
||||
isExecutionBlk, err := blocks.IsExecutionBlock(headBlk.Body())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not determine if block is execution block")
|
||||
log.WithError(err).Error("Could not determine if head block is execution block")
|
||||
return nil, nil
|
||||
}
|
||||
if !isExecutionBlk {
|
||||
return nil, nil
|
||||
}
|
||||
headPayload, err := headBlk.Body().ExecutionPayload()
|
||||
headPayload, err := headBlk.Body().Execution()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get execution payload")
|
||||
log.WithError(err).Error("Could not get execution payload for head block")
|
||||
return nil, nil
|
||||
}
|
||||
finalizedHash := s.store.FinalizedPayloadBlockHash()
|
||||
justifiedHash := s.store.JustifiedPayloadBlockHash()
|
||||
finalizedHash := s.ForkChoicer().FinalizedPayloadBlockHash()
|
||||
justifiedHash := s.ForkChoicer().JustifiedPayloadBlockHash()
|
||||
fcs := &enginev1.ForkchoiceState{
|
||||
HeadBlockHash: headPayload.BlockHash,
|
||||
HeadBlockHash: headPayload.BlockHash(),
|
||||
SafeBlockHash: justifiedHash[:],
|
||||
FinalizedBlockHash: finalizedHash[:],
|
||||
}
|
||||
@@ -65,7 +67,8 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
|
||||
nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
|
||||
hasAttr, attr, proposerId, err := s.getPayloadAttribute(ctx, arg.headState, nextSlot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get payload attribute")
|
||||
log.WithError(err).Error("Could not get head payload attribute")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, attr)
|
||||
@@ -75,32 +78,41 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
|
||||
forkchoiceUpdatedOptimisticNodeCount.Inc()
|
||||
log.WithFields(logrus.Fields{
|
||||
"headSlot": headBlk.Slot(),
|
||||
"headPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
|
||||
"headPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash())),
|
||||
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash[:])),
|
||||
}).Info("Called fork choice updated with optimistic block")
|
||||
return payloadID, s.optimisticCandidateBlock(ctx, headBlk)
|
||||
err := s.optimisticCandidateBlock(ctx, headBlk)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Optimistic block failed to be candidate")
|
||||
}
|
||||
return payloadID, nil
|
||||
case powchain.ErrInvalidPayloadStatus:
|
||||
newPayloadInvalidNodeCount.Inc()
|
||||
headRoot := arg.headRoot
|
||||
invalidRoots, err := s.ForkChoicer().SetOptimisticToInvalid(ctx, headRoot, bytesutil.ToBytes32(headBlk.ParentRoot()), bytesutil.ToBytes32(lastValidHash))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
log.WithError(err).Error("Could not set head root to invalid")
|
||||
return nil, nil
|
||||
}
|
||||
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
|
||||
return nil, err
|
||||
log.WithError(err).Error("Could not remove invalid block and state")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
r, err := s.cfg.ForkChoiceStore.Head(ctx, s.justifiedBalances.balances)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
log.WithError(err).Error("Could not get head root")
|
||||
return nil, nil
|
||||
}
|
||||
b, err := s.getBlock(ctx, r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
log.WithError(err).Error("Could not get head block")
|
||||
return nil, nil
|
||||
}
|
||||
st, err := s.cfg.StateGen.StateByRoot(ctx, r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
log.WithError(err).Error("Could not get head state")
|
||||
return nil, nil
|
||||
}
|
||||
pid, err := s.notifyForkchoiceUpdate(ctx, ¬ifyForkchoiceUpdateArg{
|
||||
headState: st,
|
||||
@@ -108,28 +120,40 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
|
||||
headBlock: b.Block(),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, err // Returning err because it's recursive here.
|
||||
}
|
||||
|
||||
if err := s.saveHead(ctx, r, b, st); err != nil {
|
||||
log.WithError(err).Error("could not save head after pruning invalid blocks")
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": headBlk.Slot(),
|
||||
"blockRoot": fmt.Sprintf("%#x", headRoot),
|
||||
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(headRoot[:])),
|
||||
"invalidCount": len(invalidRoots),
|
||||
"newHeadRoot": fmt.Sprintf("%#x", bytesutil.Trunc(r[:])),
|
||||
}).Warn("Pruned invalid blocks")
|
||||
return pid, ErrInvalidPayload
|
||||
return pid, invalidBlock{error: ErrInvalidPayload, root: arg.headRoot, invalidAncestorRoots: invalidRoots}
|
||||
|
||||
default:
|
||||
return nil, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
|
||||
log.WithError(err).Error(ErrUndefinedExecutionEngineError)
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
forkchoiceUpdatedValidNodeCount.Inc()
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, arg.headRoot); err != nil {
return nil, errors.Wrap(err, "could not set block to valid")
log.WithError(err).Error("Could not set head root to valid")
return nil, nil
}
if hasAttr { // If the forkchoice update call has an attribute, update the proposer payload ID cache.
if hasAttr && payloadID != nil { // If the forkchoice update call has an attribute, update the proposer payload ID cache.
var pId [8]byte
copy(pId[:], payloadID[:])
s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nextSlot, proposerId, pId)
} else if hasAttr && payloadID == nil {
log.WithFields(logrus.Fields{
"blockHash": fmt.Sprintf("%#x", headPayload.BlockHash()),
"slot": headBlk.Slot(),
}).Error("Received nil payload ID on VALID engine response")
}
return payloadID, nil
}
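
On a VALID response the hunk above now refuses to cache a nil payload ID for the next slot's proposer and logs instead. A condensed, dependency-free sketch of just that guard; the callback parameters stand in for ProposerSlotIndexCache.SetProposerAndPayloadIDs and the log call, and plain integer types replace the Prysm-specific slot and validator index types:

package blockchain

// cacheProposerPayloadIDSketch reproduces the control flow added above: only cache
// when the engine actually returned a payload ID; otherwise surface the anomaly.
func cacheProposerPayloadIDSketch(
	hasAttr bool,
	payloadID *[8]byte,
	nextSlot, proposerIndex uint64,
	cache func(slot, proposer uint64, pid [8]byte),
	warnNilPayloadID func(),
) {
	if hasAttr && payloadID != nil {
		var pid [8]byte
		copy(pid[:], payloadID[:])
		cache(nextSlot, proposerIndex, pid)
	} else if hasAttr && payloadID == nil {
		warnNilPayloadID() // VALID engine response but nothing to cache.
	}
}
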
@@ -144,17 +168,17 @@ func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, er
if blocks.IsPreBellatrixVersion(blk.Block().Version()) {
return params.BeaconConfig().ZeroHash, nil
}
payload, err := blk.Block().Body().ExecutionPayload()
payload, err := blk.Block().Body().Execution()
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not get execution payload")
}
return bytesutil.ToBytes32(payload.BlockHash), nil
return bytesutil.ToBytes32(payload.BlockHash()), nil
}
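
getPayloadHash, like the other hunks in this file, switches from the concrete ExecutionPayload with exported fields to an Execution() accessor whose result exposes methods such as BlockHash() and Transactions(). A small sketch of reading through those accessors; the local interface captures only the two methods used in these hunks, with assumed signatures:

package blockchain

import "github.com/prysmaticlabs/prysm/encoding/bytesutil"

// executionLike captures just the accessors these hunks rely on; the real interface
// returned by Body().Execution() is defined elsewhere in the repo.
type executionLike interface {
	BlockHash() []byte
	Transactions() ([][]byte, error)
}

// payloadSummary illustrates the field-to-method migration: the block hash and the
// transaction list are now read through accessors, and Transactions can fail.
func payloadSummary(payload executionLike) ([32]byte, int, error) {
	txs, err := payload.Transactions()
	if err != nil {
		return [32]byte{}, 0, err
	}
	return bytesutil.ToBytes32(payload.BlockHash()), len(txs), nil
}
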
// notifyForkchoiceUpdate signals execution engine on a new payload.
|
||||
// It returns true if the EL has returned VALID for the block
|
||||
func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
|
||||
postStateHeader *ethpb.ExecutionPayloadHeader, blk interfaces.SignedBeaconBlock) (bool, error) {
|
||||
postStateHeader *enginev1.ExecutionPayloadHeader, blk interfaces.SignedBeaconBlock) (bool, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
|
||||
defer span.End()
|
||||
|
||||
@@ -169,14 +193,14 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
|
||||
body := blk.Block().Body()
|
||||
enabled, err := blocks.IsExecutionEnabledUsingHeader(postStateHeader, body)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(invalidBlock{err}, "could not determine if execution is enabled")
|
||||
return false, errors.Wrap(invalidBlock{error: err}, "could not determine if execution is enabled")
|
||||
}
|
||||
if !enabled {
|
||||
return true, nil
|
||||
}
|
||||
payload, err := body.ExecutionPayload()
|
||||
payload, err := body.Execution()
|
||||
if err != nil {
|
||||
return false, errors.Wrap(invalidBlock{err}, "could not get execution payload")
|
||||
return false, errors.Wrap(invalidBlock{error: err}, "could not get execution payload")
|
||||
}
|
||||
lastValidHash, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
|
||||
switch err {
|
||||
@@ -187,7 +211,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
|
||||
newPayloadOptimisticNodeCount.Inc()
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": blk.Block().Slot(),
|
||||
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
|
||||
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
|
||||
}).Info("Called new payload with optimistic block")
|
||||
return false, s.optimisticCandidateBlock(ctx, blk.Block())
|
||||
case powchain.ErrInvalidPayloadStatus:
|
||||
@@ -208,7 +232,13 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
"blockRoot": fmt.Sprintf("%#x", root),
"invalidCount": len(invalidRoots),
}).Warn("Pruned invalid blocks")
return false, invalidBlock{ErrInvalidPayload}
return false, invalidBlock{
invalidAncestorRoots: invalidRoots,
error: ErrInvalidPayload,
}
case powchain.ErrInvalidBlockHashPayloadStatus:
newPayloadInvalidNodeCount.Inc()
return false, ErrInvalidBlockHashPayloadStatus
default:
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
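
With the change above, notifyNewPayload reports an execution-engine INVALID verdict as an invalidBlock error carrying the pruned ancestor roots rather than the bare sentinel. A speculative sketch of how a caller in this package could branch on that result; handleInvalidRoots is a placeholder for whatever cleanup the real processing path performs:

package blockchain

// handleNewPayloadResult separates an INVALID verdict, which carries prunable roots,
// from other engine errors. Only IsInvalidBlock and InvalidAncestorRoots come from
// the errors.go hunk above; the rest is illustrative.
func handleNewPayloadResult(err error, handleInvalidRoots func([][32]byte)) error {
	if err == nil {
		return nil
	}
	if IsInvalidBlock(err) {
		handleInvalidRoots(InvalidAncestorRoots(err)) // roots invalidated up to the last valid hash
	}
	return err
}
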
@@ -6,6 +6,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
gethtypes "github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
@@ -27,23 +28,19 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func Test_NotifyForkchoiceUpdate(t *testing.T) {
|
||||
params.BeaconConfig().SafeSlotsToImportOptimistically = 0
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
altairBlk, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlockAltair())
|
||||
require.NoError(t, err)
|
||||
altairBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockAltair())
|
||||
altairBlkRoot, err := altairBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlockBellatrix())
|
||||
require.NoError(t, err)
|
||||
bellatrixBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockBellatrix())
|
||||
bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, altairBlk))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, bellatrixBlk))
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
@@ -78,10 +75,6 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
|
||||
newForkchoiceErr error
|
||||
errString string
|
||||
}{
|
||||
{
|
||||
name: "nil block",
|
||||
errString: "nil head block",
|
||||
},
|
||||
{
|
||||
name: "phase0 block",
|
||||
blk: func() interfaces.BeaconBlock {
|
||||
@@ -187,9 +180,6 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, st, tt.finalizedRoot))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, tt.finalizedRoot))
|
||||
fc := ðpb.Checkpoint{Epoch: 0, Root: tt.finalizedRoot[:]}
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(fc, [32]byte{'a'})
|
||||
service.store.SetJustifiedCheckptAndPayloadHash(fc, [32]byte{'b'})
|
||||
arg := ¬ifyForkchoiceUpdateArg{
|
||||
headState: st,
|
||||
headRoot: tt.headRoot,
|
||||
@@ -198,6 +188,10 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
|
||||
_, err = service.notifyForkchoiceUpdate(ctx, arg)
|
||||
if tt.errString != "" {
|
||||
require.ErrorContains(t, tt.errString, err)
|
||||
if tt.errString == ErrInvalidPayload.Error() {
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
require.Equal(t, tt.headRoot, InvalidBlockRoot(err)) // Head root should be invalid. Not block root!
|
||||
}
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -220,86 +214,69 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
|
||||
// 2. forkchoice removes the weights of these blocks
|
||||
// 3. the blockchain package calls fcu to obtain heads G -> F -> D.
|
||||
|
||||
func Test_NotifyForkchoiceUpdateRecursive(t *testing.T) {
|
||||
func Test_NotifyForkchoiceUpdateRecursive_Protoarray(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
// Prepare blocks
|
||||
ba := util.NewBeaconBlockBellatrix()
|
||||
ba.Block.Body.ExecutionPayload.BlockNumber = 1
|
||||
wba, err := wrapper.WrappedSignedBeaconBlock(ba)
|
||||
require.NoError(t, err)
|
||||
wba := util.SaveBlock(t, ctx, beaconDB, ba)
|
||||
bra, err := wba.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wba))
|
||||
|
||||
bb := util.NewBeaconBlockBellatrix()
|
||||
bb.Block.Body.ExecutionPayload.BlockNumber = 2
|
||||
wbb, err := wrapper.WrappedSignedBeaconBlock(bb)
|
||||
require.NoError(t, err)
|
||||
wbb := util.SaveBlock(t, ctx, beaconDB, bb)
|
||||
brb, err := wbb.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wbb))
|
||||
|
||||
bc := util.NewBeaconBlockBellatrix()
|
||||
bc.Block.Body.ExecutionPayload.BlockNumber = 3
|
||||
wbc, err := wrapper.WrappedSignedBeaconBlock(bc)
|
||||
require.NoError(t, err)
|
||||
wbc := util.SaveBlock(t, ctx, beaconDB, bc)
|
||||
brc, err := wbc.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wbc))
|
||||
|
||||
bd := util.NewBeaconBlockBellatrix()
|
||||
pd := [32]byte{'D'}
|
||||
bd.Block.Body.ExecutionPayload.BlockHash = pd[:]
|
||||
bd.Block.Body.ExecutionPayload.BlockNumber = 4
|
||||
wbd, err := wrapper.WrappedSignedBeaconBlock(bd)
|
||||
require.NoError(t, err)
|
||||
wbd := util.SaveBlock(t, ctx, beaconDB, bd)
|
||||
brd, err := wbd.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wbd))
|
||||
|
||||
be := util.NewBeaconBlockBellatrix()
|
||||
pe := [32]byte{'E'}
|
||||
be.Block.Body.ExecutionPayload.BlockHash = pe[:]
|
||||
be.Block.Body.ExecutionPayload.BlockNumber = 5
|
||||
wbe, err := wrapper.WrappedSignedBeaconBlock(be)
|
||||
require.NoError(t, err)
|
||||
wbe := util.SaveBlock(t, ctx, beaconDB, be)
|
||||
bre, err := wbe.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wbe))
|
||||
|
||||
bf := util.NewBeaconBlockBellatrix()
|
||||
pf := [32]byte{'F'}
|
||||
bf.Block.Body.ExecutionPayload.BlockHash = pf[:]
|
||||
bf.Block.Body.ExecutionPayload.BlockNumber = 6
|
||||
bf.Block.ParentRoot = bre[:]
|
||||
wbf, err := wrapper.WrappedSignedBeaconBlock(bf)
|
||||
require.NoError(t, err)
|
||||
wbf := util.SaveBlock(t, ctx, beaconDB, bf)
|
||||
brf, err := wbf.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wbf))
|
||||
|
||||
bg := util.NewBeaconBlockBellatrix()
|
||||
bg.Block.Body.ExecutionPayload.BlockNumber = 7
|
||||
pg := [32]byte{'G'}
|
||||
bg.Block.Body.ExecutionPayload.BlockHash = pg[:]
|
||||
bg.Block.ParentRoot = bre[:]
|
||||
wbg, err := wrapper.WrappedSignedBeaconBlock(bg)
|
||||
require.NoError(t, err)
|
||||
wbg := util.SaveBlock(t, ctx, beaconDB, bg)
|
||||
brg, err := wbg.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wbg))
|
||||
|
||||
// Insert blocks into forkchoice
|
||||
fcs := doublylinkedtree.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
fcs := protoarray.New()
|
||||
service.cfg.ForkChoiceStore = fcs
|
||||
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
|
||||
|
||||
service.justifiedBalances.balances = []uint64{50, 100, 200}
|
||||
require.NoError(t, err)
|
||||
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
@@ -340,19 +317,167 @@ func Test_NotifyForkchoiceUpdateRecursive(t *testing.T) {
|
||||
// Prepare Engine Mock to return invalid unless head is D, LVH = E
|
||||
service.cfg.ExecutionEngineCaller = &mockPOW.EngineClient{ErrForkchoiceUpdated: powchain.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: pe[:], OverrideValidHash: [32]byte{'D'}}
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
service.head = &head{
|
||||
state: st,
|
||||
block: wba,
|
||||
}
|
||||
|
||||
require.NoError(t, beaconDB.SaveState(ctx, st, bra))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bra))
|
||||
fc := ðpb.Checkpoint{Epoch: 0, Root: bra[:]}
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(fc, [32]byte{'a'})
|
||||
service.store.SetJustifiedCheckptAndPayloadHash(fc, [32]byte{'b'})
|
||||
a := ¬ifyForkchoiceUpdateArg{
|
||||
headState: st,
|
||||
headBlock: wbg.Block(),
|
||||
headRoot: brg,
|
||||
}
|
||||
_, err = service.notifyForkchoiceUpdate(ctx, a)
|
||||
require.ErrorIs(t, ErrInvalidPayload, err)
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
require.Equal(t, brf, InvalidBlockRoot(err))
|
||||
|
||||
// Ensure Head is D
|
||||
headRoot, err = fcs.Head(ctx, service.justifiedBalances.balances)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, brd, headRoot)
|
||||
|
||||
// Ensure F and G where removed but their parent E wasn't
|
||||
require.Equal(t, false, fcs.HasNode(brf))
|
||||
require.Equal(t, false, fcs.HasNode(brg))
|
||||
require.Equal(t, true, fcs.HasNode(bre))
|
||||
}

//
//
// A <- B <- C <- D
// \
// ---------- E <- F
// \
// ------ G
// D is the current head, attestations for F and G come late, both are invalid.
// We switch recursively to F then G and finally to D.
//
// We test:
// 1. forkchoice removes blocks F and G from the forkchoice implementation
// 2. forkchoice removes the weights of these blocks
// 3. the blockchain package calls fcu to obtain heads G -> F -> D.

func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
// Prepare blocks
|
||||
ba := util.NewBeaconBlockBellatrix()
|
||||
ba.Block.Body.ExecutionPayload.BlockNumber = 1
|
||||
wba := util.SaveBlock(t, ctx, beaconDB, ba)
|
||||
bra, err := wba.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
bb := util.NewBeaconBlockBellatrix()
|
||||
bb.Block.Body.ExecutionPayload.BlockNumber = 2
|
||||
wbb := util.SaveBlock(t, ctx, beaconDB, bb)
|
||||
brb, err := wbb.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
bc := util.NewBeaconBlockBellatrix()
|
||||
bc.Block.Body.ExecutionPayload.BlockNumber = 3
|
||||
wbc := util.SaveBlock(t, ctx, beaconDB, bc)
|
||||
brc, err := wbc.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
bd := util.NewBeaconBlockBellatrix()
|
||||
pd := [32]byte{'D'}
|
||||
bd.Block.Body.ExecutionPayload.BlockHash = pd[:]
|
||||
bd.Block.Body.ExecutionPayload.BlockNumber = 4
|
||||
wbd := util.SaveBlock(t, ctx, beaconDB, bd)
|
||||
brd, err := wbd.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
be := util.NewBeaconBlockBellatrix()
|
||||
pe := [32]byte{'E'}
|
||||
be.Block.Body.ExecutionPayload.BlockHash = pe[:]
|
||||
be.Block.Body.ExecutionPayload.BlockNumber = 5
|
||||
wbe := util.SaveBlock(t, ctx, beaconDB, be)
|
||||
bre, err := wbe.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
bf := util.NewBeaconBlockBellatrix()
|
||||
pf := [32]byte{'F'}
|
||||
bf.Block.Body.ExecutionPayload.BlockHash = pf[:]
|
||||
bf.Block.Body.ExecutionPayload.BlockNumber = 6
|
||||
bf.Block.ParentRoot = bre[:]
|
||||
wbf := util.SaveBlock(t, ctx, beaconDB, bf)
|
||||
brf, err := wbf.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
bg := util.NewBeaconBlockBellatrix()
|
||||
bg.Block.Body.ExecutionPayload.BlockNumber = 7
|
||||
pg := [32]byte{'G'}
|
||||
bg.Block.Body.ExecutionPayload.BlockHash = pg[:]
|
||||
bg.Block.ParentRoot = bre[:]
|
||||
wbg := util.SaveBlock(t, ctx, beaconDB, bg)
|
||||
brg, err := wbg.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Insert blocks into forkchoice
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
fcs := doublylinkedtree.New()
|
||||
service.cfg.ForkChoiceStore = fcs
|
||||
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
|
||||
|
||||
service.justifiedBalances.balances = []uint64{50, 100, 200}
|
||||
require.NoError(t, err)
|
||||
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 1, bra, [32]byte{}, [32]byte{'A'}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 2, brb, bra, [32]byte{'B'}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 3, brc, brb, [32]byte{'C'}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 4, brd, brc, [32]byte{'D'}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 5, bre, brb, [32]byte{'E'}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 6, brf, bre, [32]byte{'F'}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 7, brg, bre, [32]byte{'G'}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
// Insert Attestations to D, F and G so that they have higher weight than D
|
||||
// Ensure G is head
|
||||
fcs.ProcessAttestation(ctx, []uint64{0}, brd, 1)
|
||||
fcs.ProcessAttestation(ctx, []uint64{1}, brf, 1)
|
||||
fcs.ProcessAttestation(ctx, []uint64{2}, brg, 1)
|
||||
jc := &forkchoicetypes.Checkpoint{Epoch: 0, Root: bra}
|
||||
require.NoError(t, fcs.UpdateJustifiedCheckpoint(jc))
|
||||
headRoot, err := fcs.Head(ctx, []uint64{50, 100, 200})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, brg, headRoot)
|
||||
|
||||
// Prepare Engine Mock to return invalid unless head is D, LVH = E
|
||||
service.cfg.ExecutionEngineCaller = &mockPOW.EngineClient{ErrForkchoiceUpdated: powchain.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: pe[:], OverrideValidHash: [32]byte{'D'}}
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
service.head = &head{
|
||||
state: st,
|
||||
block: wba,
|
||||
}
|
||||
|
||||
require.NoError(t, beaconDB.SaveState(ctx, st, bra))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bra))
|
||||
a := ¬ifyForkchoiceUpdateArg{
|
||||
headState: st,
|
||||
headBlock: wbg.Block(),
|
||||
headRoot: brg,
|
||||
}
|
||||
_, err = service.notifyForkchoiceUpdate(ctx, a)
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
require.Equal(t, brf, InvalidBlockRoot(err))
|
||||
|
||||
// Ensure Head is D
|
||||
headRoot, err = fcs.Head(ctx, service.justifiedBalances.balances)
|
||||
require.NoError(t, err)
|
||||
@@ -553,16 +678,40 @@ func Test_NotifyNewPayload(t *testing.T) {
|
||||
newPayloadErr: ErrUndefinedExecutionEngineError,
|
||||
errString: ErrUndefinedExecutionEngineError.Error(),
|
||||
},
|
||||
{
|
||||
name: "invalid block hash error from ee",
|
||||
postState: bellatrixState,
|
||||
blk: func() interfaces.SignedBeaconBlock {
|
||||
blk := ðpb.SignedBeaconBlockBellatrix{
|
||||
Block: ðpb.BeaconBlockBellatrix{
|
||||
Body: ðpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: &v1.ExecutionPayload{
|
||||
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
return b
|
||||
}(),
|
||||
newPayloadErr: ErrInvalidBlockHashPayloadStatus,
|
||||
errString: ErrInvalidBlockHashPayloadStatus.Error(),
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
e := &mockPOW.EngineClient{ErrNewPayload: tt.newPayloadErr, BlockByHashMap: map[[32]byte]*v1.ExecutionBlock{}}
|
||||
e.BlockByHashMap[[32]byte{'a'}] = &v1.ExecutionBlock{
|
||||
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash([]byte("b")),
|
||||
},
|
||||
TotalDifficulty: "0x2",
|
||||
}
|
||||
e.BlockByHashMap[[32]byte{'b'}] = &v1.ExecutionBlock{
|
||||
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash([]byte("3")),
|
||||
},
|
||||
TotalDifficulty: "0x1",
|
||||
}
|
||||
service.cfg.ExecutionEngineCaller = e
|
||||
@@ -615,11 +764,15 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
e := &mockPOW.EngineClient{BlockByHashMap: map[[32]byte]*v1.ExecutionBlock{}}
|
||||
e.BlockByHashMap[[32]byte{'a'}] = &v1.ExecutionBlock{
|
||||
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash([]byte("b")),
|
||||
},
|
||||
TotalDifficulty: "0x2",
|
||||
}
|
||||
e.BlockByHashMap[[32]byte{'b'}] = &v1.ExecutionBlock{
|
||||
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash([]byte("3")),
|
||||
},
|
||||
TotalDifficulty: "0x1",
|
||||
}
|
||||
service.cfg.ExecutionEngineCaller = e
|
||||
@@ -723,14 +876,7 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
jRoot, err := tt.justified.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, tt.justified))
|
||||
service.store.SetJustifiedCheckptAndPayloadHash(
|
||||
ðpb.Checkpoint{
|
||||
Root: jRoot[:],
|
||||
Epoch: slots.ToEpoch(tt.justified.Block().Slot()),
|
||||
}, [32]byte{'a'})
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrappedParentBlock))
|
||||
|
||||
err = service.optimisticCandidateBlock(ctx, tt.blk)
|
||||
@@ -773,9 +919,7 @@ func Test_IsOptimisticShallowExecutionParent(t *testing.T) {
|
||||
b := ðpb.BeaconBlockBellatrix{Body: body, Slot: 200}
|
||||
rawSigned := ðpb.SignedBeaconBlockBellatrix{Block: b}
|
||||
blk := util.HydrateSignedBeaconBlockBellatrix(rawSigned)
|
||||
wr, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wr))
|
||||
wr := util.SaveBlock(t, ctx, service.cfg.BeaconDB, blk)
|
||||
blkRoot, err := wr.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -783,9 +927,7 @@ func Test_IsOptimisticShallowExecutionParent(t *testing.T) {
|
||||
childBlock.Block.ParentRoot = blkRoot[:]
|
||||
// shallow block
|
||||
childBlock.Block.Slot = 201
|
||||
wrappedChild, err := wrapper.WrappedSignedBeaconBlock(childBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrappedChild))
|
||||
wrappedChild := util.SaveBlock(t, ctx, service.cfg.BeaconDB, childBlock)
|
||||
err = service.optimisticCandidateBlock(ctx, wrappedChild.Block())
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -848,9 +990,7 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesisBlk := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
wr, err := wrapper.WrappedSignedBeaconBlock(genesisBlk)
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, beaconDB.SaveBlock(ctx, wr))
|
||||
util.SaveBlock(t, ctx, beaconDB, genesisBlk)
|
||||
genesisRoot, err := genesisBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
|
||||
@@ -878,9 +1018,7 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
|
||||
blk := util.NewBeaconBlock()
|
||||
blk.Block.Slot = 320
|
||||
blk.Block.ParentRoot = genesisRoot[:]
|
||||
wr, err = wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wr))
|
||||
util.SaveBlock(t, ctx, beaconDB, blk)
|
||||
opRoot, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -909,9 +1047,7 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
|
||||
blk = util.NewBeaconBlock()
|
||||
blk.Block.Slot = 640
|
||||
blk.Block.ParentRoot = opRoot[:]
|
||||
wr, err = wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wr))
|
||||
util.SaveBlock(t, ctx, beaconDB, blk)
|
||||
validRoot, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -959,12 +1095,10 @@ func TestService_removeInvalidBlockAndState(t *testing.T) {
|
||||
// Happy case
|
||||
b1 := util.NewBeaconBlock()
|
||||
b1.Block.Slot = 1
|
||||
blk1, err := wrapper.WrappedSignedBeaconBlock(b1)
|
||||
require.NoError(t, err)
|
||||
blk1 := util.SaveBlock(t, ctx, service.cfg.BeaconDB, b1)
|
||||
r1, err := blk1.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, blk1))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{
|
||||
Slot: 1,
|
||||
Root: r1[:],
|
||||
@@ -973,11 +1107,9 @@ func TestService_removeInvalidBlockAndState(t *testing.T) {
|
||||
|
||||
b2 := util.NewBeaconBlock()
|
||||
b2.Block.Slot = 2
|
||||
blk2, err := wrapper.WrappedSignedBeaconBlock(b2)
|
||||
require.NoError(t, err)
|
||||
blk2 := util.SaveBlock(t, ctx, service.cfg.BeaconDB, b2)
|
||||
r2, err := blk2.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, blk2))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{
|
||||
Slot: 2,
|
||||
Root: r2[:],
|
||||
@@ -1017,7 +1149,7 @@ func TestService_getPayloadHash(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
service.saveInitSyncBlock(r, wsb)
|
||||
require.NoError(t, service.saveInitSyncBlock(ctx, r, wsb))
|
||||
|
||||
h, err := service.getPayloadHash(ctx, r[:])
|
||||
require.NoError(t, err)
|
||||
@@ -1030,7 +1162,7 @@ func TestService_getPayloadHash(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(bb)
|
||||
require.NoError(t, err)
|
||||
service.saveInitSyncBlock(r, wsb)
|
||||
require.NoError(t, service.saveInitSyncBlock(ctx, r, wsb))
|
||||
|
||||
h, err = service.getPayloadHash(ctx, r[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -26,10 +26,7 @@ import (
|
||||
// UpdateAndSaveHeadWithBalances updates the beacon state head after getting justified balanced from cache.
|
||||
// This function is only used in spec-tests, it does save the head after updating it.
|
||||
func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
|
||||
jp, err := s.store.JustifiedCheckpt()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
jp := s.CurrentJustifiedCheckpt()
|
||||
|
||||
balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(jp.Root))
|
||||
if err != nil {
|
||||
@@ -66,11 +63,15 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
|
||||
defer span.End()
|
||||
|
||||
// Do nothing if head hasn't changed.
|
||||
oldHeadroot, err := s.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
var oldHeadRoot [32]byte
|
||||
s.headLock.RLock()
|
||||
if s.head == nil {
|
||||
oldHeadRoot = s.originBlockRoot
|
||||
} else {
|
||||
oldHeadRoot = s.head.root
|
||||
}
|
||||
if newHeadRoot == bytesutil.ToBytes32(oldHeadroot) {
|
||||
s.headLock.RUnlock()
|
||||
if newHeadRoot == oldHeadRoot {
|
||||
return nil
|
||||
}
|
||||
if err := wrapper.BeaconBlockIsNil(headBlock); err != nil {
|
||||
@@ -88,13 +89,12 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
|
||||
|
||||
// A chain re-org occurred, so we fire an event notifying the rest of the services.
|
||||
s.headLock.RLock()
|
||||
oldHeadRoot := s.headRoot()
|
||||
oldStateRoot := s.headBlock().Block().StateRoot()
|
||||
s.headLock.RUnlock()
|
||||
headSlot := s.HeadSlot()
|
||||
newHeadSlot := headBlock.Block().Slot()
|
||||
newStateRoot := headBlock.Block().StateRoot()
|
||||
if bytesutil.ToBytes32(headBlock.Block().ParentRoot()) != bytesutil.ToBytes32(oldHeadroot) {
|
||||
if bytesutil.ToBytes32(headBlock.Block().ParentRoot()) != oldHeadRoot {
|
||||
log.WithFields(logrus.Fields{
|
||||
"newSlot": fmt.Sprintf("%d", newHeadSlot),
|
||||
"oldSlot": fmt.Sprintf("%d", headSlot),
|
||||
@@ -118,7 +118,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
|
||||
},
|
||||
})
|
||||
|
||||
if err := s.saveOrphanedAtts(ctx, bytesutil.ToBytes32(oldHeadroot), newHeadRoot); err != nil {
|
||||
if err := s.saveOrphanedAtts(ctx, oldHeadRoot, newHeadRoot); err != nil {
|
||||
return err
|
||||
}
|
||||
reorgCount.Inc()
|
||||
@@ -220,7 +220,7 @@ func (s *Service) headBlock() interfaces.SignedBeaconBlock {
|
||||
// It does a full copy on head state for immutability.
|
||||
// This is a lock free version.
|
||||
func (s *Service) headState(ctx context.Context) state.BeaconState {
|
||||
_, span := trace.StartSpan(ctx, "blockChain.headState")
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.headState")
|
||||
defer span.End()
|
||||
|
||||
return s.head.state.Copy()
|
||||
@@ -313,7 +313,7 @@ func (s *Service) notifyNewHeadEvent(
|
||||
func (s *Service) saveOrphanedAtts(ctx context.Context, orphanedRoot [32]byte, newHeadRoot [32]byte) error {
|
||||
commonAncestorRoot, err := s.ForkChoicer().CommonAncestorRoot(ctx, newHeadRoot, orphanedRoot)
|
||||
switch {
|
||||
// Exit early if there's no common ancestor as there would be nothing to save.
|
||||
// Exit early if there's no common ancestor and root doesn't exist, there would be nothing to save.
|
||||
case errors.Is(err, forkchoice.ErrUnknownCommonAncestor):
|
||||
return nil
|
||||
case err != nil:
|
||||
|
||||
@@ -44,12 +44,7 @@ func TestSaveHead_Different(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
|
||||
util.NewBeaconBlock()
|
||||
oldBlock, err := wrapper.WrappedSignedBeaconBlock(
|
||||
util.NewBeaconBlock(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), oldBlock))
|
||||
oldBlock := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, util.NewBeaconBlock())
|
||||
oldRoot, err := oldBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
@@ -67,9 +62,7 @@ func TestSaveHead_Different(t *testing.T) {
|
||||
newHeadSignedBlock.Block.Slot = 1
|
||||
newHeadBlock := newHeadSignedBlock.Block
|
||||
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(newHeadSignedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
|
||||
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
|
||||
newRoot, err := newHeadBlock.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, bytesutil.ToBytes32(wsb.Block().ParentRoot()), [32]byte{}, ojc, ofc)
|
||||
@@ -97,11 +90,7 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
|
||||
oldBlock, err := wrapper.WrappedSignedBeaconBlock(
|
||||
util.NewBeaconBlock(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), oldBlock))
|
||||
oldBlock := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, util.NewBeaconBlock())
|
||||
oldRoot, err := oldBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
@@ -121,9 +110,7 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
|
||||
newHeadSignedBlock.Block.ParentRoot = reorgChainParent[:]
|
||||
newHeadBlock := newHeadSignedBlock.Block
|
||||
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(newHeadSignedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
|
||||
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
|
||||
newRoot, err := newHeadBlock.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, bytesutil.ToBytes32(wsb.Block().ParentRoot()), [32]byte{}, ojc, ofc)
|
||||
@@ -240,9 +227,7 @@ func TestSaveOrphanedAtts_NoCommonAncestor(t *testing.T) {
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
|
||||
assert.NoError(t, err)
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
|
||||
util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
|
||||
rG, err := blkG.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -277,9 +262,7 @@ func TestSaveOrphanedAtts_NoCommonAncestor(t *testing.T) {
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, b))
|
||||
util.SaveBlock(t, ctx, beaconDB, blk)
|
||||
}
|
||||
|
||||
require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
|
||||
@@ -298,9 +281,8 @@ func TestSaveOrphanedAtts(t *testing.T) {
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
|
||||
assert.NoError(t, err)
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
|
||||
|
||||
util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
|
||||
rG, err := blkG.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -336,9 +318,7 @@ func TestSaveOrphanedAtts(t *testing.T) {
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, b))
|
||||
util.SaveBlock(t, ctx, beaconDB, blk)
|
||||
}
|
||||
|
||||
require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
|
||||
@@ -367,9 +347,7 @@ func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
|
||||
assert.NoError(t, err)
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
|
||||
util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
|
||||
rG, err := blkG.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -399,9 +377,7 @@ func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, b))
|
||||
util.SaveBlock(t, ctx, beaconDB, blk)
|
||||
}
|
||||
|
||||
require.NoError(t, service.saveOrphanedAtts(ctx, r2, r4))
|
||||
@@ -425,9 +401,7 @@ func TestSaveOrphanedAtts_NoCommonAncestor_DoublyLinkedTrie(t *testing.T) {
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
|
||||
assert.NoError(t, err)
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
|
||||
util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
|
||||
rG, err := blkG.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -462,9 +436,7 @@ func TestSaveOrphanedAtts_NoCommonAncestor_DoublyLinkedTrie(t *testing.T) {
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, b))
|
||||
util.SaveBlock(t, ctx, beaconDB, blk)
|
||||
}
|
||||
|
||||
require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
|
||||
@@ -488,9 +460,7 @@ func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
|
||||
assert.NoError(t, err)
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
|
||||
util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
|
||||
rG, err := blkG.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -526,9 +496,7 @@ func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, b))
|
||||
util.SaveBlock(t, ctx, beaconDB, blk)
|
||||
}
|
||||
|
||||
require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
|
||||
@@ -562,9 +530,7 @@ func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
|
||||
assert.NoError(t, err)
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
|
||||
util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
|
||||
rG, err := blkG.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -594,9 +560,7 @@ func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, b))
|
||||
util.SaveBlock(t, ctx, beaconDB, blk)
|
||||
}
|
||||
|
||||
require.NoError(t, service.saveOrphanedAtts(ctx, r2, r4))
|
||||
@@ -621,17 +585,13 @@ func TestUpdateHead_noSavedChanges(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
|
||||
|
||||
bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlockBellatrix())
|
||||
require.NoError(t, err)
|
||||
bellatrixBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockBellatrix())
|
||||
bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, bellatrixBlk))
|
||||
fcp := ðpb.Checkpoint{
|
||||
Root: bellatrixBlkRoot[:],
|
||||
Epoch: 0,
|
||||
}
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(fcp, [32]byte{'a'})
|
||||
service.store.SetJustifiedCheckptAndPayloadHash(fcp, [32]byte{'b'})
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bellatrixBlkRoot))
|
||||
|
||||
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
|
||||
|
||||
@@ -8,11 +8,20 @@ import (
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
)

// This saves a beacon block to the initial sync blocks cache.
func (s *Service) saveInitSyncBlock(r [32]byte, b interfaces.SignedBeaconBlock) {
// This saves a beacon block to the initial sync blocks cache. It rate limits how many blocks
// the cache keeps in memory (2 epochs worth of blocks) and saves them to DB when it hits this limit.
func (s *Service) saveInitSyncBlock(ctx context.Context, r [32]byte, b interfaces.SignedBeaconBlock) error {
s.initSyncBlocksLock.Lock()
defer s.initSyncBlocksLock.Unlock()
s.initSyncBlocks[r] = b
numBlocks := len(s.initSyncBlocks)
s.initSyncBlocksLock.Unlock()
if uint64(numBlocks) > initialSyncBlockCacheSize {
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
s.clearInitSyncBlocks()
}
return nil
}
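
The rewritten saveInitSyncBlock caps the initial-sync cache at initialSyncBlockCacheSize (about two epochs of blocks per the new comment) and flushes everything to the database once the cap is exceeded. A generic, standalone sketch of that bounded write-back pattern; unlike the real method, which briefly drops its lock before hitting the DB, this version holds the lock throughout for simplicity:

package blockchain

import "sync"

// flushingCache is a simplified stand-in for the initial-sync block cache: entries
// accumulate in memory and are written out via flush once the cap is exceeded.
type flushingCache struct {
	mu    sync.Mutex
	items map[[32]byte][]byte
	cap   int
	flush func(map[[32]byte][]byte) error // stands in for BeaconDB.SaveBlocks
}

func (c *flushingCache) put(key [32]byte, val []byte) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items[key] = val
	if len(c.items) <= c.cap {
		return nil
	}
	if err := c.flush(c.items); err != nil {
		return err
	}
	c.items = make(map[[32]byte][]byte) // clear only after a successful flush
	return nil
}
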
// This checks if a beacon block exists in the initial sync blocks cache using the root
|
||||
|
||||
@@ -29,15 +29,13 @@ func TestService_getBlock(t *testing.T) {
// block in cache
b, err := wrapper.WrappedSignedBeaconBlock(b1)
require.NoError(t, err)
s.saveInitSyncBlock(r1, b)
s.saveInitSyncBlock(ctx, r1, b)
got, err := s.getBlock(ctx, r1)
require.NoError(t, err)
require.DeepEqual(t, b, got)

// block in db
b, err = wrapper.WrappedSignedBeaconBlock(b2)
require.NoError(t, err)
require.NoError(t, s.cfg.BeaconDB.SaveBlock(ctx, b))
b = util.SaveBlock(t, ctx, s.cfg.BeaconDB, b2)
got, err = s.getBlock(ctx, r2)
require.NoError(t, err)
require.DeepEqual(t, b, got)
@@ -61,12 +59,10 @@ func TestService_hasBlockInInitSyncOrDB(t *testing.T) {
// block in cache
b, err := wrapper.WrappedSignedBeaconBlock(b1)
require.NoError(t, err)
s.saveInitSyncBlock(r1, b)
s.saveInitSyncBlock(ctx, r1, b)
require.Equal(t, true, s.hasBlockInInitSyncOrDB(ctx, r1))

// block in db
b, err = wrapper.WrappedSignedBeaconBlock(b2)
require.NoError(t, err)
require.NoError(t, s.cfg.BeaconDB.SaveBlock(ctx, b))
util.SaveBlock(t, ctx, s.cfg.BeaconDB, b2)
require.Equal(t, true, s.hasBlockInInitSyncOrDB(ctx, r2))
}

@@ -45,12 +45,16 @@ func logStateTransitionData(b interfaces.BeaconBlock) error {
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
}
if b.Version() == version.Bellatrix {
p, err := b.Body().ExecutionPayload()
p, err := b.Body().Execution()
if err != nil {
return err
}
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash)))
log = log.WithField("txCount", len(p.Transactions))
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
txs, err := p.Transactions()
if err != nil {
return err
}
log = log.WithField("txCount", len(txs))
}
log.Info("Finished applying state transition")
return nil
@@ -62,24 +66,30 @@ func logBlockSyncStatus(block interfaces.BeaconBlock, blockRoot [32]byte, justif
return err
}
level := log.Logger.GetLevel()

log = log.WithField("slot", block.Slot())
if level >= logrus.DebugLevel {
log = log.WithField("slotInEpoch", block.Slot()%params.BeaconConfig().SlotsPerEpoch)
log = log.WithField("justifiedEpoch", justified.Epoch)
log = log.WithField("justifiedRoot", fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]))
log = log.WithField("parentRoot", fmt.Sprintf("0x%s...", hex.EncodeToString(block.ParentRoot())[:8]))
log = log.WithField("version", version.String(block.Version()))
log = log.WithField("sinceSlotStartTime", prysmTime.Now().Sub(startTime))
log = log.WithField("chainServiceProcessedTime", prysmTime.Now().Sub(receivedTime))
log.WithFields(logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(block.ParentRoot())[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime),
}).Debug("Synced new block")
} else {
log.WithFields(logrus.Fields{
"slot": block.Slot(),
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"epoch": slots.ToEpoch(block.Slot()),
}).Info("Synced new block")
}

log.WithFields(logrus.Fields{
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
}).Info("Synced new block")
return nil
}

@@ -92,18 +102,18 @@ func logPayload(block interfaces.BeaconBlock) error {
if !isExecutionBlk {
return nil
}
payload, err := block.Body().ExecutionPayload()
payload, err := block.Body().Execution()
if err != nil {
return err
}
if payload.GasLimit == 0 {
if payload.GasLimit() == 0 {
return errors.New("gas limit should not be 0")
}
gasUtilized := float64(payload.GasUsed) / float64(payload.GasLimit)
gasUtilized := float64(payload.GasUsed()) / float64(payload.GasLimit())

log.WithFields(logrus.Fields{
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.ParentHash)),
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.ParentHash())),
"blockNumber": payload.BlockNumber,
"gasUtilized": fmt.Sprintf("%.2f", gasUtilized),
}).Debug("Synced new payload")
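logPayload now reads gas values through accessor methods and rejects a zero gas limit before computing utilization. Below is a small sketch of that calculation, with gasUsed and gasLimit standing in for payload.GasUsed() and payload.GasLimit().

```go
// Sketch of the gas-utilization calculation used in logPayload above.
package main

import (
	"errors"
	"fmt"
)

// gasUtilization guards against a zero limit before dividing, as the diff does.
func gasUtilization(gasUsed, gasLimit uint64) (float64, error) {
	if gasLimit == 0 {
		return 0, errors.New("gas limit should not be 0")
	}
	return float64(gasUsed) / float64(gasLimit), nil
}

func main() {
	u, err := gasUtilization(15_000_000, 30_000_000)
	if err != nil {
		panic(err)
	}
	fmt.Printf("gasUtilized: %.2f\n", u) // 0.50
}
```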
@@ -1,131 +0,0 @@
package blockchain

import (
"context"
"testing"

"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/store"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)

func TestService_newSlot(t *testing.T) {
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
ctx := context.Background()

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
bj, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}

state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // genesis
state, blkRoot, err = prepareForkchoiceState(ctx, 32, [32]byte{'a'}, [32]byte{}, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // finalized
state, blkRoot, err = prepareForkchoiceState(ctx, 64, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // justified
state, blkRoot, err = prepareForkchoiceState(ctx, 96, bj, [32]byte{'a'}, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // best justified
state, blkRoot, err = prepareForkchoiceState(ctx, 97, [32]byte{'d'}, [32]byte{}, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // bad

type args struct {
slot types.Slot
finalized *ethpb.Checkpoint
justified *ethpb.Checkpoint
bestJustified *ethpb.Checkpoint
shouldEqual bool
}
tests := []struct {
name string
args args
}{
{
name: "Not epoch boundary. No change",
args: args{
slot: params.BeaconConfig().SlotsPerEpoch + 1,
finalized: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'a'}, 32)},
justified: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'b'}, 32)},
bestJustified: &ethpb.Checkpoint{Epoch: 3, Root: bj[:]},
shouldEqual: false,
},
},
{
name: "Justified higher than best justified. No change",
args: args{
slot: params.BeaconConfig().SlotsPerEpoch,
finalized: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'a'}, 32)},
justified: &ethpb.Checkpoint{Epoch: 3, Root: bytesutil.PadTo([]byte{'b'}, 32)},
bestJustified: &ethpb.Checkpoint{Epoch: 2, Root: bj[:]},
shouldEqual: false,
},
},
{
name: "Best justified not on the same chain as finalized. No change",
args: args{
slot: params.BeaconConfig().SlotsPerEpoch,
finalized: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'a'}, 32)},
justified: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'b'}, 32)},
bestJustified: &ethpb.Checkpoint{Epoch: 3, Root: bytesutil.PadTo([]byte{'d'}, 32)},
shouldEqual: false,
},
},
{
name: "Best justified on the same chain as finalized. Yes change",
args: args{
slot: params.BeaconConfig().SlotsPerEpoch,
finalized: &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'a'}, 32)},
justified: &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'b'}, 32)},
bestJustified: &ethpb.Checkpoint{Epoch: 3, Root: bj[:]},
shouldEqual: true,
},
},
}
for _, test := range tests {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
s := store.New(test.args.justified, test.args.finalized)
s.SetBestJustifiedCheckpt(test.args.bestJustified)
service.store = s

require.NoError(t, service.NewSlot(ctx, test.args.slot))
if test.args.shouldEqual {
bcp, err := service.store.BestJustifiedCheckpt()
require.NoError(t, err)
cp, err := service.store.JustifiedCheckpt()
require.NoError(t, err)
require.DeepSSZEqual(t, bcp, cp)
} else {
bcp, err := service.store.BestJustifiedCheckpt()
require.NoError(t, err)
cp, err := service.store.JustifiedCheckpt()
require.NoError(t, err)
require.DeepNotSSZEqual(t, bcp, cp)
}
}
}
@@ -15,7 +15,6 @@ import (
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/sirupsen/logrus"
)
@@ -41,17 +40,17 @@ func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.SignedBea
if err := wrapper.BeaconBlockIsNil(b); err != nil {
return err
}
payload, err := b.Block().Body().ExecutionPayload()
payload, err := b.Block().Body().Execution()
if err != nil {
return err
}
if payload == nil {
if payload.IsNil() {
return errors.New("nil execution payload")
}
if err := validateTerminalBlockHash(b.Block().Slot(), payload); err != nil {
return errors.Wrap(err, "could not validate terminal block hash")
}
mergeBlockParentHash, mergeBlockTD, err := s.getBlkParentHashAndTD(ctx, payload.ParentHash)
mergeBlockParentHash, mergeBlockTD, err := s.getBlkParentHashAndTD(ctx, payload.ParentHash())
if err != nil {
return errors.Wrap(err, "could not get merge block parent hash and total difficulty")
}
@@ -66,12 +65,12 @@ func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.SignedBea
if !valid {
err := fmt.Errorf("invalid TTD, configTTD: %s, currentTTD: %s, parentTTD: %s",
params.BeaconConfig().TerminalTotalDifficulty, mergeBlockTD, mergeBlockParentTD)
return invalidBlock{err}
return invalidBlock{error: err}
}

log.WithFields(logrus.Fields{
"slot": b.Block().Slot(),
"mergeBlockHash": common.BytesToHash(payload.ParentHash).String(),
"mergeBlockHash": common.BytesToHash(payload.ParentHash()).String(),
"mergeBlockParentHash": common.BytesToHash(mergeBlockParentHash).String(),
"terminalTotalDifficulty": params.BeaconConfig().TerminalTotalDifficulty,
"mergeBlockTotalDifficulty": mergeBlockTD,
@@ -85,7 +84,7 @@ func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.SignedBea

// getBlkParentHashAndTD retrieves the parent hash and total difficulty of the given block.
func (s *Service) getBlkParentHashAndTD(ctx context.Context, blkHash []byte) ([]byte, *uint256.Int, error) {
blk, err := s.cfg.ExecutionEngineCaller.ExecutionBlockByHash(ctx, common.BytesToHash(blkHash))
blk, err := s.cfg.ExecutionEngineCaller.ExecutionBlockByHash(ctx, common.BytesToHash(blkHash), false /* no txs */)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get pow block")
}
@@ -100,7 +99,7 @@ func (s *Service) getBlkParentHashAndTD(ctx context.Context, blkHash []byte) ([]
if overflows {
return nil, nil, errors.New("total difficulty overflows")
}
return blk.ParentHash, blkTDUint256, nil
return blk.ParentHash[:], blkTDUint256, nil
}

// validateTerminalBlockHash validates if the merge block is a valid terminal PoW block.
@@ -110,14 +109,14 @@ func (s *Service) getBlkParentHashAndTD(ctx context.Context, blkHash []byte) ([]
// assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
// assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH
// return
func validateTerminalBlockHash(blkSlot types.Slot, payload *enginev1.ExecutionPayload) error {
func validateTerminalBlockHash(blkSlot types.Slot, payload interfaces.ExecutionData) error {
if bytesutil.ToBytes32(params.BeaconConfig().TerminalBlockHash.Bytes()) == [32]byte{} {
return nil
}
if params.BeaconConfig().TerminalBlockHashActivationEpoch > slots.ToEpoch(blkSlot) {
return errors.New("terminal block hash activation epoch not reached")
}
if !bytes.Equal(payload.ParentHash, params.BeaconConfig().TerminalBlockHash.Bytes()) {
if !bytes.Equal(payload.ParentHash(), params.BeaconConfig().TerminalBlockHash.Bytes()) {
return errors.New("parent hash does not match terminal block hash")
}
return nil
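The "invalid TTD" check in validateMergeBlock above accepts a terminal block only when its own total difficulty reaches the configured terminal total difficulty while its parent's stays below it. Here is a sketch of that predicate using github.com/holiman/uint256, which the surrounding code already uses; validTerminalPowBlock is an illustrative name for this sketch, not necessarily the Prysm function.

```go
// Sketch of the terminal total difficulty rule: parentTD < TTD <= blockTD.
package main

import (
	"fmt"

	"github.com/holiman/uint256"
)

func validTerminalPowBlock(blockTD, parentTD, ttd *uint256.Int) bool {
	return blockTD.Cmp(ttd) >= 0 && parentTD.Cmp(ttd) < 0
}

func main() {
	ttd := uint256.NewInt(2)
	blockTD := uint256.NewInt(2)  // "0x2" in the test above
	parentTD := uint256.NewInt(1) // "0x1" in the test above
	fmt.Println(validTerminalPowBlock(blockTD, parentTD, ttd)) // true
}
```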
@@ -6,12 +6,12 @@ import (
"math/big"
"testing"

gethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/holiman/uint256"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
mocks "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
@@ -120,12 +120,19 @@ func Test_validateMergeBlock(t *testing.T) {

engine := &mocks.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}}
service.cfg.ExecutionEngineCaller = engine
engine.BlockByHashMap[[32]byte{'a'}] = &enginev1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
a := [32]byte{'a'}
b := [32]byte{'b'}
mergeBlockParentHash := [32]byte{'3'}
engine.BlockByHashMap[a] = &enginev1.ExecutionBlock{
Header: gethtypes.Header{
ParentHash: b,
},
TotalDifficulty: "0x2",
}
engine.BlockByHashMap[[32]byte{'b'}] = &enginev1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
engine.BlockByHashMap[b] = &enginev1.ExecutionBlock{
Header: gethtypes.Header{
ParentHash: mergeBlockParentHash,
},
TotalDifficulty: "0x1",
}
blk := &ethpb.SignedBeaconBlockBellatrix{
@@ -133,18 +140,18 @@ func Test_validateMergeBlock(t *testing.T) {
Slot: 1,
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &enginev1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
ParentHash: a[:],
},
},
},
}
b, err := wrapper.WrappedSignedBeaconBlock(blk)
bk, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, service.validateMergeBlock(ctx, b))
require.NoError(t, service.validateMergeBlock(ctx, bk))

cfg.TerminalTotalDifficulty = "1"
params.OverrideBeaconConfig(cfg)
err = service.validateMergeBlock(ctx, b)
err = service.validateMergeBlock(ctx, bk)
require.ErrorContains(t, "invalid TTD, configTTD: 1, currentTTD: 2, parentTTD: 1", err)
require.Equal(t, true, IsInvalidBlock(err))
}
@@ -167,7 +174,9 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
p := [32]byte{'b'}
td := "0x1"
engine.BlockByHashMap[h] = &enginev1.ExecutionBlock{
ParentHash: p[:],
Header: gethtypes.Header{
ParentHash: p,
},
TotalDifficulty: td,
}
parentHash, totalDifficulty, err := service.getBlkParentHashAndTD(ctx, h[:])
@@ -183,14 +192,18 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
require.ErrorContains(t, "pow block is nil", err)

engine.BlockByHashMap[h] = &enginev1.ExecutionBlock{
ParentHash: p[:],
Header: gethtypes.Header{
ParentHash: p,
},
TotalDifficulty: "1",
}
_, _, err = service.getBlkParentHashAndTD(ctx, h[:])
require.ErrorContains(t, "could not decode merge block total difficulty: hex string without 0x prefix", err)

engine.BlockByHashMap[h] = &enginev1.ExecutionBlock{
ParentHash: p[:],
Header: gethtypes.Header{
ParentHash: p,
},
TotalDifficulty: "0XFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
}
_, _, err = service.getBlkParentHashAndTD(ctx, h[:])
@@ -198,16 +211,22 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
}

func Test_validateTerminalBlockHash(t *testing.T) {
require.NoError(t, validateTerminalBlockHash(1, &enginev1.ExecutionPayload{}))
wrapped, err := wrapper.WrappedExecutionPayload(&enginev1.ExecutionPayload{})
require.NoError(t, err)
require.NoError(t, validateTerminalBlockHash(1, wrapped))

cfg := params.BeaconConfig()
cfg.TerminalBlockHash = [32]byte{0x01}
params.OverrideBeaconConfig(cfg)
require.ErrorContains(t, "terminal block hash activation epoch not reached", validateTerminalBlockHash(1, &enginev1.ExecutionPayload{}))
require.ErrorContains(t, "terminal block hash activation epoch not reached", validateTerminalBlockHash(1, wrapped))

cfg.TerminalBlockHashActivationEpoch = 0
params.OverrideBeaconConfig(cfg)
require.ErrorContains(t, "parent hash does not match terminal block hash", validateTerminalBlockHash(1, &enginev1.ExecutionPayload{}))
require.ErrorContains(t, "parent hash does not match terminal block hash", validateTerminalBlockHash(1, wrapped))

require.NoError(t, validateTerminalBlockHash(1, &enginev1.ExecutionPayload{ParentHash: cfg.TerminalBlockHash.Bytes()}))
wrapped, err = wrapper.WrappedExecutionPayload(&enginev1.ExecutionPayload{
ParentHash: cfg.TerminalBlockHash.Bytes(),
})
require.NoError(t, err)
require.NoError(t, validateTerminalBlockHash(1, wrapped))
}
@@ -14,7 +14,6 @@ import (
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
@@ -38,20 +37,16 @@ func TestStore_OnAttestation_ErrorConditions_ProtoArray(t *testing.T) {
_, err = blockTree1(t, beaconDB, []byte{'g'})
require.NoError(t, err)

BlkWithOutState := util.NewBeaconBlock()
BlkWithOutState.Block.Slot = 0
wsb, err := wrapper.WrappedSignedBeaconBlock(BlkWithOutState)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
BlkWithOutStateRoot, err := BlkWithOutState.Block.HashTreeRoot()
blkWithoutState := util.NewBeaconBlock()
blkWithoutState.Block.Slot = 0
util.SaveBlock(t, ctx, beaconDB, blkWithoutState)
BlkWithOutStateRoot, err := blkWithoutState.Block.HashTreeRoot()
require.NoError(t, err)

BlkWithStateBadAtt := util.NewBeaconBlock()
BlkWithStateBadAtt.Block.Slot = 1
wsb, err = wrapper.WrappedSignedBeaconBlock(BlkWithStateBadAtt)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
BlkWithStateBadAttRoot, err := BlkWithStateBadAtt.Block.HashTreeRoot()
blkWithStateBadAtt := util.NewBeaconBlock()
blkWithStateBadAtt.Block.Slot = 1
util.SaveBlock(t, ctx, beaconDB, blkWithStateBadAtt)
BlkWithStateBadAttRoot, err := blkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)

s, err := util.NewBeaconState()
@@ -59,13 +54,11 @@ func TestStore_OnAttestation_ErrorConditions_ProtoArray(t *testing.T) {
require.NoError(t, s.SetSlot(100*params.BeaconConfig().SlotsPerEpoch))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot))

BlkWithValidState := util.NewBeaconBlock()
BlkWithValidState.Block.Slot = 2
wsb, err = wrapper.WrappedSignedBeaconBlock(BlkWithValidState)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
blkWithValidState := util.NewBeaconBlock()
blkWithValidState.Block.Slot = 2
util.SaveBlock(t, ctx, beaconDB, blkWithValidState)

BlkWithValidStateRoot, err := BlkWithValidState.Block.HashTreeRoot()
blkWithValidStateRoot, err := blkWithValidState.Block.HashTreeRoot()
require.NoError(t, err)
s, err = util.NewBeaconState()
require.NoError(t, err)
@@ -75,7 +68,7 @@ func TestStore_OnAttestation_ErrorConditions_ProtoArray(t *testing.T) {
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
})
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, BlkWithValidStateRoot))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, blkWithValidStateRoot))

tests := []struct {
name string
@@ -150,20 +143,16 @@ func TestStore_OnAttestation_ErrorConditions_DoublyLinkedTree(t *testing.T) {
_, err = blockTree1(t, beaconDB, []byte{'g'})
require.NoError(t, err)

BlkWithOutState := util.NewBeaconBlock()
BlkWithOutState.Block.Slot = 0
wsb, err := wrapper.WrappedSignedBeaconBlock(BlkWithOutState)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
BlkWithOutStateRoot, err := BlkWithOutState.Block.HashTreeRoot()
blkWithoutState := util.NewBeaconBlock()
blkWithoutState.Block.Slot = 0
util.SaveBlock(t, ctx, beaconDB, blkWithoutState)
BlkWithOutStateRoot, err := blkWithoutState.Block.HashTreeRoot()
require.NoError(t, err)

BlkWithStateBadAtt := util.NewBeaconBlock()
BlkWithStateBadAtt.Block.Slot = 1
wsb, err = wrapper.WrappedSignedBeaconBlock(BlkWithStateBadAtt)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
BlkWithStateBadAttRoot, err := BlkWithStateBadAtt.Block.HashTreeRoot()
blkWithStateBadAtt := util.NewBeaconBlock()
blkWithStateBadAtt.Block.Slot = 1
util.SaveBlock(t, ctx, beaconDB, blkWithStateBadAtt)
BlkWithStateBadAttRoot, err := blkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)

s, err := util.NewBeaconState()
@@ -171,13 +160,11 @@ func TestStore_OnAttestation_ErrorConditions_DoublyLinkedTree(t *testing.T) {
require.NoError(t, s.SetSlot(100*params.BeaconConfig().SlotsPerEpoch))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot))

BlkWithValidState := util.NewBeaconBlock()
BlkWithValidState.Block.Slot = 2
wsb, err = wrapper.WrappedSignedBeaconBlock(BlkWithValidState)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
blkWithValidState := util.NewBeaconBlock()
blkWithValidState.Block.Slot = 2
util.SaveBlock(t, ctx, beaconDB, blkWithValidState)

BlkWithValidStateRoot, err := BlkWithValidState.Block.HashTreeRoot()
blkWithValidStateRoot, err := blkWithValidState.Block.HashTreeRoot()
require.NoError(t, err)
s, err = util.NewBeaconState()
require.NoError(t, err)
@@ -187,7 +174,7 @@ func TestStore_OnAttestation_ErrorConditions_DoublyLinkedTree(t *testing.T) {
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
})
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, BlkWithValidStateRoot))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, blkWithValidStateRoot))

tests := []struct {
name string
@@ -269,8 +256,8 @@ func TestStore_OnAttestation_Ok_ProtoArray(t *testing.T) {
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
ojc := &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]}
ojc := &ethpb.Checkpoint{Epoch: 1, Root: tRoot[:]}
ofc := &ethpb.Checkpoint{Epoch: 1, Root: tRoot[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
@@ -333,12 +320,6 @@ func TestStore_SaveCheckpointState(t *testing.T) {
r := [32]byte{'g'}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, r))

service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'b'})
service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: r[:]})

r = bytesutil.ToBytes32([]byte{'A'})
cp1 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)}))
@@ -367,10 +348,6 @@ func TestStore_SaveCheckpointState(t *testing.T) {
assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")

require.NoError(t, s.SetSlot(params.BeaconConfig().SlotsPerEpoch+1))
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'b'})
service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: r[:]})
cp3 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}))
@@ -459,9 +436,7 @@ func TestVerifyBeaconBlock_futureBlock(t *testing.T) {

b := util.NewBeaconBlock()
b.Block.Slot = 2
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
d := &ethpb.AttestationData{Slot: 1, BeaconBlockRoot: r[:]}
@@ -478,9 +453,7 @@ func TestVerifyBeaconBlock_OK(t *testing.T) {

b := util.NewBeaconBlock()
b.Block.Slot = 2
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
d := &ethpb.AttestationData{Slot: 2, BeaconBlockRoot: r[:]}
@@ -503,19 +476,15 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_ProtoArray(t *testing.T) {

b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
wsb, err := wrapper.WrappedSignedBeaconBlock(b32)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b32)
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)

service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1}))
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
wsb, err = wrapper.WrappedSignedBeaconBlock(b33)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b33)
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)

@@ -538,19 +507,15 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_DoublyLinkedTree(t *testing

b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
wsb, err := wrapper.WrappedSignedBeaconBlock(b32)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b32)
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)

service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1}))
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
wsb, err = wrapper.WrappedSignedBeaconBlock(b33)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b33)
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)

@@ -567,20 +532,15 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) {

b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
wsb, err := wrapper.WrappedSignedBeaconBlock(b32)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b32)
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)

service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r32[:], Epoch: 1}, [32]byte{})

require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1, Root: r32}))
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
wsb, err = wrapper.WrappedSignedBeaconBlock(b33)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b33)
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)

@@ -600,8 +560,6 @@ func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)

service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r32[:], Epoch: 1}, [32]byte{})

b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
@@ -17,13 +17,13 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/consensus-types/forks/bellatrix"
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/crypto/bls"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/monitoring/tracing"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
@@ -96,7 +96,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
ctx, span := trace.StartSpan(ctx, "blockChain.onBlock")
defer span.End()
if err := wrapper.BeaconBlockIsNil(signed); err != nil {
return invalidBlock{err}
return invalidBlock{error: err}
}
b := signed.Block()

@@ -105,13 +105,19 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
return err
}

// Save current justified and finalized epochs for future use.
currStoreJustifiedEpoch := s.ForkChoicer().JustifiedCheckpoint().Epoch
currStoreFinalizedEpoch := s.ForkChoicer().FinalizedCheckpoint().Epoch
preStateFinalizedEpoch := preState.FinalizedCheckpoint().Epoch
preStateJustifiedEpoch := preState.CurrentJustifiedCheckpoint().Epoch

preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
if err != nil {
return err
}
postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
if err != nil {
return invalidBlock{err}
return invalidBlock{error: err}
}
postStateVersion, postStateHeader, err := getStateVersionAndPayload(postState)
if err != nil {
@@ -119,7 +125,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
}
isValidPayload, err := s.notifyNewPayload(ctx, postStateVersion, postStateHeader, signed)
if err != nil {
return fmt.Errorf("could not verify new payload: %v", err)
return errors.Wrap(err, "could not validate new payload")
}
if isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, preStateVersion, preStateHeader, signed); err != nil {
@@ -129,6 +135,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
return err
}

if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
}
@@ -139,18 +146,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
}
}

// We add a proposer score boost to fork choice for the block root if applicable, right after
// running a successful state transition for the block.
secondsIntoSlot := uint64(time.Since(s.genesisTime).Seconds()) % params.BeaconConfig().SecondsPerSlot
if err := s.cfg.ForkChoiceStore.BoostProposerRoot(ctx, &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: blockRoot,
BlockSlot: signed.Block().Slot(),
CurrentSlot: slots.SinceGenesis(s.genesisTime),
SecondsIntoSlot: secondsIntoSlot,
}); err != nil {
return err
}

// If slasher is configured, forward the attestations in the block via
// an event feed for processing.
if features.Get().EnableSlasher {
@@ -178,52 +173,8 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
}()
}

// Update justified check point.
justified, err := s.store.JustifiedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get justified checkpoint")
}
currJustifiedEpoch := justified.Epoch
psj := postState.CurrentJustifiedCheckpoint()
if psj == nil {
return errNilJustifiedCheckpoint
}

if psj.Epoch > currJustifiedEpoch {
if err := s.updateJustified(ctx, postState); err != nil {
return err
}
}

finalized, err := s.store.FinalizedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint")
}
if finalized == nil {
return errNilFinalizedInStore
}
psf := postState.FinalizedCheckpoint()
if psf == nil {
return errNilFinalizedCheckpoint
}

newFinalized := psf.Epoch > finalized.Epoch
if newFinalized {
s.store.SetPrevFinalizedCheckpt(finalized)
h, err := s.getPayloadHash(ctx, psf.Root)
if err != nil {
return err
}
s.store.SetFinalizedCheckptAndPayloadHash(psf, h)
s.store.SetPrevJustifiedCheckpt(justified)
h, err = s.getPayloadHash(ctx, psj.Root)
if err != nil {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(postState.CurrentJustifiedCheckpoint(), h)
}

balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(justified.Root))
justified := s.ForkChoicer().JustifiedCheckpoint()
balances, err := s.justifiedBalances.get(ctx, justified.Root)
if err != nil {
msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", justified.Root)
return errors.Wrap(err, msg)
@@ -232,7 +183,9 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
if err != nil {
log.WithError(err).Warn("Could not update head")
}
s.notifyEngineIfChangedHead(ctx, headRoot)
if err := s.notifyEngineIfChangedHead(ctx, headRoot); err != nil {
return err
}

if err := s.pruneCanonicalAttsFromPool(ctx, blockRoot, signed); err != nil {
return err
@@ -262,22 +215,23 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
}()

// Save justified check point to db.
if postState.CurrentJustifiedCheckpoint().Epoch > currJustifiedEpoch {
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, postState.CurrentJustifiedCheckpoint()); err != nil {
postStateJustifiedEpoch := postState.CurrentJustifiedCheckpoint().Epoch
if justified.Epoch > currStoreJustifiedEpoch || (justified.Epoch == postStateJustifiedEpoch && justified.Epoch > preStateJustifiedEpoch) {
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{
Epoch: justified.Epoch, Root: justified.Root[:],
}); err != nil {
return err
}
}

// Update finalized check point.
if newFinalized {
if err := s.updateFinalized(ctx, postState.FinalizedCheckpoint()); err != nil {
// Save finalized check point to db and more.
postStateFinalizedEpoch := postState.FinalizedCheckpoint().Epoch
finalized := s.ForkChoicer().FinalizedCheckpoint()
if finalized.Epoch > currStoreFinalizedEpoch || (finalized.Epoch == postStateFinalizedEpoch && finalized.Epoch > preStateFinalizedEpoch) {
if err := s.updateFinalized(ctx, &ethpb.Checkpoint{Epoch: finalized.Epoch, Root: finalized.Root[:]}); err != nil {
return err
}
fRoot := bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root)
if err := s.cfg.ForkChoiceStore.Prune(ctx, fRoot); err != nil {
return errors.Wrap(err, "could not prune fork choice nodes")
}
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(fRoot)
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(finalized.Root)
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
}
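The rewritten persistence logic above compares fork choice's justified and finalized checkpoints against the epochs captured before the state transition, and against the pre- and post-state epochs, before writing anything to the database. Below is a sketch of that predicate; shouldPersistCheckpoint is an illustrative helper for this sketch, not a Prysm function.

```go
// Sketch of the checkpoint-persistence predicate used for both the justified
// and finalized checkpoints in the diff above.
package main

import "fmt"

// A checkpoint is persisted when fork choice moved past the epoch recorded
// before the block was processed, or when it matches the post-state epoch and
// that epoch is newer than the pre-state epoch.
func shouldPersistCheckpoint(storeEpoch, prevStoreEpoch, preStateEpoch, postStateEpoch uint64) bool {
	return storeEpoch > prevStoreEpoch || (storeEpoch == postStateEpoch && storeEpoch > preStateEpoch)
}

func main() {
	fmt.Println(shouldPersistCheckpoint(5, 4, 4, 5)) // true: fork choice advanced an epoch
	fmt.Println(shouldPersistCheckpoint(5, 5, 5, 5)) // false: nothing new to save
}
```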
@@ -298,23 +252,21 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
// with a custom deadline, therefore using the background context instead.
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
defer cancel()
if err := s.insertFinalizedDeposits(depCtx, fRoot); err != nil {
if err := s.insertFinalizedDeposits(depCtx, finalized.Root); err != nil {
log.WithError(err).Error("Could not insert finalized deposits.")
}
}()

}

defer reportAttestationInclusion(b)

return s.handleEpochBoundary(ctx, postState)
}

func getStateVersionAndPayload(st state.BeaconState) (int, *ethpb.ExecutionPayloadHeader, error) {
func getStateVersionAndPayload(st state.BeaconState) (int, *enginev1.ExecutionPayloadHeader, error) {
if st == nil {
return 0, nil, errors.New("nil state")
}
var preStateHeader *ethpb.ExecutionPayloadHeader
var preStateHeader *enginev1.ExecutionPayloadHeader
var err error
preStateVersion := st.Version()
switch preStateVersion {
@@ -342,7 +294,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
}

if err := wrapper.BeaconBlockIsNil(blks[0]); err != nil {
return invalidBlock{err}
return invalidBlock{error: err}
}
b := blks[0].Block()

@@ -372,7 +324,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
}
type versionAndHeader struct {
version int
header *ethpb.ExecutionPayloadHeader
header *enginev1.ExecutionPayloadHeader
}
preVersionAndHeaders := make([]*versionAndHeader, len(blks))
postVersionAndHeaders := make([]*versionAndHeader, len(blks))
@@ -390,7 +342,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac

set, preState, err = transition.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, b)
if err != nil {
return invalidBlock{err}
return invalidBlock{error: err}
}
// Save potential boundary states.
if slots.IsEpochStart(preState.Slot()) {
@@ -411,7 +363,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
}
verify, err := sigSet.Verify()
if err != nil {
return invalidBlock{err}
return invalidBlock{error: err}
}
if !verify {
return errors.New("batch block signature verification failed")
@@ -437,11 +389,29 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
JustifiedCheckpoint: jCheckpoints[i],
FinalizedCheckpoint: fCheckpoints[i]}
pendingNodes[len(blks)-i-1] = args
s.saveInitSyncBlock(blockRoots[i], b)
if err = s.handleBlockAfterBatchVerify(ctx, b, blockRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
if err := s.saveInitSyncBlock(ctx, blockRoots[i], b); err != nil {
tracing.AnnotateError(span, err)
return err
}
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
Slot: b.Block().Slot(),
Root: blockRoots[i][:],
}); err != nil {
tracing.AnnotateError(span, err)
return err
}
if i > 0 && jCheckpoints[i].Epoch > jCheckpoints[i-1].Epoch {
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, jCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return err
}
}
if i > 0 && fCheckpoints[i].Epoch > fCheckpoints[i-1].Epoch {
if err := s.updateFinalized(ctx, fCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return err
}
}
}
// Insert all nodes but the last one to forkchoice
if err := s.cfg.ForkChoiceStore.InsertOptimisticChain(ctx, pendingNodes); err != nil {
@@ -452,14 +422,6 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastBR); err != nil {
return errors.Wrap(err, "could not insert last block in batch to forkchoice")
}
// Prune forkchoice store only if the new finalized checkpoint is higher
// than the finalized checkpoint in forkchoice store.
if fCheckpoints[len(blks)-1].Epoch > s.cfg.ForkChoiceStore.FinalizedCheckpoint().Epoch {
if err := s.cfg.ForkChoiceStore.Prune(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(fCheckpoints[len(blks)-1].Root))); err != nil {
return errors.Wrap(err, "could not prune fork choice nodes")
}
}

// Set their optimistic status
if isValidPayload {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, lastBR); err != nil {
@@ -488,62 +450,6 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
return s.saveHeadNoDB(ctx, lastB, lastBR, preState)
}

// handles a block after the block's batch has been verified, where we can save blocks
// their state summaries and split them off to relative hot/cold storage.
func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed interfaces.SignedBeaconBlock,
blockRoot [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {

if err := s.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
Slot: signed.Block().Slot(),
Root: blockRoot[:],
}); err != nil {
return err
}

// Rate limit how many blocks (2 epochs worth of blocks) a node keeps in the memory.
if uint64(len(s.getInitSyncBlocks())) > initialSyncBlockCacheSize {
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
s.clearInitSyncBlocks()
}

justified, err := s.store.JustifiedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get justified checkpoint")
}
if jCheckpoint.Epoch > justified.Epoch {
if err := s.updateJustifiedInitSync(ctx, jCheckpoint); err != nil {
return err
}
}

finalized, err := s.store.FinalizedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint")
}
if finalized == nil {
return errNilFinalizedInStore
}
// Update finalized check point. Prune the block cache and helper caches on every new finalized epoch.
if fCheckpoint.Epoch > finalized.Epoch {
if err := s.updateFinalized(ctx, fCheckpoint); err != nil {
return err
}
s.store.SetPrevFinalizedCheckpt(finalized)
h, err := s.getPayloadHash(ctx, fCheckpoint.Root)
if err != nil {
return err
}
s.store.SetFinalizedCheckptAndPayloadHash(fCheckpoint, h)
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
Epoch: fCheckpoint.Epoch, Root: bytesutil.ToBytes32(fCheckpoint.Root)}); err != nil {
return err
}
}
return nil
}

// Epoch boundary bookkeeping such as logging epoch summaries.
func (s *Service) handleEpochBoundary(ctx context.Context, postState state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
@@ -595,9 +501,15 @@ func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Contex
ctx, span := trace.StartSpan(ctx, "blockChain.insertBlockAndAttestationsToForkChoiceStore")
defer span.End()

fCheckpoint := st.FinalizedCheckpoint()
jCheckpoint := st.CurrentJustifiedCheckpoint()
if err := s.insertBlockToForkChoiceStore(ctx, blk, root, st, fCheckpoint, jCheckpoint); err != nil {
if !s.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(blk.ParentRoot())) {
fCheckpoint := st.FinalizedCheckpoint()
jCheckpoint := st.CurrentJustifiedCheckpoint()
if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
return err
}
}

if err := s.cfg.ForkChoiceStore.InsertNode(ctx, st, root); err != nil {
return err
}
// Feed in block's attestations to fork choice store.
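insertBlockAndAttestationsToForkChoiceStore now back-fills missing ancestors only when the block's parent is absent from fork choice, and then inserts the node directly. The following sketch shows that control flow with illustrative types under stated assumptions, not Prysm's interfaces.

```go
// Sketch of "back-fill only when the parent is unknown, then insert".
package main

import "fmt"

type forkChoicer interface {
	HasNode(root [32]byte) bool
	InsertNode(root [32]byte) error
}

type memForkChoice struct{ nodes map[[32]byte]bool }

func (f *memForkChoice) HasNode(r [32]byte) bool     { return f.nodes[r] }
func (f *memForkChoice) InsertNode(r [32]byte) error { f.nodes[r] = true; return nil }

// insertBlock fills in missing ancestors only when the parent is not yet known
// to fork choice, then inserts the block itself.
func insertBlock(fc forkChoicer, root, parent [32]byte, fill func() error) error {
	if !fc.HasNode(parent) {
		if err := fill(); err != nil {
			return err
		}
	}
	return fc.InsertNode(root)
}

func main() {
	fc := &memForkChoice{nodes: map[[32]byte]bool{{1}: true}}
	_ = insertBlock(fc, [32]byte{2}, [32]byte{1}, func() error {
		fmt.Println("filling missing ancestors")
		return nil
	})
}
```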
@@ -615,14 +527,7 @@ func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Contex
return nil
}

func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk interfaces.BeaconBlock, root [32]byte, st state.BeaconState, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
return err
}
return s.cfg.ForkChoiceStore.InsertNode(ctx, st, root)
}

// Inserts attester slashing indices to fork choice store.
// InsertSlashingsToForkChoiceStore inserts attester slashing indices to fork choice store.
// To call this function, it's caller's responsibility to ensure the slashing object is valid.
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []*ethpb.AttesterSlashing) {
for _, slashing := range slashings {
@@ -674,18 +579,22 @@ func (s *Service) pruneCanonicalAttsFromPool(ctx context.Context, r [32]byte, b
}

// validateMergeTransitionBlock validates the merge transition block.
func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion int, stateHeader *ethpb.ExecutionPayloadHeader, blk interfaces.SignedBeaconBlock) error {
func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion int, stateHeader *enginev1.ExecutionPayloadHeader, blk interfaces.SignedBeaconBlock) error {
// Skip validation if block is older than Bellatrix.
if blocks.IsPreBellatrixVersion(blk.Block().Version()) {
return nil
}

// Skip validation if block has an empty payload.
payload, err := blk.Block().Body().ExecutionPayload()
payload, err := blk.Block().Body().Execution()
if err != nil {
return invalidBlock{err}
return invalidBlock{error: err}
}
if bellatrix.IsEmptyPayload(payload) {
isEmpty, err := wrapper.IsEmptyExecutionData(payload)
if err != nil {
return err
}
if isEmpty {
return nil
}

@@ -696,11 +605,16 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
}

// Skip validation if the block is not a merge transition block.
atTransition, err := blocks.IsMergeTransitionBlockUsingPreStatePayloadHeader(stateHeader, blk.Block().Body())
// To reach here. The payload must be non-empty. If the state header is empty then it's at transition.
wh, err := wrapper.WrappedExecutionPayloadHeader(stateHeader)
if err != nil {
return errors.Wrap(err, "could not check if merge block is terminal")
return err
}
if !atTransition {
empty, err := wrapper.IsEmptyExecutionData(wh)
if err != nil {
return err
}
if !empty {
return nil
}
return s.validateMergeBlock(ctx, blk)

@@ -95,17 +95,13 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b interfaces.BeaconBloc
func (s *Service) VerifyFinalizedBlkDescendant(ctx context.Context, root [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.VerifyFinalizedBlkDescendant")
defer span.End()
finalized, err := s.store.FinalizedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint")
}
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
finalizedBlkSigned, err := s.getBlock(ctx, fRoot)
finalized := s.ForkChoicer().FinalizedCheckpoint()
fRoot := s.ensureRootNotZeros(finalized.Root)
fSlot, err := slots.EpochStart(finalized.Epoch)
if err != nil {
return err
}
finalizedBlk := finalizedBlkSigned.Block()
bFinalizedRoot, err := s.ancestor(ctx, root[:], finalizedBlk.Slot())
bFinalizedRoot, err := s.ancestor(ctx, root[:], fSlot)
if err != nil {
return errors.Wrap(err, "could not get finalized block root")
}
@@ -115,10 +111,10 @@ func (s *Service) VerifyFinalizedBlkDescendant(ctx context.Context, root [32]byt

if !bytes.Equal(bFinalizedRoot, fRoot[:]) {
err := fmt.Errorf("block %#x is not a descendant of the current finalized block slot %d, %#x != %#x",
bytesutil.Trunc(root[:]), finalizedBlk.Slot(), bytesutil.Trunc(bFinalizedRoot),
bytesutil.Trunc(root[:]), fSlot, bytesutil.Trunc(bFinalizedRoot),
bytesutil.Trunc(fRoot[:]))
tracing.AnnotateError(span, err)
return invalidBlock{err}
return invalidBlock{error: err}
}
return nil
}
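VerifyFinalizedBlkDescendant no longer loads the finalized block; it derives the finalized slot from the finalized epoch and compares the ancestor at that slot with the finalized root. Below is a sketch of those two steps, assuming the mainnet value of 32 slots per epoch.

```go
// Sketch of the slot derivation and root comparison used above.
package main

import (
	"bytes"
	"fmt"
)

const slotsPerEpoch = 32 // mainnet value, taken as an assumption here

// epochStart returns the first slot of an epoch, as slots.EpochStart does.
func epochStart(epoch uint64) uint64 { return epoch * slotsPerEpoch }

// isDescendant mirrors the bytes.Equal check: the ancestor of the candidate
// block at the finalized slot must equal the finalized root.
func isDescendant(ancestorAtFinalizedSlot, finalizedRoot []byte) bool {
	return bytes.Equal(ancestorAtFinalizedSlot, finalizedRoot)
}

func main() {
	fmt.Println(epochStart(3))                            // 96
	fmt.Println(isDescendant([]byte{0xaa}, []byte{0xaa})) // true
}
```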
@@ -126,123 +122,20 @@ func (s *Service) VerifyFinalizedBlkDescendant(ctx context.Context, root [32]byt
// verifyBlkFinalizedSlot validates input block is not less than or equal
// to current finalized slot.
func (s *Service) verifyBlkFinalizedSlot(b interfaces.BeaconBlock) error {
finalized, err := s.store.FinalizedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint")
}
finalized := s.ForkChoicer().FinalizedCheckpoint()
finalizedSlot, err := slots.EpochStart(finalized.Epoch)
if err != nil {
return err
}
if finalizedSlot >= b.Slot() {
err = fmt.Errorf("block is equal or earlier than finalized block, slot %d < slot %d", b.Slot(), finalizedSlot)
return invalidBlock{err}
return invalidBlock{error: err}
}
return nil
}

// shouldUpdateCurrentJustified prevents bouncing attack, by only update conflicting justified
// checkpoints in the fork choice if in the early slots of the epoch.
// Otherwise, delay incorporation of new justified checkpoint until next epoch boundary.
//
// Spec code:
// def should_update_justified_checkpoint(store: Store, new_justified_checkpoint: Checkpoint) -> bool:
// """
// To address the bouncing attack, only update conflicting justified
// checkpoints in the fork choice if in the early slots of the epoch.
// Otherwise, delay incorporation of new justified checkpoint until next epoch boundary.
//
// See https://ethresear.ch/t/prevention-of-bouncing-attack-on-ffg/6114 for more detailed analysis and discussion.
// """
// if compute_slots_since_epoch_start(get_current_slot(store)) < SAFE_SLOTS_TO_UPDATE_JUSTIFIED:
// return True
//
// justified_slot = compute_start_slot_at_epoch(store.justified_checkpoint.epoch)
// if not get_ancestor(store, new_justified_checkpoint.root, justified_slot) == store.justified_checkpoint.root:
// return False
//
// return True
func (s *Service) shouldUpdateCurrentJustified(ctx context.Context, newJustifiedCheckpt *ethpb.Checkpoint) (bool, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.shouldUpdateCurrentJustified")
defer span.End()

if slots.SinceEpochStarts(s.CurrentSlot()) < params.BeaconConfig().SafeSlotsToUpdateJustified {
return true, nil
}
justified, err := s.store.JustifiedCheckpt()
if err != nil {
return false, errors.Wrap(err, "could not get justified checkpoint")
}
jSlot, err := slots.EpochStart(justified.Epoch)
if err != nil {
return false, err
}
justifiedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(newJustifiedCheckpt.Root))
b, err := s.ancestor(ctx, justifiedRoot[:], jSlot)
if err != nil {
return false, err
}
if !bytes.Equal(b, justified.Root) {
return false, nil
}

return true, nil
}

func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.updateJustified")
defer span.End()

cpt := state.CurrentJustifiedCheckpoint()
bestJustified, err := s.store.BestJustifiedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get best justified checkpoint")
}
if cpt.Epoch > bestJustified.Epoch {
s.store.SetBestJustifiedCheckpt(cpt)
}
canUpdate, err := s.shouldUpdateCurrentJustified(ctx, cpt)
if err != nil {
return err
}

if canUpdate {
justified, err := s.store.JustifiedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get justified checkpoint")
}
s.store.SetPrevJustifiedCheckpt(justified)
h, err := s.getPayloadHash(ctx, cpt.Root)
if err != nil {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(cpt, h)
}
return nil
}

// This caches input checkpoint as justified for the service struct. It rotates current justified to previous justified,
// caches justified checkpoint balances for fork choice and save justified checkpoint in DB.
// This method does not have defense against fork choice bouncing attack, which is why it's only recommend to be used during initial syncing.
func (s *Service) updateJustifiedInitSync(ctx context.Context, cp *ethpb.Checkpoint) error {
justified, err := s.store.JustifiedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get justified checkpoint")
}
s.store.SetPrevJustifiedCheckpt(justified)

if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cp); err != nil {
return err
}
h, err := s.getPayloadHash(ctx, cp.Root)
if err != nil {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(cp, h)
return s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{
Epoch: cp.Epoch, Root: bytesutil.ToBytes32(cp.Root)})
}

// updateFinalized saves the init sync blocks, finalized checkpoint, migrates
// to cold old states and saves the last validated checkpoint to DB
func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.updateFinalized")
|
||||
defer span.End()
|
||||
@@ -348,10 +241,7 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
|
||||
pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, 0)
|
||||
|
||||
// Fork choice only matters from last finalized slot.
|
||||
finalized, err := s.store.FinalizedCheckpt()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
finalized := s.ForkChoicer().FinalizedCheckpoint()
|
||||
fSlot, err := slots.EpochStart(finalized.Epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -377,7 +267,7 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
|
||||
if len(pendingNodes) == 1 {
|
||||
return nil
|
||||
}
|
||||
if root != s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root)) {
|
||||
if root != s.ensureRootNotZeros(finalized.Root) {
|
||||
return errNotDescendantOfFinalized
|
||||
}
|
||||
return s.cfg.ForkChoiceStore.InsertOptimisticChain(ctx, pendingNodes)
|
||||
|
||||
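A minimal illustrative sketch (not part of the diff) of the access pattern these hunks converge on; the checkpoint and fork-choicer types below are simplified stand-ins for Prysm's own.

// Sketch only: simplified stand-ins, assuming a checkpoint carries an epoch and a root.
type checkpoint struct {
	Epoch uint64
	Root  [32]byte
}

type forkChoicer interface {
	FinalizedCheckpoint() *checkpoint
	JustifiedCheckpoint() *checkpoint
}

// finalizedStartSlot shows the recurring shape: read the checkpoint from fork
// choice (no error return, unlike the removed store getters) and derive the
// epoch's start slot from it.
func finalizedStartSlot(fc forkChoicer, slotsPerEpoch uint64) uint64 {
	finalized := fc.FinalizedCheckpoint()
	return finalized.Epoch * slotsPerEpoch
}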
File diff suppressed because it is too large
@@ -71,10 +71,7 @@ func (s *Service) VerifyFinalizedConsistency(ctx context.Context, root []byte) e
return nil
}

f, err := s.FinalizedCheckpt()
if err != nil {
return err
}
f := s.FinalizedCheckpt()
ss, err := slots.EpochStart(f.Epoch)
if err != nil {
return err
@@ -123,7 +120,7 @@ func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
case <-s.ctx.Done():
return
case <-st.C():
if err := s.NewSlot(s.ctx, s.CurrentSlot()); err != nil {
if err := s.ForkChoicer().NewSlot(s.ctx, s.CurrentSlot()); err != nil {
log.WithError(err).Error("Could not process new slot")
return
}
@@ -152,11 +149,8 @@ func (s *Service) UpdateHead(ctx context.Context) error {

s.processAttestations(ctx)

justified, err := s.store.JustifiedCheckpt()
if err != nil {
return err
}
balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(justified.Root))
justified := s.ForkChoicer().JustifiedCheckpoint()
balances, err := s.justifiedBalances.get(ctx, justified.Root)
if err != nil {
return err
}
@@ -172,33 +166,35 @@ func (s *Service) UpdateHead(ctx context.Context) error {
}).Debug("Head changed due to attestations")
}
s.headLock.RUnlock()
s.notifyEngineIfChangedHead(ctx, newHeadRoot)
if err := s.notifyEngineIfChangedHead(ctx, newHeadRoot); err != nil {
return err
}
return nil
}

// This calls notify Forkchoice Update in the event that the head has changed
func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32]byte) {
func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32]byte) error {
s.headLock.RLock()
if newHeadRoot == [32]byte{} || s.headRoot() == newHeadRoot {
s.headLock.RUnlock()
return
return nil
}
s.headLock.RUnlock()

if !s.hasBlockInInitSyncOrDB(ctx, newHeadRoot) {
log.Debug("New head does not exist in DB. Do nothing")
return // We don't have the block, don't notify the engine and update head.
return nil // We don't have the block, don't notify the engine and update head.
}

newHeadBlock, err := s.getBlock(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get new head block")
return
return nil
}
headState, err := s.cfg.StateGen.StateByRoot(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get state from db")
return
return nil
}
arg := &notifyForkchoiceUpdateArg{
headState: headState,
@@ -207,11 +203,12 @@ func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32
}
_, err = s.notifyForkchoiceUpdate(s.ctx, arg)
if err != nil {
log.WithError(err).Error("could not notify forkchoice update")
return err
}
if err := s.saveHead(ctx, newHeadRoot, newHeadBlock, headState); err != nil {
log.WithError(err).Error("could not save head")
}
return nil
}

// This processes fork choice attestations from the pool to account for validator votes and fork choice.
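For illustration only, a hypothetical caller showing why the signature change above matters: failures to notify the engine or save the head now surface to the caller instead of being logged and dropped.

// Hypothetical wrapper; Service and notifyEngineIfChangedHead are the ones above.
func (s *Service) updateHeadAndNotify(ctx context.Context, newHeadRoot [32]byte) error {
	if err := s.notifyEngineIfChangedHead(ctx, newHeadRoot); err != nil {
		return errors.Wrap(err, "could not notify engine of changed head")
	}
	return nil
}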
@@ -49,17 +49,13 @@ func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
|
||||
|
||||
b32 := util.NewBeaconBlock()
|
||||
b32.Block.Slot = 32
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b32)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b32)
|
||||
r32, err := b32.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
b33 := util.NewBeaconBlock()
|
||||
b33.Block.Slot = 33
|
||||
b33.Block.ParentRoot = r32[:]
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(b33)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b33)
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -80,17 +76,13 @@ func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
|
||||
|
||||
b32 := util.NewBeaconBlock()
|
||||
b32.Block.Slot = 32
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b32)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b32)
|
||||
r32, err := b32.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
b33 := util.NewBeaconBlock()
|
||||
b33.Block.Slot = 33
|
||||
b33.Block.ParentRoot = r32[:]
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(b33)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b33)
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -140,15 +132,15 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
|
||||
service.notifyEngineIfChangedHead(ctx, service.headRoot())
|
||||
require.NoError(t, service.notifyEngineIfChangedHead(ctx, service.headRoot()))
|
||||
hookErr := "could not notify forkchoice update"
|
||||
invalidStateErr := "Could not get state from db"
|
||||
require.LogsDoNotContain(t, hook, invalidStateErr)
|
||||
require.LogsDoNotContain(t, hook, hookErr)
|
||||
gb, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
service.saveInitSyncBlock([32]byte{'a'}, gb)
|
||||
service.notifyEngineIfChangedHead(ctx, [32]byte{'a'})
|
||||
require.NoError(t, service.saveInitSyncBlock(ctx, [32]byte{'a'}, gb))
|
||||
require.NoError(t, service.notifyEngineIfChangedHead(ctx, [32]byte{'a'}))
|
||||
require.LogsContain(t, hook, invalidStateErr)
|
||||
|
||||
hook.Reset()
|
||||
@@ -164,8 +156,7 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
r1, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
service.saveInitSyncBlock(r1, wsb)
|
||||
finalized := ðpb.Checkpoint{Root: r1[:], Epoch: 0}
|
||||
require.NoError(t, service.saveInitSyncBlock(ctx, r1, wsb))
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
service.head = &head{
|
||||
slot: 1,
|
||||
@@ -174,20 +165,16 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
|
||||
state: st,
|
||||
}
|
||||
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1})
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(finalized, [32]byte{})
|
||||
service.notifyEngineIfChangedHead(ctx, r1)
|
||||
require.NoError(t, service.notifyEngineIfChangedHead(ctx, r1))
|
||||
require.LogsDoNotContain(t, hook, invalidStateErr)
|
||||
require.LogsDoNotContain(t, hook, hookErr)
|
||||
|
||||
// Block in DB
|
||||
b = util.NewBeaconBlock()
|
||||
b.Block.Slot = 3
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b)
|
||||
r1, err = b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
finalized = ðpb.Checkpoint{Root: r1[:], Epoch: 0}
|
||||
st, _ = util.DeterministicGenesisState(t, 1)
|
||||
service.head = &head{
|
||||
slot: 1,
|
||||
@@ -196,8 +183,7 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
|
||||
state: st,
|
||||
}
|
||||
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1})
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(finalized, [32]byte{})
|
||||
service.notifyEngineIfChangedHead(ctx, r1)
|
||||
require.NoError(t, service.notifyEngineIfChangedHead(ctx, r1))
|
||||
require.LogsDoNotContain(t, hook, invalidStateErr)
|
||||
require.LogsDoNotContain(t, hook, hookErr)
|
||||
vId, payloadID, has := service.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(2)
|
||||
@@ -207,7 +193,7 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
|
||||
|
||||
// Test zero headRoot returns immediately.
|
||||
headRoot := service.headRoot()
|
||||
service.notifyEngineIfChangedHead(ctx, [32]byte{})
|
||||
require.NoError(t, service.notifyEngineIfChangedHead(ctx, [32]byte{}))
|
||||
require.Equal(t, service.headRoot(), headRoot)
|
||||
}
|
||||
|
||||
@@ -254,9 +240,7 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
|
||||
b.Block.ParentRoot = service.originBlockRoot[:]
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wb))
|
||||
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 2, r, service.originBlockRoot, [32]byte{'b'}, ojc, ojc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
@@ -59,17 +59,11 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.SignedBeaco
}

// Reports on block and fork choice metrics.
justified, err := s.store.JustifiedCheckpt()
if err != nil {
return err
}
finalized, err := s.store.FinalizedCheckpt()
if err != nil {
return errNilFinalizedInStore
}
finalized := s.FinalizedCheckpt()
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)

// Log block sync status.
justified := s.CurrentJustifiedCheckpt()
if err := logBlockSyncStatus(blockCopy.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix())); err != nil {
log.WithError(err).Error("Unable to log block sync status")
}
@@ -113,20 +107,14 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Sig
})

// Reports on blockCopy and fork choice metrics.
finalized, err := s.store.FinalizedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint")
}
finalized := s.FinalizedCheckpt()
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
}

if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
finalized, err := s.store.FinalizedCheckpt()
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint")
}
finalized := s.FinalizedCheckpt()
if finalized == nil {
return errNilFinalizedInStore
}
@@ -179,10 +167,7 @@ func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
currentEpoch := slots.ToEpoch(s.CurrentSlot())
// Prevent `sinceFinality` going underflow.
var sinceFinality types.Epoch
finalized, err := s.store.FinalizedCheckpt()
if err != nil {
return err
}
finalized := s.FinalizedCheckpt()
if finalized == nil {
return errNilFinalizedInStore
}
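A small sketch of the nil-guard idiom used in the ReceiveBlock hunks above; the helper name is illustrative and not part of the change, while errNilFinalizedInStore is the package error already referenced in the diff.

// Sketch: FinalizedCheckpt is assumed to return nil until a finalized
// checkpoint is known, so callers guard before dereferencing it.
func ensureFinalized(finalized *ethpb.Checkpoint) error {
	if finalized == nil {
		return errNilFinalizedInStore
	}
	return nil
}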
@@ -137,12 +137,6 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
s, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.saveGenesisData(ctx, genesis))
|
||||
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
gRoot, err := gBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
h := [32]byte{'a'}
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: gRoot[:]}, h)
|
||||
root, err := tt.args.block.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(tt.args.block)
|
||||
@@ -178,11 +172,6 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
|
||||
s, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.saveGenesisData(ctx, genesis))
|
||||
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
gRoot, err := gBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wg := sync.WaitGroup{}
|
||||
@@ -256,12 +245,6 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
err = s.saveGenesisData(ctx, genesis)
|
||||
require.NoError(t, err)
|
||||
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
gRoot, err := gBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
|
||||
root, err := tt.args.block.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(tt.args.block)
|
||||
@@ -290,17 +273,15 @@ func TestService_HasBlock(t *testing.T) {
|
||||
}
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
s.saveInitSyncBlock(r, wsb)
|
||||
require.NoError(t, s.saveInitSyncBlock(context.Background(), r, wsb))
|
||||
if !s.HasBlock(context.Background(), r) {
|
||||
t.Error("Should have block")
|
||||
}
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 1
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, context.Background(), s.cfg.BeaconDB, b)
|
||||
r, err = b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
|
||||
require.Equal(t, true, s.HasBlock(context.Background(), r))
|
||||
}
|
||||
|
||||
@@ -311,7 +292,6 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
|
||||
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{}, [32]byte{})
|
||||
|
||||
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
|
||||
assert.LogsContain(t, hook, "Entering mode to save hot states in DB")
|
||||
@@ -324,7 +304,6 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
|
||||
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{}, [32]byte{})
|
||||
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
|
||||
s.genesisTime = time.Now()
|
||||
|
||||
@@ -337,7 +316,6 @@ func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
|
||||
opts := testServiceOptsWithDB(t)
|
||||
s, err := NewService(context.Background(), opts...)
|
||||
require.NoError(t, err)
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{}, [32]byte{})
|
||||
s.genesisTime = time.Now()
|
||||
|
||||
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
|
||||
|
||||
@@ -12,7 +12,6 @@ import (

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/store"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
@@ -65,7 +64,6 @@ type Service struct {
initSyncBlocksLock sync.RWMutex
justifiedBalances *stateBalanceCache
wsVerifier *WeakSubjectivityVerifier
store *store.Store
processAttestationsLock sync.Mutex
}

@@ -103,7 +101,6 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
checkpointStateCache: cache.NewCheckpointStateCache(),
initSyncBlocks: make(map[[32]byte]interfaces.SignedBeaconBlock),
cfg: &config{},
store: &store.Store{},
}
for _, opt := range opts {
if err := opt(srv); err != nil {
@@ -204,7 +201,6 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
if finalized == nil {
return errNilFinalizedCheckpoint
}
s.store = store.New(justified, finalized)

var forkChoicer f.ForkChoicer
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
@@ -472,10 +468,6 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
s.originBlockRoot = genesisBlkRoot
s.cfg.StateGen.SaveFinalizedState(0 /*slot*/, genesisBlkRoot, genesisState)

// Finalized checkpoint at genesis is a zero hash.
genesisCheckpoint := genesisState.FinalizedCheckpoint()
s.store = store.New(genesisCheckpoint, genesisCheckpoint)

if err := s.cfg.ForkChoiceStore.InsertNode(ctx, genesisState, genesisBlkRoot); err != nil {
log.Fatalf("Could not process genesis block for fork choice: %v", err)
}
@@ -9,10 +9,9 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/prysmaticlabs/prysm/async/event"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/store"
|
||||
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
|
||||
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
@@ -152,9 +151,7 @@ func TestChainStartStop_Initialized(t *testing.T) {
|
||||
genesisBlk := util.NewBeaconBlock()
|
||||
blkRoot, err := genesisBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(genesisBlk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
util.SaveBlock(t, ctx, beaconDB, genesisBlk)
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.SetSlot(1))
|
||||
@@ -189,9 +186,7 @@ func TestChainStartStop_GenesisZeroHashes(t *testing.T) {
|
||||
genesisBlk := util.NewBeaconBlock()
|
||||
blkRoot, err := genesisBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(genesisBlk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
wsb := util.SaveBlock(t, ctx, beaconDB, genesisBlk)
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
|
||||
@@ -222,9 +217,9 @@ func TestChainService_InitializeBeaconChain(t *testing.T) {
|
||||
count := uint64(10)
|
||||
deposits, _, err := util.DeterministicDepositsAndKeys(count)
|
||||
require.NoError(t, err)
|
||||
trie, _, err := util.DepositTrieFromDeposits(deposits)
|
||||
dt, _, err := util.DepositTrieFromDeposits(deposits)
|
||||
require.NoError(t, err)
|
||||
hashTreeRoot, err := trie.HashTreeRoot()
|
||||
hashTreeRoot, err := dt.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
genState, err := transition.EmptyGenesisState()
|
||||
require.NoError(t, err)
|
||||
@@ -234,7 +229,7 @@ func TestChainService_InitializeBeaconChain(t *testing.T) {
|
||||
BlockHash: make([]byte, 32),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
genState, err = b.ProcessPreGenesisDeposits(ctx, genState, deposits)
|
||||
genState, err = blocks.ProcessPreGenesisDeposits(ctx, genState, deposits)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = bc.initializeBeaconChain(ctx, time.Unix(0, 0), genState, ðpb.Eth1Data{DepositRoot: hashTreeRoot[:], BlockHash: make([]byte, 32)})
|
||||
@@ -263,9 +258,7 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {
|
||||
genesisBlk := util.NewBeaconBlock()
|
||||
blkRoot, err := genesisBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(genesisBlk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
util.SaveBlock(t, ctx, beaconDB, genesisBlk)
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.SetSlot(0))
|
||||
@@ -277,10 +270,9 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {
|
||||
// Test the start function.
|
||||
chainService.Start()
|
||||
|
||||
cp, err := chainService.store.FinalizedCheckpt()
|
||||
require.NoError(t, err)
|
||||
cp := chainService.FinalizedCheckpt()
|
||||
require.DeepEqual(t, blkRoot[:], cp.Root, "Finalize Checkpoint root is incorrect")
|
||||
cp, err = chainService.store.JustifiedCheckpt()
|
||||
cp = chainService.CurrentJustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, params.BeaconConfig().ZeroHash[:], cp.Root, "Justified Checkpoint root is incorrect")
|
||||
|
||||
@@ -296,9 +288,7 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
util.SaveBlock(t, ctx, beaconDB, genesis)
|
||||
|
||||
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
|
||||
headBlock := util.NewBeaconBlock()
|
||||
@@ -312,9 +302,7 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(headBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
util.SaveBlock(t, ctx, beaconDB, headBlock)
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, ðpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
|
||||
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
|
||||
require.NoError(t, err)
|
||||
@@ -346,9 +334,7 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
util.SaveBlock(t, ctx, beaconDB, genesis)
|
||||
|
||||
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
|
||||
headBlock := util.NewBeaconBlock()
|
||||
@@ -362,9 +348,7 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(headBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
util.SaveBlock(t, ctx, beaconDB, headBlock)
|
||||
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
|
||||
require.NoError(t, err)
|
||||
ss := ðpb.StateSummary{
|
||||
@@ -403,18 +387,14 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
|
||||
genesisRoot, err := genesisBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(genesisBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
util.SaveBlock(t, ctx, beaconDB, genesisBlock)
|
||||
|
||||
finalizedBlock := util.NewBeaconBlock()
|
||||
finalizedBlock.Block.Slot = finalizedSlot
|
||||
finalizedBlock.Block.ParentRoot = genesisRoot[:]
|
||||
finalizedRoot, err := finalizedBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(finalizedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
util.SaveBlock(t, ctx, beaconDB, finalizedBlock)
|
||||
|
||||
// Set head slot close to the finalization point, no head sync is triggered.
|
||||
headBlock := util.NewBeaconBlock()
|
||||
@@ -422,9 +402,7 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
|
||||
headBlock.Block.ParentRoot = finalizedRoot[:]
|
||||
headRoot, err := headBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(headBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
util.SaveBlock(t, ctx, beaconDB, headBlock)
|
||||
|
||||
headState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
@@ -459,9 +437,7 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
|
||||
headBlock.Block.ParentRoot = finalizedRoot[:]
|
||||
headRoot, err = headBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(headBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
util.SaveBlock(t, ctx, beaconDB, headBlock)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
|
||||
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, headRoot))
|
||||
|
||||
@@ -481,7 +457,7 @@ func TestChainService_SaveHeadNoDB(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
s := &Service{
|
||||
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
|
||||
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New()},
|
||||
}
|
||||
blk := util.NewBeaconBlock()
|
||||
blk.Block.Slot = 1
|
||||
@@ -505,10 +481,8 @@ func TestHasBlock_ForkChoiceAndDB_ProtoArray(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
cfg: &config{ForkChoiceStore: protoarray.New(), BeaconDB: beaconDB},
|
||||
store: &store.Store{},
|
||||
cfg: &config{ForkChoiceStore: protoarray.New(), BeaconDB: beaconDB},
|
||||
}
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
|
||||
b := util.NewBeaconBlock()
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
@@ -526,10 +500,8 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
|
||||
store: &store.Store{},
|
||||
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
|
||||
}
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
|
||||
b := util.NewBeaconBlock()
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
@@ -552,12 +524,12 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
|
||||
cancel: cancel,
|
||||
initSyncBlocks: make(map[[32]byte]interfaces.SignedBeaconBlock),
|
||||
}
|
||||
b := util.NewBeaconBlock()
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
bb := util.NewBeaconBlock()
|
||||
r, err := bb.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(bb)
|
||||
require.NoError(t, err)
|
||||
s.saveInitSyncBlock(r, wsb)
|
||||
require.NoError(t, s.saveInitSyncBlock(ctx, r, wsb))
|
||||
require.NoError(t, s.Stop())
|
||||
require.Equal(t, true, s.cfg.BeaconDB.HasBlock(ctx, r))
|
||||
}
|
||||
@@ -599,10 +571,8 @@ func BenchmarkHasBlockForkChoiceStore_ProtoArray(b *testing.B) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(b)
|
||||
s := &Service{
|
||||
cfg: &config{ForkChoiceStore: protoarray.New(), BeaconDB: beaconDB},
|
||||
store: &store.Store{},
|
||||
cfg: &config{ForkChoiceStore: protoarray.New(), BeaconDB: beaconDB},
|
||||
}
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
|
||||
blk := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}}
|
||||
r, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(b, err)
|
||||
@@ -622,10 +592,8 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(b)
|
||||
s := &Service{
|
||||
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
|
||||
store: &store.Store{},
|
||||
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
|
||||
}
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
|
||||
blk := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}}
|
||||
r, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(b, err)
|
||||
|
||||
@@ -117,7 +117,7 @@ func TestStateBalanceCache(t *testing.T) {
balances []uint64
name string
}
sentinelCacheMiss := errors.New("Cache missed, as expected!")
sentinelCacheMiss := errors.New("cache missed, as expected")
sentinelBalances := []uint64{1, 2, 3, 4, 5}
halfExpiredValidators, halfExpiredBalances := testHalfExpiredValidators()
halfQueuedValidators, halfQueuedBalances := testHalfQueuedValidators()
@@ -1,30 +0,0 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"doc.go",
|
||||
"new.go",
|
||||
"setter_getter.go",
|
||||
"type.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/store",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"new_test.go",
|
||||
"setter_getter_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -1,4 +0,0 @@
|
||||
// Package store implements the store object defined in the phase0 fork choice spec.
|
||||
// It serves as a helpful middleware layer in between blockchain pkg and fork choice protoarray pkg.
|
||||
// All the getters and setters are concurrent thread safe
|
||||
package store
|
||||
@@ -1,16 +0,0 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// New creates a store object.
|
||||
func New(justifiedCheckpt *ethpb.Checkpoint, finalizedCheckpt *ethpb.Checkpoint) *Store {
|
||||
return &Store{
|
||||
justifiedCheckpt: justifiedCheckpt,
|
||||
prevJustifiedCheckpt: justifiedCheckpt,
|
||||
bestJustifiedCheckpt: justifiedCheckpt,
|
||||
finalizedCheckpt: finalizedCheckpt,
|
||||
prevFinalizedCheckpt: finalizedCheckpt,
|
||||
}
|
||||
}
|
||||
@@ -1,35 +0,0 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
j := ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: []byte("hi"),
|
||||
}
|
||||
f := ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: []byte("hello"),
|
||||
}
|
||||
s := New(j, f)
|
||||
cp, err := s.JustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, j, cp)
|
||||
cp, err = s.BestJustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, j, cp)
|
||||
cp, err = s.PrevJustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, j, cp)
|
||||
cp, err = s.FinalizedCheckpt()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, f, cp)
|
||||
cp, err = s.PrevFinalizedCheckpt()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, f, cp)
|
||||
}
|
||||
@@ -1,111 +0,0 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrNilCheckpoint = errors.New("nil checkpoint")
|
||||
)
|
||||
|
||||
// PrevJustifiedCheckpt returns the previous justified checkpoint in the Store.
|
||||
func (s *Store) PrevJustifiedCheckpt() (*ethpb.Checkpoint, error) {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
if s.prevJustifiedCheckpt == nil {
|
||||
return nil, ErrNilCheckpoint
|
||||
}
|
||||
return s.prevJustifiedCheckpt, nil
|
||||
}
|
||||
|
||||
// BestJustifiedCheckpt returns the best justified checkpoint in the Store.
|
||||
func (s *Store) BestJustifiedCheckpt() (*ethpb.Checkpoint, error) {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
if s.bestJustifiedCheckpt == nil {
|
||||
return nil, ErrNilCheckpoint
|
||||
}
|
||||
return s.bestJustifiedCheckpt, nil
|
||||
}
|
||||
|
||||
// JustifiedCheckpt returns the justified checkpoint in the Store.
|
||||
func (s *Store) JustifiedCheckpt() (*ethpb.Checkpoint, error) {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
if s.justifiedCheckpt == nil {
|
||||
return nil, ErrNilCheckpoint
|
||||
}
|
||||
return s.justifiedCheckpt, nil
|
||||
}
|
||||
|
||||
// JustifiedPayloadBlockHash returns the justified payload block hash reflecting justified check point.
|
||||
func (s *Store) JustifiedPayloadBlockHash() [32]byte {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
return s.justifiedPayloadBlockHash
|
||||
}
|
||||
|
||||
// PrevFinalizedCheckpt returns the previous finalized checkpoint in the Store.
|
||||
func (s *Store) PrevFinalizedCheckpt() (*ethpb.Checkpoint, error) {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
if s.prevFinalizedCheckpt == nil {
|
||||
return nil, ErrNilCheckpoint
|
||||
}
|
||||
return s.prevFinalizedCheckpt, nil
|
||||
}
|
||||
|
||||
// FinalizedCheckpt returns the finalized checkpoint in the Store.
|
||||
func (s *Store) FinalizedCheckpt() (*ethpb.Checkpoint, error) {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
if s.finalizedCheckpt == nil {
|
||||
return nil, ErrNilCheckpoint
|
||||
}
|
||||
return s.finalizedCheckpt, nil
|
||||
}
|
||||
|
||||
// FinalizedPayloadBlockHash returns the finalized payload block hash reflecting finalized check point.
|
||||
func (s *Store) FinalizedPayloadBlockHash() [32]byte {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
return s.finalizedPayloadBlockHash
|
||||
}
|
||||
|
||||
// SetPrevJustifiedCheckpt sets the previous justified checkpoint in the Store.
|
||||
func (s *Store) SetPrevJustifiedCheckpt(cp *ethpb.Checkpoint) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
s.prevJustifiedCheckpt = cp
|
||||
}
|
||||
|
||||
// SetBestJustifiedCheckpt sets the best justified checkpoint in the Store.
|
||||
func (s *Store) SetBestJustifiedCheckpt(cp *ethpb.Checkpoint) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
s.bestJustifiedCheckpt = cp
|
||||
}
|
||||
|
||||
// SetJustifiedCheckptAndPayloadHash sets the justified checkpoint and blockhash in the Store.
|
||||
func (s *Store) SetJustifiedCheckptAndPayloadHash(cp *ethpb.Checkpoint, h [32]byte) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
s.justifiedCheckpt = cp
|
||||
s.justifiedPayloadBlockHash = h
|
||||
}
|
||||
|
||||
// SetFinalizedCheckptAndPayloadHash sets the finalized checkpoint and blockhash in the Store.
|
||||
func (s *Store) SetFinalizedCheckptAndPayloadHash(cp *ethpb.Checkpoint, h [32]byte) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
s.finalizedCheckpt = cp
|
||||
s.finalizedPayloadBlockHash = h
|
||||
}
|
||||
|
||||
// SetPrevFinalizedCheckpt sets the previous finalized checkpoint in the Store.
|
||||
func (s *Store) SetPrevFinalizedCheckpt(cp *ethpb.Checkpoint) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
s.prevFinalizedCheckpt = cp
|
||||
}
|
||||
@@ -1,72 +0,0 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
|
||||
func Test_store_PrevJustifiedCheckpt(t *testing.T) {
|
||||
s := &Store{}
|
||||
var cp *ethpb.Checkpoint
|
||||
_, err := s.PrevJustifiedCheckpt()
|
||||
require.ErrorIs(t, ErrNilCheckpoint, err)
|
||||
cp = ðpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
|
||||
s.SetPrevJustifiedCheckpt(cp)
|
||||
got, err := s.PrevJustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, cp, got)
|
||||
}
|
||||
|
||||
func Test_store_BestJustifiedCheckpt(t *testing.T) {
|
||||
s := &Store{}
|
||||
var cp *ethpb.Checkpoint
|
||||
_, err := s.BestJustifiedCheckpt()
|
||||
require.ErrorIs(t, ErrNilCheckpoint, err)
|
||||
cp = ðpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
|
||||
s.SetBestJustifiedCheckpt(cp)
|
||||
got, err := s.BestJustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, cp, got)
|
||||
}
|
||||
|
||||
func Test_store_JustifiedCheckpt(t *testing.T) {
|
||||
s := &Store{}
|
||||
var cp *ethpb.Checkpoint
|
||||
_, err := s.JustifiedCheckpt()
|
||||
require.ErrorIs(t, ErrNilCheckpoint, err)
|
||||
cp = ðpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
|
||||
h := [32]byte{'b'}
|
||||
s.SetJustifiedCheckptAndPayloadHash(cp, h)
|
||||
got, err := s.JustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, cp, got)
|
||||
require.Equal(t, h, s.JustifiedPayloadBlockHash())
|
||||
}
|
||||
|
||||
func Test_store_FinalizedCheckpt(t *testing.T) {
|
||||
s := &Store{}
|
||||
var cp *ethpb.Checkpoint
|
||||
_, err := s.FinalizedCheckpt()
|
||||
require.ErrorIs(t, ErrNilCheckpoint, err)
|
||||
cp = ðpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
|
||||
h := [32]byte{'b'}
|
||||
s.SetFinalizedCheckptAndPayloadHash(cp, h)
|
||||
got, err := s.FinalizedCheckpt()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, cp, got)
|
||||
require.Equal(t, h, s.FinalizedPayloadBlockHash())
|
||||
}
|
||||
|
||||
func Test_store_PrevFinalizedCheckpt(t *testing.T) {
|
||||
s := &Store{}
|
||||
var cp *ethpb.Checkpoint
|
||||
_, err := s.PrevFinalizedCheckpt()
|
||||
require.ErrorIs(t, ErrNilCheckpoint, err)
|
||||
cp = ðpb.Checkpoint{Epoch: 1, Root: []byte{'a'}}
|
||||
s.SetPrevFinalizedCheckpt(cp)
|
||||
got, err := s.PrevFinalizedCheckpt()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, cp, got)
|
||||
}
|
||||
@@ -1,30 +0,0 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// Store is defined in the fork choice consensus spec for tracking current time and various versions of checkpoints.
|
||||
//
|
||||
// Spec code:
|
||||
// class Store(object):
|
||||
// time: uint64
|
||||
// genesis_time: uint64
|
||||
// justified_checkpoint: Checkpoint
|
||||
// finalized_checkpoint: Checkpoint
|
||||
// best_justified_checkpoint: Checkpoint
|
||||
// proposerBoostRoot: Root
|
||||
type Store struct {
|
||||
justifiedCheckpt *ethpb.Checkpoint
|
||||
justifiedPayloadBlockHash [32]byte
|
||||
finalizedCheckpt *ethpb.Checkpoint
|
||||
finalizedPayloadBlockHash [32]byte
|
||||
bestJustifiedCheckpt *ethpb.Checkpoint
|
||||
sync.RWMutex
|
||||
// These are not part of the consensus spec, but we do use them to return gRPC API requests.
|
||||
// TODO(10094): Consider removing in v3.
|
||||
prevFinalizedCheckpt *ethpb.Checkpoint
|
||||
prevJustifiedCheckpt *ethpb.Checkpoint
|
||||
}
|
||||
@@ -63,6 +63,7 @@ type ChainService struct {
ForkChoiceStore forkchoice.ForkChoicer
ReceiveBlockMockErr error
OptimisticCheckRootReceived [32]byte
FinalizedRoots map[[32]byte]bool
}

// ForkChoicer mocks the same method in the chain service
@@ -283,18 +284,18 @@ func (s *ChainService) CurrentFork() *ethpb.Fork {
}

// FinalizedCheckpt mocks FinalizedCheckpt method in chain service.
func (s *ChainService) FinalizedCheckpt() (*ethpb.Checkpoint, error) {
return s.FinalizedCheckPoint, nil
func (s *ChainService) FinalizedCheckpt() *ethpb.Checkpoint {
return s.FinalizedCheckPoint
}

// CurrentJustifiedCheckpt mocks CurrentJustifiedCheckpt method in chain service.
func (s *ChainService) CurrentJustifiedCheckpt() (*ethpb.Checkpoint, error) {
return s.CurrentJustifiedCheckPoint, nil
func (s *ChainService) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
return s.CurrentJustifiedCheckPoint
}

// PreviousJustifiedCheckpt mocks PreviousJustifiedCheckpt method in chain service.
func (s *ChainService) PreviousJustifiedCheckpt() (*ethpb.Checkpoint, error) {
return s.PreviousJustifiedCheckPoint, nil
func (s *ChainService) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
return s.PreviousJustifiedCheckPoint
}

// ReceiveAttestation mocks ReceiveAttestation method in chain service.
@@ -458,3 +459,8 @@ func (s *ChainService) UpdateHead(_ context.Context) error { return nil }

// ReceiveAttesterSlashing mocks the same method in the chain service.
func (s *ChainService) ReceiveAttesterSlashing(context.Context, *ethpb.AttesterSlashing) {}

// IsFinalized mocks the same method in the chain service.
func (s *ChainService) IsFinalized(_ context.Context, blockRoot [32]byte) bool {
return s.FinalizedRoots[blockRoot]
}
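An illustrative (hypothetical) test snippet using the mock's value-returning getters after this change; only fields and methods shown in the hunks above are assumed.

// Hypothetical test: the getters now return the checkpoint directly, with no error.
func TestMockCheckpointGetters(t *testing.T) {
	c := &ChainService{
		FinalizedCheckPoint:        &ethpb.Checkpoint{Epoch: 1},
		CurrentJustifiedCheckPoint: &ethpb.Checkpoint{Epoch: 2},
	}
	require.Equal(t, c.FinalizedCheckPoint, c.FinalizedCheckpt())
	require.Equal(t, c.CurrentJustifiedCheckPoint, c.CurrentJustifiedCheckpt())
}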
@@ -5,11 +5,11 @@ import (
"testing"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/store"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
@@ -22,9 +22,7 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {

b := util.NewBeaconBlock()
b.Block.Slot = 1792480
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
util.SaveBlock(t, context.Background(), beaconDB, b)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)

@@ -74,14 +72,13 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
wv, err := NewWeakSubjectivityVerifier(tt.checkpt, beaconDB)
require.Equal(t, !tt.disabled, wv.enabled)
require.NoError(t, err)
fcs := protoarray.New()
s := &Service{
cfg: &config{BeaconDB: beaconDB, WeakSubjectivityCheckpt: tt.checkpt},
store: &store.Store{},
cfg: &config{BeaconDB: beaconDB, WeakSubjectivityCheckpt: tt.checkpt, ForkChoiceStore: fcs},
wsVerifier: wv,
}
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: tt.finalizedEpoch}, [32]byte{})
cp, err := s.store.FinalizedCheckpt()
require.NoError(t, err)
require.NoError(t, fcs.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: tt.finalizedEpoch}))
cp := s.ForkChoicer().FinalizedCheckpoint()
err = s.wsVerifier.VerifyWeakSubjectivity(context.Background(), cp.Epoch)
if tt.wantErr == nil {
require.NoError(t, err)
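Sketch only: the test above now drives finalization through the protoarray fork choicer rather than the removed store; a stripped-down version of that wiring, with only names taken from the hunks above, looks roughly like this.

// Illustrative helper, assuming the Service and config fields shown above.
func newServiceWithFinalizedEpoch(t *testing.T, epoch types.Epoch) *Service {
	fcs := protoarray.New()
	s := &Service{cfg: &config{ForkChoiceStore: fcs}}
	require.NoError(t, fcs.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: epoch}))
	return s
}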
@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"error.go",
"metric.go",
"option.go",
"service.go",

@@ -1,7 +0,0 @@
package builder

import "github.com/pkg/errors"

var (
ErrNotRunning = errors.New("builder is not running")
)
@@ -1,6 +1,7 @@
package builder

import (
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/network"
@@ -27,10 +28,18 @@ func WithBuilderEndpoints(endpoint string) Option {
}
}

// WithDatabase sets the database for the beacon chain builder service.
func WithDatabase(database db.HeadAccessDatabase) Option {
// WithHeadFetcher gets the head info from chain service.
func WithHeadFetcher(svc *blockchain.Service) Option {
return func(s *Service) error {
s.cfg.beaconDB = database
s.cfg.headFetcher = svc
return nil
}
}

// WithDatabase for head access.
func WithDatabase(beaconDB db.HeadAccessDatabase) Option {
return func(s *Service) error {
s.cfg.beaconDB = beaconDB
return nil
}
}
@@ -2,6 +2,7 @@ package builder

import (
"context"
"fmt"
"time"

"github.com/pkg/errors"
@@ -21,8 +22,8 @@ import (
type BlockBuilder interface {
SubmitBlindedBlock(ctx context.Context, block *ethpb.SignedBlindedBeaconBlockBellatrix) (*v1.ExecutionPayload, error)
GetHeader(ctx context.Context, slot types.Slot, parentHash [32]byte, pubKey [48]byte) (*ethpb.SignedBuilderBid, error)
Status(ctx context.Context) error
RegisterValidator(ctx context.Context, reg *ethpb.SignedValidatorRegistrationV1) error
RegisterValidator(ctx context.Context, reg []*ethpb.SignedValidatorRegistrationV1) error
Configured() bool
}

// config defines a config struct for dependencies into the service.
@@ -34,13 +35,20 @@ type config struct {

// Service defines a service that provides a client for interacting with the beacon chain and MEV relay network.
type Service struct {
cfg *config
c *builder.Client
cfg *config
c *builder.Client
ctx context.Context
cancel context.CancelFunc
}

// NewService instantiates a new service.
func NewService(ctx context.Context, opts ...Option) (*Service, error) {
s := &Service{}
ctx, cancel := context.WithCancel(ctx)
s := &Service{
ctx: ctx,
cancel: cancel,
cfg: &config{},
}
for _, opt := range opts {
if err := opt(s); err != nil {
return nil, err
@@ -52,6 +60,13 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
return nil, err
}
s.c = c

// Is the builder up?
if err := s.c.Status(ctx); err != nil {
return nil, fmt.Errorf("could not connect to builder: %v", err)
}

log.WithField("endpoint", c.NodeURL()).Info("Builder has been configured")
}
return s, nil
}
@@ -89,14 +104,19 @@ func (s *Service) GetHeader(ctx context.Context, slot types.Slot, parentHash [32
}

// Status retrieves the status of the builder relay network.
func (s *Service) Status(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "builder.Status")
func (s *Service) Status() error {
ctx, span := trace.StartSpan(context.Background(), "builder.Status")
defer span.End()
start := time.Now()
defer func() {
getStatusLatency.Observe(float64(time.Since(start).Milliseconds()))
}()

// Return early if builder isn't initialized in service.
if s.c == nil {
return nil
}

return s.c.Status(ctx)
}

@@ -132,3 +152,8 @@ func (s *Service) RegisterValidator(ctx context.Context, reg []*ethpb.SignedVali

return s.cfg.beaconDB.SaveRegistrationsByValidatorIDs(ctx, idxs, msgs)
}

// Configured returns true if the user has input a builder URL.
func (s *Service) Configured() bool {
return s.cfg.builderEndpoint.Url != ""
}
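A hypothetical wiring example for the options and constructor defined above; the endpoint value is a placeholder, and error handling is left to the caller.

// Sketch: construct the builder service with a relay endpoint, DB access and a head fetcher.
func newBuilderService(ctx context.Context, beaconDB db.HeadAccessDatabase, chain *blockchain.Service) (*Service, error) {
	return NewService(ctx,
		WithBuilderEndpoints("http://127.0.0.1:18550"), // placeholder endpoint, not from the diff
		WithDatabase(beaconDB),
		WithHeadFetcher(chain),
	)
}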
14
beacon-chain/builder/testing/BUILD.bazel
Normal file
@@ -0,0 +1,14 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
name = "go_default_library",
testonly = True,
srcs = ["mock.go"],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/builder/testing",
visibility = ["//visibility:public"],
deps = [
"//consensus-types/primitives:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
],
)
39
beacon-chain/builder/testing/mock.go
Normal file
@@ -0,0 +1,39 @@
package testing

import (
"context"

types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)

// MockBuilderService to mock builder.
type MockBuilderService struct {
HasConfigured bool
Payload *v1.ExecutionPayload
ErrSubmitBlindedBlock error
Bid *ethpb.SignedBuilderBid
ErrGetHeader error
ErrRegisterValidator error
}

// Configured for mocking.
func (s *MockBuilderService) Configured() bool {
return s.HasConfigured
}

// SubmitBlindedBlock for mocking.
func (s *MockBuilderService) SubmitBlindedBlock(context.Context, *ethpb.SignedBlindedBeaconBlockBellatrix) (*v1.ExecutionPayload, error) {
return s.Payload, s.ErrSubmitBlindedBlock
}

// GetHeader for mocking.
func (s *MockBuilderService) GetHeader(context.Context, types.Slot, [32]byte, [48]byte) (*ethpb.SignedBuilderBid, error) {
return s.Bid, s.ErrGetHeader
}

// RegisterValidator for mocking.
func (s *MockBuilderService) RegisterValidator(context.Context, []*ethpb.SignedValidatorRegistrationV1) error {
return s.ErrRegisterValidator
}
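Illustrative only: a consumer test can steer the mock through its exported fields, for example to simulate a failing relay; the test name and error text below are made up for the sketch.

// Hypothetical usage of MockBuilderService defined above.
func TestGetHeaderPropagatesRelayError(t *testing.T) {
	m := &MockBuilderService{HasConfigured: true, ErrGetHeader: errors.New("relay down")}
	if _, err := m.GetHeader(context.Background(), 0, [32]byte{}, [48]byte{}); err == nil {
		t.Fatal("expected GetHeader to return the configured error")
	}
	if !m.Configured() {
		t.Fatal("expected mock to report as configured")
	}
}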
@@ -77,7 +77,7 @@ func New() (*DepositCache, error) {
// InsertDeposit into the database. If deposit or block number are nil
// then this method does nothing.
func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "DepositsCache.InsertDeposit")
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertDeposit")
defer span.End()
if d == nil {
log.WithFields(logrus.Fields{
@@ -111,7 +111,7 @@ func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blo

// InsertDepositContainers inserts a set of deposit containers into our deposit cache.
func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*ethpb.DepositContainer) {
_, span := trace.StartSpan(ctx, "DepositsCache.InsertDepositContainers")
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertDepositContainers")
defer span.End()
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
@@ -130,7 +130,7 @@ func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*eth

// InsertFinalizedDeposits inserts deposits up to eth1DepositIndex (inclusive) into the finalized deposits cache.
func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1DepositIndex int64) {
_, span := trace.StartSpan(ctx, "DepositsCache.InsertFinalizedDeposits")
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertFinalizedDeposits")
defer span.End()
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
@@ -180,7 +180,7 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit

// AllDepositContainers returns all historical deposit containers.
func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*ethpb.DepositContainer {
_, span := trace.StartSpan(ctx, "DepositsCache.AllDepositContainers")
ctx, span := trace.StartSpan(ctx, "DepositsCache.AllDepositContainers")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
@@ -191,7 +191,7 @@ func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*ethpb.Depos
// AllDeposits returns a list of historical deposits until the given block number
// (inclusive). If no block is specified then this method returns all historical deposits.
func (dc *DepositCache) AllDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
_, span := trace.StartSpan(ctx, "DepositsCache.AllDeposits")
ctx, span := trace.StartSpan(ctx, "DepositsCache.AllDeposits")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
@@ -212,7 +212,7 @@ func (dc *DepositCache) allDeposits(untilBlk *big.Int) []*ethpb.Deposit {
// DepositsNumberAndRootAtHeight returns number of deposits made up to blockheight and the
// root that corresponds to the latest deposit at that blockheight.
func (dc *DepositCache) DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte) {
_, span := trace.StartSpan(ctx, "DepositsCache.DepositsNumberAndRootAtHeight")
ctx, span := trace.StartSpan(ctx, "DepositsCache.DepositsNumberAndRootAtHeight")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
@@ -228,7 +228,7 @@ func (dc *DepositCache) DepositsNumberAndRootAtHeight(ctx context.Context, block
// DepositByPubkey looks through historical deposits and finds one which contains
// a certain public key within its deposit data.
func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int) {
_, span := trace.StartSpan(ctx, "DepositsCache.DepositByPubkey")
ctx, span := trace.StartSpan(ctx, "DepositsCache.DepositByPubkey")
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
@@ -249,7 +249,7 @@ func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*et
|
||||
|
||||
// FinalizedDeposits returns the finalized deposits trie.
|
||||
func (dc *DepositCache) FinalizedDeposits(ctx context.Context) *FinalizedDeposits {
|
||||
_, span := trace.StartSpan(ctx, "DepositsCache.FinalizedDeposits")
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.FinalizedDeposits")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
|
||||
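The repeated change in the hunks above replaces the discarded first return value of trace.StartSpan with the derived context, so spans opened further down the call chain nest correctly. A minimal sketch of the resulting idiom; the package name, function name, and the opencensus import path are assumptions, not part of the diff.

package depositcache

import (
	"context"

	"go.opencensus.io/trace"
)

// exampleOp is a hypothetical method body illustrating the corrected idiom:
// keep the context returned by StartSpan so downstream calls join the same trace.
func exampleOp(ctx context.Context) {
	ctx, span := trace.StartSpan(ctx, "DepositsCache.ExampleOp")
	defer span.End()
	_ = ctx // in the real methods this ctx is what later calls should receive
}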
@@ -428,9 +428,9 @@ func TestFinalizedDeposits_DepositsCachedCorrectly(t *testing.T) {
require.NoError(t, err, "Could not hash deposit data")
deps = append(deps, hash[:])
}
trie, err := trie.GenerateTrieFromItems(deps, params.BeaconConfig().DepositContractTreeDepth)
generatedTrie, err := trie.GenerateTrieFromItems(deps, params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err, "Could not generate deposit trie")
rootA, err := trie.HashTreeRoot()
rootA, err := generatedTrie.HashTreeRoot()
require.NoError(t, err)
rootB, err := cachedDeposits.Deposits.HashTreeRoot()
require.NoError(t, err)
@@ -490,9 +490,9 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
require.NoError(t, err, "Could not hash deposit data")
deps = append(deps, hash[:])
}
trie, err := trie.GenerateTrieFromItems(deps, params.BeaconConfig().DepositContractTreeDepth)
generatedTrie, err := trie.GenerateTrieFromItems(deps, params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err, "Could not generate deposit trie")
rootA, err := trie.HashTreeRoot()
rootA, err := generatedTrie.HashTreeRoot()
require.NoError(t, err)
rootB, err := cachedDeposits.Deposits.HashTreeRoot()
require.NoError(t, err)
@@ -29,7 +29,7 @@ type PendingDepositsFetcher interface {
// InsertPendingDeposit into the database. If deposit or block number are nil
// then this method does nothing.
func (dc *DepositCache) InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) {
_, span := trace.StartSpan(ctx, "DepositsCache.InsertPendingDeposit")
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertPendingDeposit")
defer span.End()
if d == nil {
log.WithFields(logrus.Fields{
@@ -66,7 +66,7 @@ func (dc *DepositCache) PendingDeposits(ctx context.Context, untilBlk *big.Int)
// PendingContainers returns a list of deposit containers until the given block number
// (inclusive).
func (dc *DepositCache) PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer {
_, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
ctx, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
defer span.End()
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
@@ -90,7 +90,7 @@ func (dc *DepositCache) PendingContainers(ctx context.Context, untilBlk *big.Int
// RemovePendingDeposit from the database. The deposit is indexed by the
// Index. This method does nothing if deposit ptr is nil.
func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Deposit) {
_, span := trace.StartSpan(ctx, "DepositsCache.RemovePendingDeposit")
ctx, span := trace.StartSpan(ctx, "DepositsCache.RemovePendingDeposit")
defer span.End()

if d == nil {
@@ -109,12 +109,12 @@ func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Depos

idx := -1
for i, ctnr := range dc.pendingDeposits {
hash, err := hash.HashProto(ctnr.Deposit)
h, err := hash.HashProto(ctnr.Deposit)
if err != nil {
log.Errorf("Could not hash deposit %v", err)
continue
}
if hash == depRoot {
if h == depRoot {
idx = i
break
}
@@ -128,7 +128,7 @@ func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Depos

// PrunePendingDeposits removes any deposit which is older than the given deposit merkle tree index.
func (dc *DepositCache) PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64) {
_, span := trace.StartSpan(ctx, "DepositsCache.PrunePendingDeposits")
ctx, span := trace.StartSpan(ctx, "DepositsCache.PrunePendingDeposits")
defer span.End()

if merkleTreeIndex == 0 {
8
beacon-chain/cache/sync_committee_test.go
vendored
@@ -161,16 +161,16 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, uint64(numValidators))
require.NoError(t, s.SetCurrentSyncCommittee(tt.currentSyncCommittee))
require.NoError(t, s.SetNextSyncCommittee(tt.nextSyncCommittee))
cache := cache.NewSyncCommittee()
c := cache.NewSyncCommittee()
r := [32]byte{'a'}
require.NoError(t, cache.UpdatePositionsInCommittee(r, s))
require.NoError(t, c.UpdatePositionsInCommittee(r, s))
for key, indices := range tt.currentSyncMap {
pos, err := cache.CurrentPeriodIndexPosition(r, key)
pos, err := c.CurrentPeriodIndexPosition(r, key)
require.NoError(t, err)
require.DeepEqual(t, indices, pos)
}
for key, indices := range tt.nextSyncMap {
pos, err := cache.NextPeriodIndexPosition(r, key)
pos, err := c.NextPeriodIndexPosition(r, key)
require.NoError(t, err)
require.DeepEqual(t, indices, pos)
}
@@ -34,8 +34,8 @@ func ProcessAttestationsNoVerifySignature(
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for idx, attestation := range body.Attestations() {
|
||||
beaconState, err = ProcessAttestationNoVerifySignature(ctx, beaconState, attestation, totalBalance)
|
||||
for idx, att := range body.Attestations() {
|
||||
beaconState, err = ProcessAttestationNoVerifySignature(ctx, beaconState, att, totalBalance)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not verify attestation at index %d in block", idx)
|
||||
}
|
||||
|
||||
@@ -411,21 +411,21 @@ func TestValidatorFlag_Add_ExceedsLength(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðpb.BeaconStateAltair{}
|
||||
st := ðpb.BeaconStateAltair{}
|
||||
b := ðpb.SignedBeaconBlockAltair{Block: ðpb.BeaconBlockAltair{}}
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(st)
|
||||
fuzzer.Fuzz(b)
|
||||
if b.Block == nil {
|
||||
b.Block = ðpb.BeaconBlockAltair{}
|
||||
}
|
||||
s, err := stateAltair.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateAltair.InitializeFromProtoUnsafe(st)
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
r, err := altair.ProcessAttestationsNoVerifySignature(context.Background(), s, wsb)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, b)
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, s, b)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -25,7 +25,7 @@ func ProcessDeposits(
|
||||
if deposit == nil || deposit.Data == nil {
|
||||
return nil, errors.New("got a nil deposit in block")
|
||||
}
|
||||
beaconState, err = ProcessDeposit(ctx, beaconState, deposit, batchVerified)
|
||||
beaconState, err = ProcessDeposit(beaconState, deposit, batchVerified)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not process deposit from %#x", bytesutil.Trunc(deposit.Data.PublicKey))
|
||||
}
|
||||
@@ -34,7 +34,7 @@ func ProcessDeposits(
|
||||
}
|
||||
|
||||
// ProcessDeposit processes validator deposit for beacon state Altair.
|
||||
func ProcessDeposit(ctx context.Context, beaconState state.BeaconState, deposit *ethpb.Deposit, verifySignature bool) (state.BeaconState, error) {
|
||||
func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, verifySignature bool) (state.BeaconState, error) {
|
||||
beaconState, isNewValidator, err := blocks.ProcessDeposit(beaconState, deposit, verifySignature)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -40,7 +40,7 @@ func TestFuzzProcessDeposit_10000(t *testing.T) {
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, err := stateAltair.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
r, err := altair.ProcessDeposit(context.Background(), s, deposit, true)
|
||||
r, err := altair.ProcessDeposit(s, deposit, true)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposit)
|
||||
}
|
||||
|
||||
@@ -183,7 +183,7 @@ func TestProcessDeposit_AddsNewValidatorDeposit(t *testing.T) {
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
newState, err := altair.ProcessDeposit(context.Background(), beaconState, dep[0], true)
|
||||
newState, err := altair.ProcessDeposit(beaconState, dep[0], true)
|
||||
require.NoError(t, err, "Process deposit failed")
|
||||
require.Equal(t, 2, len(newState.Validators()), "Expected validator list to have length 2")
|
||||
require.Equal(t, 2, len(newState.Balances()), "Expected validator balances list to have length 2")
|
||||
@@ -201,9 +201,9 @@ func TestProcessDeposit_SkipsInvalidDeposit(t *testing.T) {
|
||||
dep, _, err := util.DeterministicDepositsAndKeys(1)
|
||||
require.NoError(t, err)
|
||||
dep[0].Data.Signature = make([]byte, 96)
|
||||
trie, _, err := util.DepositTrieFromDeposits(dep)
|
||||
dt, _, err := util.DepositTrieFromDeposits(dep)
|
||||
require.NoError(t, err)
|
||||
root, err := trie.HashTreeRoot()
|
||||
root, err := dt.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
eth1Data := ðpb.Eth1Data{
|
||||
DepositRoot: root[:],
|
||||
@@ -226,7 +226,7 @@ func TestProcessDeposit_SkipsInvalidDeposit(t *testing.T) {
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
newState, err := altair.ProcessDeposit(context.Background(), beaconState, dep[0], true)
|
||||
newState, err := altair.ProcessDeposit(beaconState, dep[0], true)
|
||||
require.NoError(t, err, "Expected invalid block deposit to be ignored without error")
|
||||
|
||||
if newState.Eth1DepositIndex() != 1 {
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
|
||||
// InitializePrecomputeValidators precomputes individual validator for its attested balances and the total sum of validators attested balances of the epoch.
|
||||
func InitializePrecomputeValidators(ctx context.Context, beaconState state.BeaconState) ([]*precompute.Validator, *precompute.Balance, error) {
|
||||
_, span := trace.StartSpan(ctx, "altair.InitializePrecomputeValidators")
|
||||
ctx, span := trace.StartSpan(ctx, "altair.InitializePrecomputeValidators")
|
||||
defer span.End()
|
||||
vals := make([]*precompute.Validator, beaconState.NumValidators())
|
||||
bal := &precompute.Balance{}
|
||||
@@ -76,7 +76,7 @@ func ProcessInactivityScores(
|
||||
beaconState state.BeaconState,
|
||||
vals []*precompute.Validator,
|
||||
) (state.BeaconState, []*precompute.Validator, error) {
|
||||
_, span := trace.StartSpan(ctx, "altair.ProcessInactivityScores")
|
||||
ctx, span := trace.StartSpan(ctx, "altair.ProcessInactivityScores")
|
||||
defer span.End()
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
@@ -144,7 +144,7 @@ func ProcessEpochParticipation(
|
||||
bal *precompute.Balance,
|
||||
vals []*precompute.Validator,
|
||||
) ([]*precompute.Validator, *precompute.Balance, error) {
|
||||
_, span := trace.StartSpan(ctx, "altair.ProcessEpochParticipation")
|
||||
ctx, span := trace.StartSpan(ctx, "altair.ProcessEpochParticipation")
|
||||
defer span.End()
|
||||
|
||||
cp, err := beaconState.CurrentEpochParticipation()
|
||||
|
||||
@@ -214,9 +214,12 @@ func ValidateSyncMessageTime(slot types.Slot, genesisTime time.Time, clockDispar
|
||||
// Verify sync message slot is within the time range.
|
||||
if messageTime.Before(lowerBound) || messageTime.After(upperBound) {
|
||||
return fmt.Errorf(
|
||||
"sync message slot %d not within allowable range of %d to %d (current slot)",
|
||||
"sync message time %v (slot %d) not within allowable range of %v (slot %d) to %v (slot %d)",
|
||||
messageTime,
|
||||
slot,
|
||||
lowerBound,
|
||||
uint64(lowerBound.Unix()-genesisTime.Unix())/params.BeaconConfig().SecondsPerSlot,
|
||||
upperBound,
|
||||
uint64(upperBound.Unix()-genesisTime.Unix())/params.BeaconConfig().SecondsPerSlot,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -28,12 +28,12 @@ func TestSyncCommitteeIndices_CanGet(t *testing.T) {
|
||||
EffectiveBalance: params.BeaconConfig().MinDepositAmount,
|
||||
}
|
||||
}
|
||||
state, err := stateAltair.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
st, err := stateAltair.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return state
|
||||
return st
|
||||
}
|
||||
|
||||
type args struct {
|
||||
@@ -103,27 +103,27 @@ func TestSyncCommitteeIndices_DifferentPeriods(t *testing.T) {
|
||||
EffectiveBalance: params.BeaconConfig().MinDepositAmount,
|
||||
}
|
||||
}
|
||||
state, err := stateAltair.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
st, err := stateAltair.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return state
|
||||
return st
|
||||
}
|
||||
|
||||
state := getState(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
got1, err := altair.NextSyncCommitteeIndices(context.Background(), state)
|
||||
st := getState(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
got1, err := altair.NextSyncCommitteeIndices(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch))
|
||||
got2, err := altair.NextSyncCommitteeIndices(context.Background(), state)
|
||||
require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch))
|
||||
got2, err := altair.NextSyncCommitteeIndices(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
require.DeepNotEqual(t, got1, got2)
|
||||
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch*types.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod)))
|
||||
got2, err = altair.NextSyncCommitteeIndices(context.Background(), state)
|
||||
require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch*types.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod)))
|
||||
got2, err = altair.NextSyncCommitteeIndices(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
require.DeepNotEqual(t, got1, got2)
|
||||
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch*types.Slot(2*params.BeaconConfig().EpochsPerSyncCommitteePeriod)))
|
||||
got2, err = altair.NextSyncCommitteeIndices(context.Background(), state)
|
||||
require.NoError(t, st.SetSlot(params.BeaconConfig().SlotsPerEpoch*types.Slot(2*params.BeaconConfig().EpochsPerSyncCommitteePeriod)))
|
||||
got2, err = altair.NextSyncCommitteeIndices(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
require.DeepNotEqual(t, got1, got2)
|
||||
}
|
||||
@@ -140,12 +140,12 @@ func TestSyncCommittee_CanGet(t *testing.T) {
|
||||
PublicKey: blsKey.PublicKey().Marshal(),
|
||||
}
|
||||
}
|
||||
state, err := stateAltair.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
st, err := stateAltair.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return state
|
||||
return st
|
||||
}
|
||||
|
||||
type args struct {
|
||||
@@ -260,8 +260,8 @@ func TestValidateNilSyncContribution(t *testing.T) {
|
||||
|
||||
func TestSyncSubCommitteePubkeys_CanGet(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
state := getState(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
com, err := altair.NextSyncCommittee(context.Background(), state)
|
||||
st := getState(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
com, err := altair.NextSyncCommittee(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
sub, err := altair.SyncSubCommitteePubkeys(com, 0)
|
||||
require.NoError(t, err)
|
||||
@@ -328,7 +328,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
|
||||
syncMessageSlot: 16,
|
||||
genesisTime: prysmTime.Now().Add(-(15 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)),
|
||||
},
|
||||
wantedErr: "sync message slot 16 not within allowable range of",
|
||||
wantedErr: "(slot 16) not within allowable range of",
|
||||
},
|
||||
{
|
||||
name: "sync_message.slot == current_slot+CLOCK_DISPARITY",
|
||||
@@ -344,7 +344,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
|
||||
syncMessageSlot: 100,
|
||||
genesisTime: prysmTime.Now().Add(-(100 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second) + params.BeaconNetworkConfig().MaximumGossipClockDisparity + 1000*time.Millisecond),
|
||||
},
|
||||
wantedErr: "sync message slot 100 not within allowable range of",
|
||||
wantedErr: "(slot 100) not within allowable range of",
|
||||
},
|
||||
{
|
||||
name: "sync_message.slot == current_slot-CLOCK_DISPARITY",
|
||||
@@ -360,7 +360,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
|
||||
syncMessageSlot: 101,
|
||||
genesisTime: prysmTime.Now().Add(-(100*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second + params.BeaconNetworkConfig().MaximumGossipClockDisparity)),
|
||||
},
|
||||
wantedErr: "sync message slot 101 not within allowable range of",
|
||||
wantedErr: "(slot 101) not within allowable range of",
|
||||
},
|
||||
{
|
||||
name: "sync_message.slot is well beyond current slot",
|
||||
@@ -395,10 +395,10 @@ func getState(t *testing.T, count uint64) state.BeaconState {
|
||||
PublicKey: blsKey.PublicKey().Marshal(),
|
||||
}
|
||||
}
|
||||
state, err := stateAltair.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
st, err := stateAltair.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return state
|
||||
return st
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ go_library(
|
||||
"attestation.go",
|
||||
"attester_slashing.go",
|
||||
"deposit.go",
|
||||
"error.go",
|
||||
"eth1_data.go",
|
||||
"exit.go",
|
||||
"genesis.go",
|
||||
@@ -15,6 +16,7 @@ go_library(
|
||||
"proposer_slashing.go",
|
||||
"randao.go",
|
||||
"signature.go",
|
||||
"withdrawals.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks",
|
||||
visibility = [
|
||||
@@ -31,7 +33,6 @@ go_library(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/forks/bellatrix:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
@@ -40,6 +41,7 @@ go_library(
|
||||
"//contracts/deposit:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//crypto/hash/htr:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
@@ -75,6 +77,7 @@ go_test(
|
||||
"proposer_slashing_test.go",
|
||||
"randao_test.go",
|
||||
"signature_test.go",
|
||||
"withdrawals_test.go",
|
||||
],
|
||||
data = glob(["testdata/**"]),
|
||||
embed = [":go_default_library"],
|
||||
@@ -89,11 +92,11 @@ go_test(
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/forks/bellatrix:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//crypto/hash/htr:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
|
||||
@@ -31,8 +31,8 @@ func ProcessAttestationsNoVerifySignature(
|
||||
}
|
||||
body := b.Block().Body()
|
||||
var err error
|
||||
for idx, attestation := range body.Attestations() {
|
||||
beaconState, err = ProcessAttestationNoVerifySignature(ctx, beaconState, attestation)
|
||||
for idx, att := range body.Attestations() {
|
||||
beaconState, err = ProcessAttestationNoVerifySignature(ctx, beaconState, att)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not verify attestation at index %d in block", idx)
|
||||
}
|
||||
|
||||
@@ -37,8 +37,8 @@ func ProcessPreGenesisDeposits(
|
||||
|
||||
// ActivateValidatorWithEffectiveBalance updates validator's effective balance, and if it's above MaxEffectiveBalance, validator becomes active in genesis.
|
||||
func ActivateValidatorWithEffectiveBalance(beaconState state.BeaconState, deposits []*ethpb.Deposit) (state.BeaconState, error) {
|
||||
for _, deposit := range deposits {
|
||||
pubkey := deposit.Data.PublicKey
|
||||
for _, d := range deposits {
|
||||
pubkey := d.Data.PublicKey
|
||||
index, ok := beaconState.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubkey))
|
||||
// In the event of the pubkey not existing, we continue processing the other
|
||||
// deposits.
|
||||
@@ -85,13 +85,13 @@ func ProcessDeposits(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, deposit := range deposits {
|
||||
if deposit == nil || deposit.Data == nil {
|
||||
for _, d := range deposits {
|
||||
if d == nil || d.Data == nil {
|
||||
return nil, errors.New("got a nil deposit in block")
|
||||
}
|
||||
beaconState, _, err = ProcessDeposit(beaconState, deposit, batchVerified)
|
||||
beaconState, _, err = ProcessDeposit(beaconState, d, batchVerified)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not process deposit from %#x", bytesutil.Trunc(deposit.Data.PublicKey))
|
||||
return nil, errors.Wrapf(err, "could not process deposit from %#x", bytesutil.Trunc(d.Data.PublicKey))
|
||||
}
|
||||
}
|
||||
return beaconState, nil
|
||||
|
||||
@@ -232,9 +232,9 @@ func TestProcessDeposit_SkipsInvalidDeposit(t *testing.T) {
|
||||
dep, _, err := util.DeterministicDepositsAndKeys(1)
|
||||
require.NoError(t, err)
|
||||
dep[0].Data.Signature = make([]byte, 96)
|
||||
trie, _, err := util.DepositTrieFromDeposits(dep)
|
||||
dt, _, err := util.DepositTrieFromDeposits(dep)
|
||||
require.NoError(t, err)
|
||||
root, err := trie.HashTreeRoot()
|
||||
root, err := dt.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
eth1Data := ðpb.Eth1Data{
|
||||
DepositRoot: root[:],
|
||||
@@ -283,15 +283,15 @@ func TestPreGenesisDeposits_SkipInvalidDeposit(t *testing.T) {
|
||||
dep, _, err := util.DeterministicDepositsAndKeys(100)
|
||||
require.NoError(t, err)
|
||||
dep[0].Data.Signature = make([]byte, 96)
|
||||
trie, _, err := util.DepositTrieFromDeposits(dep)
|
||||
dt, _, err := util.DepositTrieFromDeposits(dep)
|
||||
require.NoError(t, err)
|
||||
|
||||
for i := range dep {
|
||||
proof, err := trie.MerkleProof(i)
|
||||
proof, err := dt.MerkleProof(i)
|
||||
require.NoError(t, err)
|
||||
dep[i].Proof = proof
|
||||
}
|
||||
root, err := trie.HashTreeRoot()
|
||||
root, err := dt.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
eth1Data := ðpb.Eth1Data{
|
||||
|
||||
8
beacon-chain/core/blocks/error.go
Normal file
@@ -0,0 +1,8 @@
|
||||
package blocks
|
||||
|
||||
import "github.com/pkg/errors"
|
||||
|
||||
var errNilSignedWithdrawalMessage = errors.New("nil SignedBLSToExecutionChange message")
|
||||
var errNilWithdrawalMessage = errors.New("nil BLSToExecutionChange message")
|
||||
var errInvalidBLSPrefix = errors.New("withdrawal credential prefix is not a BLS prefix")
|
||||
var errInvalidWithdrawalCredentials = errors.New("withdrawal credentials do not match")
|
||||
@@ -7,12 +7,10 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/forks/bellatrix"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
@@ -40,20 +38,15 @@ func IsMergeTransitionComplete(st state.BeaconState) (bool, error) {
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return !bellatrix.IsEmptyHeader(h), nil
|
||||
}
|
||||
|
||||
// IsMergeTransitionBlockUsingPreStatePayloadHeader returns true if the input block is the terminal merge block.
|
||||
// Terminal merge block must be associated with an empty payload header.
|
||||
// This assumes the header `h` is referenced as the parent state for block body `body.
|
||||
func IsMergeTransitionBlockUsingPreStatePayloadHeader(h *ethpb.ExecutionPayloadHeader, body interfaces.BeaconBlockBody) (bool, error) {
|
||||
if h == nil || body == nil {
|
||||
return false, errors.New("nil header or block body")
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(h)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !bellatrix.IsEmptyHeader(h) {
|
||||
return false, nil
|
||||
isEmpty, err := wrapper.IsEmptyExecutionData(wrappedHeader)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return IsExecutionBlock(body)
|
||||
return !isEmpty, nil
|
||||
}
|
||||
|
||||
// IsExecutionBlock returns whether the block has a non-empty ExecutionPayload.
|
||||
@@ -65,7 +58,7 @@ func IsExecutionBlock(body interfaces.BeaconBlockBody) (bool, error) {
|
||||
if body == nil {
|
||||
return false, errors.New("nil block body")
|
||||
}
|
||||
payload, err := body.ExecutionPayload()
|
||||
payload, err := body.Execution()
|
||||
switch {
|
||||
case errors.Is(err, wrapper.ErrUnsupportedField):
|
||||
return false, nil
|
||||
@@ -73,7 +66,11 @@ func IsExecutionBlock(body interfaces.BeaconBlockBody) (bool, error) {
|
||||
return false, err
|
||||
default:
|
||||
}
|
||||
return !bellatrix.IsEmptyPayload(payload), nil
|
||||
isEmpty, err := wrapper.IsEmptyExecutionData(payload)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return !isEmpty, nil
|
||||
}
|
||||
|
||||
// IsExecutionEnabled returns true if the beacon chain can begin executing.
|
||||
@@ -98,8 +95,16 @@ func IsExecutionEnabled(st state.BeaconState, body interfaces.BeaconBlockBody) (
|
||||
|
||||
// IsExecutionEnabledUsingHeader returns true if the execution is enabled using post processed payload header and block body.
|
||||
// This is an optimized version of IsExecutionEnabled where beacon state is not required as an argument.
|
||||
func IsExecutionEnabledUsingHeader(header *ethpb.ExecutionPayloadHeader, body interfaces.BeaconBlockBody) (bool, error) {
|
||||
if !bellatrix.IsEmptyHeader(header) {
|
||||
func IsExecutionEnabledUsingHeader(header *enginev1.ExecutionPayloadHeader, body interfaces.BeaconBlockBody) (bool, error) {
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(header)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
isEmpty, err := wrapper.IsEmptyExecutionData(wrappedHeader)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !isEmpty {
|
||||
return true, nil
|
||||
}
|
||||
return IsExecutionBlock(body)
|
||||
@@ -117,7 +122,7 @@ func IsPreBellatrixVersion(v int) bool {
|
||||
// # Verify consistency of the parent hash with respect to the previous execution payload header
|
||||
// if is_merge_complete(state):
|
||||
// assert payload.parent_hash == state.latest_execution_payload_header.block_hash
|
||||
func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload *enginev1.ExecutionPayload) error {
|
||||
func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload interfaces.ExecutionData) error {
|
||||
complete, err := IsMergeTransitionComplete(st)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -130,7 +135,7 @@ func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload *enginev1.E
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(payload.ParentHash, header.BlockHash) {
|
||||
if !bytes.Equal(payload.ParentHash(), header.BlockHash) {
|
||||
return errors.New("incorrect block hash")
|
||||
}
|
||||
return nil
|
||||
@@ -144,20 +149,20 @@ func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload *enginev1.E
|
||||
// assert payload.random == get_randao_mix(state, get_current_epoch(state))
|
||||
// # Verify timestamp
|
||||
// assert payload.timestamp == compute_timestamp_at_slot(state, state.slot)
|
||||
func ValidatePayload(st state.BeaconState, payload *enginev1.ExecutionPayload) error {
|
||||
func ValidatePayload(st state.BeaconState, payload interfaces.ExecutionData) error {
|
||||
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !bytes.Equal(payload.PrevRandao, random) {
|
||||
if !bytes.Equal(payload.PrevRandao(), random) {
|
||||
return ErrInvalidPayloadPrevRandao
|
||||
}
|
||||
t, err := slots.ToTime(st.GenesisTime(), st.Slot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if payload.Timestamp != uint64(t.Unix()) {
|
||||
if payload.Timestamp() != uint64(t.Unix()) {
|
||||
return ErrInvalidPayloadTimeStamp
|
||||
}
|
||||
return nil
|
||||
@@ -195,27 +200,29 @@ func ValidatePayload(st state.BeaconState, payload *enginev1.ExecutionPayload) e
|
||||
// block_hash=payload.block_hash,
|
||||
// transactions_root=hash_tree_root(payload.transactions),
|
||||
// )
|
||||
func ProcessPayload(st state.BeaconState, payload *enginev1.ExecutionPayload) (state.BeaconState, error) {
|
||||
func ProcessPayload(st state.BeaconState, payload interfaces.ExecutionData) (state.BeaconState, error) {
|
||||
if err := ValidatePayloadWhenMergeCompletes(st, payload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := ValidatePayload(st, payload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
header, err := bellatrix.PayloadToHeader(payload)
|
||||
header, err := wrapper.PayloadToHeader(payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := st.SetLatestExecutionPayloadHeader(header); err != nil {
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(header)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := st.SetLatestExecutionPayloadHeader(wrappedHeader); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// ValidatePayloadHeaderWhenMergeCompletes validates the payload header when the merge completes.
|
||||
func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header *ethpb.ExecutionPayloadHeader) error {
|
||||
func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header interfaces.ExecutionData) error {
|
||||
// Skip validation if the state is not merge compatible.
|
||||
complete, err := IsMergeTransitionComplete(st)
|
||||
if err != nil {
|
||||
@@ -229,20 +236,20 @@ func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header *ethpb
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(header.ParentHash, h.BlockHash) {
|
||||
if !bytes.Equal(header.ParentHash(), h.BlockHash) {
|
||||
return ErrInvalidPayloadBlockHash
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidatePayloadHeader validates the payload header.
|
||||
func ValidatePayloadHeader(st state.BeaconState, header *ethpb.ExecutionPayloadHeader) error {
|
||||
func ValidatePayloadHeader(st state.BeaconState, header interfaces.ExecutionData) error {
|
||||
// Validate header's random mix matches with state in current epoch
|
||||
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(header.PrevRandao, random) {
|
||||
if !bytes.Equal(header.PrevRandao(), random) {
|
||||
return ErrInvalidPayloadPrevRandao
|
||||
}
|
||||
|
||||
@@ -251,22 +258,20 @@ func ValidatePayloadHeader(st state.BeaconState, header *ethpb.ExecutionPayloadH
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if header.Timestamp != uint64(t.Unix()) {
|
||||
if header.Timestamp() != uint64(t.Unix()) {
|
||||
return ErrInvalidPayloadTimeStamp
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProcessPayloadHeader processes the payload header.
|
||||
func ProcessPayloadHeader(st state.BeaconState, header *ethpb.ExecutionPayloadHeader) (state.BeaconState, error) {
|
||||
func ProcessPayloadHeader(st state.BeaconState, header interfaces.ExecutionData) (state.BeaconState, error) {
|
||||
if err := ValidatePayloadHeaderWhenMergeCompletes(st, header); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := ValidatePayloadHeader(st, header); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := st.SetLatestExecutionPayloadHeader(header); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -279,9 +284,9 @@ func GetBlockPayloadHash(blk interfaces.BeaconBlock) ([32]byte, error) {
|
||||
if IsPreBellatrixVersion(blk.Version()) {
|
||||
return payloadHash, nil
|
||||
}
|
||||
payload, err := blk.Body().ExecutionPayload()
|
||||
payload, err := blk.Body().Execution()
|
||||
if err != nil {
|
||||
return payloadHash, err
|
||||
}
|
||||
return bytesutil.ToBytes32(payload.BlockHash), nil
|
||||
return bytesutil.ToBytes32(payload.BlockHash()), nil
|
||||
}
|
||||
|
||||
@@ -9,12 +9,10 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/forks/bellatrix"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/encoding/ssz"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
@@ -23,7 +21,7 @@ import (
|
||||
func Test_IsMergeComplete(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
payload *ethpb.ExecutionPayloadHeader
|
||||
payload *enginev1.ExecutionPayloadHeader
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
@@ -33,7 +31,7 @@ func Test_IsMergeComplete(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "has parent hash",
|
||||
payload: func() *ethpb.ExecutionPayloadHeader {
|
||||
payload: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return h
|
||||
@@ -42,7 +40,7 @@ func Test_IsMergeComplete(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "has fee recipient",
|
||||
payload: func() *ethpb.ExecutionPayloadHeader {
|
||||
payload: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.FeeRecipient = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return h
|
||||
@@ -51,7 +49,7 @@ func Test_IsMergeComplete(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "has state root",
|
||||
payload: func() *ethpb.ExecutionPayloadHeader {
|
||||
payload: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.StateRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return h
|
||||
@@ -60,7 +58,7 @@ func Test_IsMergeComplete(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "has receipt root",
|
||||
payload: func() *ethpb.ExecutionPayloadHeader {
|
||||
payload: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.ReceiptsRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return h
|
||||
@@ -69,7 +67,7 @@ func Test_IsMergeComplete(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "has logs bloom",
|
||||
payload: func() *ethpb.ExecutionPayloadHeader {
|
||||
payload: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.LogsBloom = bytesutil.PadTo([]byte{'a'}, fieldparams.LogsBloomLength)
|
||||
return h
|
||||
@@ -78,7 +76,7 @@ func Test_IsMergeComplete(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "has random",
|
||||
payload: func() *ethpb.ExecutionPayloadHeader {
|
||||
payload: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.PrevRandao = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return h
|
||||
@@ -87,7 +85,7 @@ func Test_IsMergeComplete(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "has base fee",
|
||||
payload: func() *ethpb.ExecutionPayloadHeader {
|
||||
payload: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.BaseFeePerGas = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return h
|
||||
@@ -96,25 +94,16 @@ func Test_IsMergeComplete(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "has block hash",
|
||||
payload: func() *ethpb.ExecutionPayloadHeader {
|
||||
payload: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.BlockHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return h
|
||||
}(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "has tx root",
|
||||
payload: func() *ethpb.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.TransactionsRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return h
|
||||
}(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "has extra data",
|
||||
payload: func() *ethpb.ExecutionPayloadHeader {
|
||||
payload: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.ExtraData = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return h
|
||||
@@ -123,7 +112,7 @@ func Test_IsMergeComplete(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "has block number",
|
||||
payload: func() *ethpb.ExecutionPayloadHeader {
|
||||
payload: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.BlockNumber = 1
|
||||
return h
|
||||
@@ -132,7 +121,7 @@ func Test_IsMergeComplete(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "has gas limit",
|
||||
payload: func() *ethpb.ExecutionPayloadHeader {
|
||||
payload: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.GasLimit = 1
|
||||
return h
|
||||
@@ -141,7 +130,7 @@ func Test_IsMergeComplete(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "has gas used",
|
||||
payload: func() *ethpb.ExecutionPayloadHeader {
|
||||
payload: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.GasUsed = 1
|
||||
return h
|
||||
@@ -150,7 +139,7 @@ func Test_IsMergeComplete(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "has time stamp",
|
||||
payload: func() *ethpb.ExecutionPayloadHeader {
|
||||
payload: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.Timestamp = 1
|
||||
return h
|
||||
@@ -161,7 +150,9 @@ func Test_IsMergeComplete(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(tt.payload))
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(tt.payload)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(wrappedHeader))
|
||||
got, err := blocks.IsMergeTransitionComplete(st)
|
||||
require.NoError(t, err)
|
||||
if got != tt.want {
|
||||
@@ -171,185 +162,6 @@ func Test_IsMergeComplete(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func Test_IsMergeTransitionBlockUsingPayloadHeader(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
payload *enginev1.ExecutionPayload
|
||||
header *ethpb.ExecutionPayloadHeader
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "empty header, empty payload",
|
||||
payload: emptyPayload(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "non-empty header, empty payload",
|
||||
payload: emptyPayload(),
|
||||
header: func() *ethpb.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return h
|
||||
}(),
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has parent hash",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has fee recipient",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.FeeRecipient = bytesutil.PadTo([]byte{'a'}, fieldparams.FeeRecipientLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has state root",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.StateRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has receipt root",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.ReceiptsRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has logs bloom",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.LogsBloom = bytesutil.PadTo([]byte{'a'}, fieldparams.LogsBloomLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has random",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.PrevRandao = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has base fee",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.BaseFeePerGas = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has block hash",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.BlockHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has tx",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.Transactions = [][]byte{{'a'}}
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has extra data",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.ExtraData = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has block number",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.BlockNumber = 1
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has gas limit",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.GasLimit = 1
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has gas used",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.GasUsed = 1
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has timestamp",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.Timestamp = 1
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Body.ExecutionPayload = tt.payload
|
||||
body, err := wrapper.WrappedBeaconBlockBody(blk.Block.Body)
|
||||
require.NoError(t, err)
|
||||
got, err := blocks.IsMergeTransitionBlockUsingPreStatePayloadHeader(tt.header, body)
|
||||
require.NoError(t, err)
|
||||
if got != tt.want {
|
||||
t.Errorf("MergeTransitionBlock() got = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_IsExecutionBlock(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -388,7 +200,7 @@ func Test_IsExecutionEnabled(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
payload *enginev1.ExecutionPayload
|
||||
header *ethpb.ExecutionPayloadHeader
|
||||
header *enginev1.ExecutionPayloadHeader
|
||||
useAltairSt bool
|
||||
want bool
|
||||
}{
|
||||
@@ -408,7 +220,7 @@ func Test_IsExecutionEnabled(t *testing.T) {
|
||||
{
|
||||
name: "non-empty header, empty payload",
|
||||
payload: emptyPayload(),
|
||||
header: func() *ethpb.ExecutionPayloadHeader {
|
||||
header: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return h
|
||||
@@ -427,7 +239,7 @@ func Test_IsExecutionEnabled(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "non-empty header, non-empty payload",
|
||||
header: func() *ethpb.ExecutionPayloadHeader {
|
||||
header: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return h
|
||||
@@ -443,7 +255,9 @@ func Test_IsExecutionEnabled(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(tt.header))
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(tt.header)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(wrappedHeader))
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Body.ExecutionPayload = tt.payload
|
||||
body, err := wrapper.WrappedBeaconBlockBody(blk.Block.Body)
|
||||
@@ -464,7 +278,7 @@ func Test_IsExecutionEnabledUsingHeader(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
payload *enginev1.ExecutionPayload
|
||||
header *ethpb.ExecutionPayloadHeader
|
||||
header *enginev1.ExecutionPayloadHeader
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
@@ -476,7 +290,7 @@ func Test_IsExecutionEnabledUsingHeader(t *testing.T) {
|
||||
{
|
||||
name: "non-empty header, empty payload",
|
||||
payload: emptyPayload(),
|
||||
header: func() *ethpb.ExecutionPayloadHeader {
|
||||
header: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return h
|
||||
@@ -495,7 +309,7 @@ func Test_IsExecutionEnabledUsingHeader(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "non-empty header, non-empty payload",
|
||||
header: func() *ethpb.ExecutionPayloadHeader {
|
||||
header: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return h
|
||||
@@ -527,7 +341,7 @@ func Test_ValidatePayloadWhenMergeCompletes(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
payload *enginev1.ExecutionPayload
|
||||
header *ethpb.ExecutionPayloadHeader
|
||||
header *enginev1.ExecutionPayloadHeader
|
||||
err error
|
||||
}{
|
||||
{
|
||||
@@ -543,7 +357,7 @@ func Test_ValidatePayloadWhenMergeCompletes(t *testing.T) {
|
||||
p.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: func() *ethpb.ExecutionPayloadHeader {
|
||||
header: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.BlockHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return h
|
||||
@@ -557,7 +371,7 @@ func Test_ValidatePayloadWhenMergeCompletes(t *testing.T) {
|
||||
p.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: func() *ethpb.ExecutionPayloadHeader {
|
||||
header: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.BlockHash = bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength)
|
||||
return h
|
||||
@@ -568,8 +382,12 @@ func Test_ValidatePayloadWhenMergeCompletes(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(tt.header))
|
||||
err := blocks.ValidatePayloadWhenMergeCompletes(st, tt.payload)
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(tt.header)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(wrappedHeader))
|
||||
wrappedPayload, err := wrapper.WrappedExecutionPayload(tt.payload)
|
||||
require.NoError(t, err)
|
||||
err = blocks.ValidatePayloadWhenMergeCompletes(st, wrappedPayload)
|
||||
if err != nil {
|
||||
require.Equal(t, tt.err.Error(), err.Error())
|
||||
} else {
|
||||
@@ -617,7 +435,9 @@ func Test_ValidatePayload(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := blocks.ValidatePayload(st, tt.payload)
|
||||
wrappedPayload, err := wrapper.WrappedExecutionPayload(tt.payload)
|
||||
require.NoError(t, err)
|
||||
err = blocks.ValidatePayload(st, wrappedPayload)
|
||||
if err != nil {
|
||||
require.Equal(t, tt.err.Error(), err.Error())
|
||||
} else {
|
||||
@@ -665,12 +485,14 @@ func Test_ProcessPayload(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
st, err := blocks.ProcessPayload(st, tt.payload)
|
||||
wrappedPayload, err := wrapper.WrappedExecutionPayload(tt.payload)
|
||||
require.NoError(t, err)
|
||||
st, err := blocks.ProcessPayload(st, wrappedPayload)
|
||||
if err != nil {
|
||||
require.Equal(t, tt.err.Error(), err.Error())
|
||||
} else {
|
||||
require.Equal(t, tt.err, err)
|
||||
want, err := bellatrix.PayloadToHeader(tt.payload)
|
||||
want, err := wrapper.PayloadToHeader(wrappedPayload)
|
||||
require.Equal(t, tt.err, err)
|
||||
got, err := st.LatestExecutionPayloadHeader()
|
||||
require.NoError(t, err)
|
||||
@@ -688,12 +510,12 @@ func Test_ProcessPayloadHeader(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
tests := []struct {
|
||||
name string
|
||||
header *ethpb.ExecutionPayloadHeader
|
||||
header *enginev1.ExecutionPayloadHeader
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "process passes",
|
||||
header: func() *ethpb.ExecutionPayloadHeader {
|
||||
header: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.PrevRandao = random
|
||||
h.Timestamp = uint64(ts.Unix())
|
||||
@@ -707,7 +529,7 @@ func Test_ProcessPayloadHeader(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "incorrect timestamp",
|
||||
header: func() *ethpb.ExecutionPayloadHeader {
|
||||
header: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.PrevRandao = random
|
||||
h.Timestamp = 1
|
||||
@@ -718,7 +540,9 @@ func Test_ProcessPayloadHeader(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
st, err := blocks.ProcessPayloadHeader(st, tt.header)
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(tt.header)
|
||||
require.NoError(t, err)
|
||||
st, err := blocks.ProcessPayloadHeader(st, wrappedHeader)
|
||||
if err != nil {
|
||||
require.Equal(t, tt.err.Error(), err.Error())
|
||||
} else {
|
||||
@@ -739,12 +563,12 @@ func Test_ValidatePayloadHeader(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
tests := []struct {
|
||||
name string
|
||||
header *ethpb.ExecutionPayloadHeader
|
||||
header *enginev1.ExecutionPayloadHeader
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "process passes",
|
||||
header: func() *ethpb.ExecutionPayloadHeader {
|
||||
header: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.PrevRandao = random
|
||||
h.Timestamp = uint64(ts.Unix())
|
||||
@@ -758,7 +582,7 @@ func Test_ValidatePayloadHeader(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "incorrect timestamp",
|
||||
header: func() *ethpb.ExecutionPayloadHeader {
|
||||
header: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.PrevRandao = random
|
||||
h.Timestamp = 1
|
||||
@@ -769,7 +593,9 @@ func Test_ValidatePayloadHeader(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := blocks.ValidatePayloadHeader(st, tt.header)
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(tt.header)
|
||||
require.NoError(t, err)
|
||||
err = blocks.ValidatePayloadHeader(st, wrappedHeader)
|
||||
require.Equal(t, tt.err, err)
|
||||
})
|
||||
}
|
||||
@@ -778,16 +604,18 @@ func Test_ValidatePayloadHeader(t *testing.T) {
|
||||
func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
emptySt := st.Copy()
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(&ethpb.ExecutionPayloadHeader{BlockHash: []byte{'a'}}))
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(&enginev1.ExecutionPayloadHeader{BlockHash: []byte{'a'}})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(wrappedHeader))
|
||||
tests := []struct {
|
||||
name string
|
||||
state state.BeaconState
|
||||
header *ethpb.ExecutionPayloadHeader
|
||||
header *enginev1.ExecutionPayloadHeader
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "no merge",
|
||||
header: func() *ethpb.ExecutionPayloadHeader {
|
||||
header: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
return h
|
||||
}(),
|
||||
@@ -796,7 +624,7 @@ func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "process passes",
|
||||
header: func() *ethpb.ExecutionPayloadHeader {
|
||||
header: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.ParentHash = []byte{'a'}
|
||||
return h
|
||||
@@ -806,7 +634,7 @@ func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "invalid block hash",
|
||||
header: func() *ethpb.ExecutionPayloadHeader {
|
||||
header: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.ParentHash = []byte{'b'}
|
||||
return h
|
||||
@@ -817,7 +645,9 @@ func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := blocks.ValidatePayloadHeaderWhenMergeCompletes(tt.state, tt.header)
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(tt.header)
|
||||
require.NoError(t, err)
|
||||
err = blocks.ValidatePayloadHeaderWhenMergeCompletes(tt.state, wrappedHeader)
|
||||
require.Equal(t, tt.err, err)
|
||||
})
|
||||
}
|
||||
@@ -825,7 +655,9 @@ func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
|
||||
|
||||
func Test_PayloadToHeader(t *testing.T) {
|
||||
p := emptyPayload()
|
||||
h, err := bellatrix.PayloadToHeader(p)
|
||||
wrappedPayload, err := wrapper.WrappedExecutionPayload(p)
|
||||
require.NoError(t, err)
|
||||
h, err := wrapper.PayloadToHeader(wrappedPayload)
|
||||
require.NoError(t, err)
|
||||
txRoot, err := ssz.TransactionsRoot(p.Transactions)
|
||||
require.NoError(t, err)
|
||||
@@ -864,7 +696,9 @@ func Test_PayloadToHeader(t *testing.T) {
|
||||
|
||||
func BenchmarkBellatrixComplete(b *testing.B) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(b, 1)
|
||||
require.NoError(b, st.SetLatestExecutionPayloadHeader(emptyPayloadHeader()))
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(emptyPayloadHeader())
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, st.SetLatestExecutionPayloadHeader(wrappedHeader))
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
@@ -873,8 +707,8 @@ func BenchmarkBellatrixComplete(b *testing.B) {
|
||||
}
|
||||
}
|
||||
|
||||
func emptyPayloadHeader() *ethpb.ExecutionPayloadHeader {
|
||||
return &ethpb.ExecutionPayloadHeader{
|
||||
func emptyPayloadHeader() *enginev1.ExecutionPayloadHeader {
|
||||
return &enginev1.ExecutionPayloadHeader{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
|
||||
beacon-chain/core/blocks/withdrawals.go (new file, 78 lines)
@@ -0,0 +1,78 @@
package blocks

import (
    "bytes"

    "github.com/prysmaticlabs/prysm/beacon-chain/core/signing"
    "github.com/prysmaticlabs/prysm/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/config/params"
    "github.com/prysmaticlabs/prysm/crypto/hash/htr"
    "github.com/prysmaticlabs/prysm/encoding/bytesutil"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/time/slots"
)

const executionToBLSPadding = 12

// ProcessBLSToExecutionChange validates a SignedBLSToExecution message and
// changes the validator's withdrawal address accordingly.
//
// Spec pseudocode definition:
//
//def process_bls_to_execution_change(state: BeaconState, signed_address_change: SignedBLSToExecutionChange) -> None:
//    validator = state.validators[address_change.validator_index]
//
//    assert validator.withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX
//    assert validator.withdrawal_credentials[1:] == hash(address_change.from_bls_pubkey)[1:]
//
//    domain = get_domain(state, DOMAIN_BLS_TO_EXECUTION_CHANGE)
//    signing_root = compute_signing_root(address_change, domain)
//    assert bls.Verify(address_change.from_bls_pubkey, signing_root, signed_address_change.signature)
//
//    validator.withdrawal_credentials = (
//        ETH1_ADDRESS_WITHDRAWAL_PREFIX
//        + b'\x00' * 11
//        + address_change.to_execution_address
//    )
//
func ProcessBLSToExecutionChange(st state.BeaconState, signed *ethpb.SignedBLSToExecutionChange) (state.BeaconState, error) {
    if signed == nil {
        return st, errNilSignedWithdrawalMessage
    }
    message := signed.Message
    if message == nil {
        return st, errNilWithdrawalMessage
    }

    val, err := st.ValidatorAtIndex(message.ValidatorIndex)
    if err != nil {
        return nil, err
    }
    cred := val.WithdrawalCredentials
    if cred[0] != params.BeaconConfig().BLSWithdrawalPrefixByte {
        return nil, errInvalidBLSPrefix
    }

    // hash the public key and verify it matches the withdrawal credentials
    fromPubkey := message.FromBlsPubkey
    pubkeyChunks := [][32]byte{bytesutil.ToBytes32(fromPubkey[:32]), bytesutil.ToBytes32(fromPubkey[32:])}
    digest := make([][32]byte, 1)
    htr.VectorizedSha256(pubkeyChunks, digest)
    if !bytes.Equal(digest[0][1:], cred[1:]) {
        return nil, errInvalidWithdrawalCredentials
    }

    epoch := slots.ToEpoch(st.Slot())
    domain, err := signing.Domain(st.Fork(), epoch, params.BeaconConfig().DomainBLSToExecutionChange, st.GenesisValidatorsRoot())
    if err != nil {
        return nil, err
    }
    if err := signing.VerifySigningRoot(message, fromPubkey, signed.Signature, domain); err != nil {
        return nil, signing.ErrSigFailedToVerify
    }
    newCredentials := make([]byte, executionToBLSPadding)
    newCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
    val.WithdrawalCredentials = append(newCredentials, message.ToExecutionAddress...)
    err = st.UpdateValidatorAtIndex(message.ValidatorIndex, val)
    return st, err
}
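The credential rewrite at the end of ProcessBLSToExecutionChange produces a 32-byte value: one prefix byte, eleven zero bytes of padding, then the 20-byte execution address. Below is a minimal standalone sketch of that layout only; the 0x01 prefix value and the helper name are illustrative stand-ins for params.BeaconConfig().ETH1AddressWithdrawalPrefixByte and the in-place update above, not part of this change.

package main

import (
    "encoding/hex"
    "fmt"
)

// Illustrative constants; Prysm reads these from params.BeaconConfig().
const (
    eth1AddressWithdrawalPrefix = byte(0x01) // assumed ETH1_ADDRESS_WITHDRAWAL_PREFIX
    executionToBLSPadding       = 12         // prefix byte + 11 zero bytes
)

// newWithdrawalCredentials mirrors the credential rewrite above:
// index 0 holds the prefix, indexes 1-11 are zero, indexes 12-31 hold the address.
func newWithdrawalCredentials(executionAddress []byte) []byte {
    cred := make([]byte, executionToBLSPadding)
    cred[0] = eth1AddressWithdrawalPrefix
    return append(cred, executionAddress...)
}

func main() {
    addr := make([]byte, 20) // a 20-byte execution-layer address
    addr[19] = 0xaa
    fmt.Println(hex.EncodeToString(newWithdrawalCredentials(addr))) // 32 bytes, hex-encoded
}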
beacon-chain/core/blocks/withdrawals_test.go (new file, 193 lines)
@@ -0,0 +1,193 @@
|
||||
package blocks_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/crypto/hash/htr"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
|
||||
func TestProcessBLSToExecutionChange(t *testing.T) {
|
||||
t.Run("happy case", func(t *testing.T) {
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
pubkey := priv.PublicKey().Marshal()
|
||||
|
||||
message := &ethpb.BLSToExecutionChange{
|
||||
ToExecutionAddress: []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13},
|
||||
ValidatorIndex: 0,
|
||||
FromBlsPubkey: pubkey,
|
||||
}
|
||||
|
||||
pubkeyChunks := [][32]byte{bytesutil.ToBytes32(pubkey[:32]), bytesutil.ToBytes32(pubkey[32:])}
|
||||
digest := make([][32]byte, 1)
|
||||
htr.VectorizedSha256(pubkeyChunks, digest)
|
||||
digest[0][0] = params.BeaconConfig().BLSWithdrawalPrefixByte
|
||||
|
||||
registry := []*ethpb.Validator{
|
||||
{
|
||||
WithdrawalCredentials: digest[0][:],
|
||||
},
|
||||
}
|
||||
st, err := v1.InitializeFromProto(&ethpb.BeaconState{
|
||||
Validators: registry,
|
||||
Fork: &ethpb.Fork{
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
},
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch * 5,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
signature, err := signing.ComputeDomainAndSign(st, time.CurrentEpoch(st), message, params.BeaconConfig().DomainBLSToExecutionChange, priv)
|
||||
require.NoError(t, err)
|
||||
|
||||
signed := &ethpb.SignedBLSToExecutionChange{
|
||||
Message: message,
|
||||
Signature: signature,
|
||||
}
|
||||
|
||||
st, err = blocks.ProcessBLSToExecutionChange(st, signed)
|
||||
require.NoError(t, err)
|
||||
|
||||
val, err := st.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, message.ToExecutionAddress, val.WithdrawalCredentials[12:])
|
||||
})
|
||||
|
||||
t.Run("non-existent validator", func(t *testing.T) {
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
pubkey := priv.PublicKey().Marshal()
|
||||
|
||||
message := &ethpb.BLSToExecutionChange{
|
||||
ToExecutionAddress: []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13},
|
||||
ValidatorIndex: 1,
|
||||
FromBlsPubkey: pubkey,
|
||||
}
|
||||
|
||||
pubkeyChunks := [][32]byte{bytesutil.ToBytes32(pubkey[:32]), bytesutil.ToBytes32(pubkey[32:])}
|
||||
digest := make([][32]byte, 1)
|
||||
htr.VectorizedSha256(pubkeyChunks, digest)
|
||||
digest[0][0] = params.BeaconConfig().BLSWithdrawalPrefixByte
|
||||
|
||||
registry := []*ethpb.Validator{
|
||||
{
|
||||
WithdrawalCredentials: digest[0][:],
|
||||
},
|
||||
}
|
||||
st, err := v1.InitializeFromProto(&ethpb.BeaconState{
|
||||
Validators: registry,
|
||||
Fork: &ethpb.Fork{
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
},
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch * 5,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
signature, err := signing.ComputeDomainAndSign(st, time.CurrentEpoch(st), message, params.BeaconConfig().DomainBLSToExecutionChange, priv)
|
||||
require.NoError(t, err)
|
||||
|
||||
signed := &ethpb.SignedBLSToExecutionChange{
|
||||
Message: message,
|
||||
Signature: signature,
|
||||
}
|
||||
|
||||
_, err = blocks.ProcessBLSToExecutionChange(st, signed)
|
||||
require.ErrorContains(t, "out of range", err)
|
||||
})
|
||||
|
||||
t.Run("signature does not verify", func(t *testing.T) {
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
pubkey := priv.PublicKey().Marshal()
|
||||
|
||||
message := &ethpb.BLSToExecutionChange{
|
||||
ToExecutionAddress: []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13},
|
||||
ValidatorIndex: 0,
|
||||
FromBlsPubkey: pubkey,
|
||||
}
|
||||
|
||||
registry := []*ethpb.Validator{
|
||||
{
|
||||
WithdrawalCredentials: params.BeaconConfig().ZeroHash[:],
|
||||
},
|
||||
}
|
||||
st, err := v1.InitializeFromProto(&ethpb.BeaconState{
|
||||
Validators: registry,
|
||||
Fork: &ethpb.Fork{
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
},
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch * 5,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
signature, err := signing.ComputeDomainAndSign(st, time.CurrentEpoch(st), message, params.BeaconConfig().DomainBLSToExecutionChange, priv)
|
||||
require.NoError(t, err)
|
||||
|
||||
signed := &ethpb.SignedBLSToExecutionChange{
|
||||
Message: message,
|
||||
Signature: signature,
|
||||
}
|
||||
|
||||
_, err = blocks.ProcessBLSToExecutionChange(st, signed)
|
||||
require.ErrorContains(t, "withdrawal credentials do not match", err)
|
||||
})
|
||||
|
||||
t.Run("invalid BLS prefix", func(t *testing.T) {
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
pubkey := priv.PublicKey().Marshal()
|
||||
|
||||
message := &ethpb.BLSToExecutionChange{
|
||||
ToExecutionAddress: []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13},
|
||||
ValidatorIndex: 0,
|
||||
FromBlsPubkey: pubkey,
|
||||
}
|
||||
|
||||
pubkeyChunks := [][32]byte{bytesutil.ToBytes32(pubkey[:32]), bytesutil.ToBytes32(pubkey[32:])}
|
||||
digest := make([][32]byte, 1)
|
||||
htr.VectorizedSha256(pubkeyChunks, digest)
|
||||
digest[0][0] = params.BeaconConfig().BLSWithdrawalPrefixByte
|
||||
|
||||
registry := []*ethpb.Validator{
|
||||
{
|
||||
WithdrawalCredentials: digest[0][:],
|
||||
},
|
||||
}
|
||||
registry[0].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
|
||||
st, err := v1.InitializeFromProto(&ethpb.BeaconState{
|
||||
Validators: registry,
|
||||
Fork: &ethpb.Fork{
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
},
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch * 5,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
signature, err := signing.ComputeDomainAndSign(st, time.CurrentEpoch(st), message, params.BeaconConfig().DomainBLSToExecutionChange, priv)
|
||||
require.NoError(t, err)
|
||||
|
||||
signed := &ethpb.SignedBLSToExecutionChange{
|
||||
Message: message,
|
||||
Signature: signature,
|
||||
}
|
||||
|
||||
_, err = blocks.ProcessBLSToExecutionChange(st, signed)
|
||||
require.ErrorContains(t, "withdrawal credential prefix is not a BLS prefix", err)
|
||||
|
||||
})
|
||||
}
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
@@ -19,6 +20,12 @@ func UnrealizedCheckpoints(st state.BeaconState) (*ethpb.Checkpoint, *ethpb.Chec
|
||||
return nil, nil, errNilState
|
||||
}
|
||||
|
||||
if slots.ToEpoch(st.Slot()) <= params.BeaconConfig().GenesisEpoch+1 {
|
||||
jc := st.CurrentJustifiedCheckpoint()
|
||||
fc := st.FinalizedCheckpoint()
|
||||
return jc, fc, nil
|
||||
}
|
||||
|
||||
activeBalance, prevTarget, currentTarget, err := st.UnrealizedCheckpointBalances()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
|
||||
@@ -18,7 +18,7 @@ import (
|
||||
// precomputed instances of validator attesting records and the total
|
||||
// balances attested in an epoch.
|
||||
func New(ctx context.Context, s state.BeaconState) ([]*Validator, *Balance, error) {
|
||||
_, span := trace.StartSpan(ctx, "precomputeEpoch.New")
|
||||
ctx, span := trace.StartSpan(ctx, "precomputeEpoch.New")
|
||||
defer span.End()
|
||||
|
||||
pValidators := make([]*Validator, s.NumValidators())
|
||||
|
||||
@@ -14,6 +14,7 @@ go_library(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/v3:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -25,6 +26,7 @@ go_test(
|
||||
":go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
|
||||
@@ -1,18 +1,17 @@
|
||||
package execution
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
v3 "github.com/prysmaticlabs/prysm/beacon-chain/state/v3"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// UpgradeToBellatrix upgrades a generic input state to its Bellatrix-versioned equivalent.
|
||||
// It inserts an empty `ExecutionPayloadHeader` into the state.
|
||||
func UpgradeToBellatrix(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
|
||||
func UpgradeToBellatrix(state state.BeaconState) (state.BeaconState, error) {
|
||||
epoch := time.CurrentEpoch(state)
|
||||
|
||||
currentSyncCommittee, err := state.CurrentSyncCommittee()
|
||||
@@ -65,7 +64,7 @@ func UpgradeToBellatrix(ctx context.Context, state state.BeaconState) (state.Bea
|
||||
InactivityScores: inactivityScores,
|
||||
CurrentSyncCommittee: currentSyncCommittee,
|
||||
NextSyncCommittee: nextSyncCommittee,
|
||||
LatestExecutionPayloadHeader: &ethpb.ExecutionPayloadHeader{
|
||||
LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeader{
|
||||
ParentHash: make([]byte, 32),
|
||||
FeeRecipient: make([]byte, 20),
|
||||
StateRoot: make([]byte, 32),
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
package execution_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/execution"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
func TestUpgradeToBellatrix(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
preForkState := st.Copy()
|
||||
mSt, err := execution.UpgradeToBellatrix(context.Background(), st)
|
||||
mSt, err := execution.UpgradeToBellatrix(st)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, preForkState.GenesisTime(), mSt.GenesisTime())
|
||||
@@ -61,7 +61,7 @@ func TestUpgradeToBellatrix(t *testing.T) {
|
||||
|
||||
header, err := mSt.LatestExecutionPayloadHeader()
|
||||
require.NoError(t, err)
|
||||
wanted := &ethpb.ExecutionPayloadHeader{
|
||||
wanted := &enginev1.ExecutionPayloadHeader{
|
||||
ParentHash: make([]byte, 32),
|
||||
FeeRecipient: make([]byte, 20),
|
||||
StateRoot: make([]byte, 32),
|
||||
|
||||
@@ -121,8 +121,8 @@ func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
|
||||
ClearCache()
|
||||
_, proposerIndexToSlots, err := CommitteeAssignments(context.Background(), state, 0)
|
||||
require.NoError(t, err, "Failed to determine CommitteeAssignments")
|
||||
for _, slots := range proposerIndexToSlots {
|
||||
for _, s := range slots {
|
||||
for _, ss := range proposerIndexToSlots {
|
||||
for _, s := range ss {
|
||||
assert.NotEqual(t, uint64(0), s, "No proposer should be assigned to slot 0")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -96,8 +96,8 @@ func ComputeShuffledIndex(index types.ValidatorIndex, indexCount uint64, seed [3
|
||||
copy(buf[:32], seed[:])
|
||||
for {
|
||||
buf[seedSize] = round
|
||||
hash := hashfunc(buf[:pivotViewSize])
|
||||
hash8 := hash[:8]
|
||||
h := hashfunc(buf[:pivotViewSize])
|
||||
hash8 := h[:8]
|
||||
hash8Int := bytesutil.FromBytes8(hash8)
|
||||
pivot := hash8Int % indexCount
|
||||
flip := (pivot + indexCount - uint64(index)) % indexCount
|
||||
|
||||
@@ -16,8 +16,8 @@ go_library(
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_ferranbt_fastssz//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
package signing
|
||||
|
||||
import (
|
||||
fssz "github.com/ferranbt/fastssz"
|
||||
"github.com/pkg/errors"
|
||||
fssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
|
||||
@@ -112,7 +112,7 @@ func TestSigningRoot_ComputeForkDigest(t *testing.T) {
|
||||
|
||||
func TestFuzzverifySigningRoot_10000(_ *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := &ethpb.BeaconState{}
|
||||
st := &ethpb.BeaconState{}
|
||||
pubkey := [fieldparams.BLSPubkeyLength]byte{}
|
||||
sig := [96]byte{}
|
||||
domain := [4]byte{}
|
||||
@@ -120,17 +120,17 @@ func TestFuzzverifySigningRoot_10000(_ *testing.T) {
|
||||
var s []byte
|
||||
var d []byte
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(st)
|
||||
fuzzer.Fuzz(&pubkey)
|
||||
fuzzer.Fuzz(&sig)
|
||||
fuzzer.Fuzz(&domain)
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(st)
|
||||
fuzzer.Fuzz(&p)
|
||||
fuzzer.Fuzz(&s)
|
||||
fuzzer.Fuzz(&d)
|
||||
err := signing.VerifySigningRoot(state, pubkey[:], sig[:], domain[:])
|
||||
err := signing.VerifySigningRoot(st, pubkey[:], sig[:], domain[:])
|
||||
_ = err
|
||||
err = signing.VerifySigningRoot(state, p, s, d)
|
||||
err = signing.VerifySigningRoot(st, p, s, d)
|
||||
_ = err
|
||||
}
|
||||
}
|
||||
|
||||
@@ -43,9 +43,9 @@ func TestCurrentEpoch_OK(t *testing.T) {
|
||||
{slot: 200, epoch: 6},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
state, err := v1.InitializeFromProto(&eth.BeaconState{Slot: tt.slot})
|
||||
st, err := v1.InitializeFromProto(&eth.BeaconState{Slot: tt.slot})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tt.epoch, time.CurrentEpoch(state), "ActiveCurrentEpoch(%d)", state.Slot())
|
||||
assert.Equal(t, tt.epoch, time.CurrentEpoch(st), "ActiveCurrentEpoch(%d)", st.Slot())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -59,9 +59,9 @@ func TestPrevEpoch_OK(t *testing.T) {
|
||||
{slot: 2 * params.BeaconConfig().SlotsPerEpoch, epoch: 1},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
state, err := v1.InitializeFromProto(&eth.BeaconState{Slot: tt.slot})
|
||||
st, err := v1.InitializeFromProto(&eth.BeaconState{Slot: tt.slot})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tt.epoch, time.PrevEpoch(state), "ActivePrevEpoch(%d)", state.Slot())
|
||||
assert.Equal(t, tt.epoch, time.PrevEpoch(st), "ActivePrevEpoch(%d)", st.Slot())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -77,9 +77,9 @@ func TestNextEpoch_OK(t *testing.T) {
|
||||
{slot: 200, epoch: types.Epoch(200/params.BeaconConfig().SlotsPerEpoch + 1)},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
state, err := v1.InitializeFromProto(&eth.BeaconState{Slot: tt.slot})
|
||||
st, err := v1.InitializeFromProto(&eth.BeaconState{Slot: tt.slot})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tt.epoch, time.NextEpoch(state), "NextEpoch(%d)", state.Slot())
|
||||
assert.Equal(t, tt.epoch, time.NextEpoch(st), "NextEpoch(%d)", st.Slot())
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -54,23 +54,23 @@ import (
|
||||
// return state
|
||||
// This method differs from the spec in that deposits are processed beforehand rather than at the end of the function.
|
||||
func GenesisBeaconState(ctx context.Context, deposits []*ethpb.Deposit, genesisTime uint64, eth1Data *ethpb.Eth1Data) (state.BeaconState, error) {
|
||||
state, err := EmptyGenesisState()
|
||||
st, err := EmptyGenesisState()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Process initial deposits.
|
||||
state, err = helpers.UpdateGenesisEth1Data(state, deposits, eth1Data)
|
||||
st, err = helpers.UpdateGenesisEth1Data(st, deposits, eth1Data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
state, err = b.ProcessPreGenesisDeposits(ctx, state, deposits)
|
||||
st, err = b.ProcessPreGenesisDeposits(ctx, st, deposits)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process validator deposits")
|
||||
}
|
||||
|
||||
return OptimizedGenesisBeaconState(genesisTime, state, state.Eth1Data())
|
||||
return OptimizedGenesisBeaconState(genesisTime, st, st.Eth1Data())
|
||||
}
|
||||
|
||||
// OptimizedGenesisBeaconState is used to create a state that has already processed deposits. This is to efficiently
|
||||
@@ -111,7 +111,7 @@ func OptimizedGenesisBeaconState(genesisTime uint64, preState state.BeaconState,
|
||||
return nil, errors.Wrapf(err, "could not hash tree root genesis validators %v", err)
|
||||
}
|
||||
|
||||
state := &ethpb.BeaconState{
|
||||
st := &ethpb.BeaconState{
|
||||
// Misc fields.
|
||||
Slot: 0,
|
||||
GenesisTime: genesisTime,
|
||||
@@ -170,18 +170,18 @@ func OptimizedGenesisBeaconState(genesisTime uint64, preState state.BeaconState,
|
||||
return nil, errors.Wrap(err, "could not hash tree root empty block body")
|
||||
}
|
||||
|
||||
state.LatestBlockHeader = &ethpb.BeaconBlockHeader{
|
||||
st.LatestBlockHeader = &ethpb.BeaconBlockHeader{
|
||||
ParentRoot: zeroHash,
|
||||
StateRoot: zeroHash,
|
||||
BodyRoot: bodyRoot[:],
|
||||
}
|
||||
|
||||
return v1.InitializeFromProto(state)
|
||||
return v1.InitializeFromProto(st)
|
||||
}
|
||||
|
||||
// EmptyGenesisState returns an empty beacon state object.
|
||||
func EmptyGenesisState() (state.BeaconState, error) {
|
||||
state := &ethpb.BeaconState{
|
||||
st := &ethpb.BeaconState{
|
||||
// Misc fields.
|
||||
Slot: 0,
|
||||
Fork: &ethpb.Fork{
|
||||
@@ -203,7 +203,7 @@ func EmptyGenesisState() (state.BeaconState, error) {
|
||||
Eth1DataVotes: []*ethpb.Eth1Data{},
|
||||
Eth1DepositIndex: 0,
|
||||
}
|
||||
return v1.InitializeFromProto(state)
|
||||
return v1.InitializeFromProto(st)
|
||||
}
|
||||
|
||||
// IsValidGenesisState gets called whenever there's a deposit event,
|
||||
|
||||
@@ -279,7 +279,7 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
|
||||
}
|
||||
|
||||
if time.CanUpgradeToBellatrix(state.Slot()) {
|
||||
state, err = execution.UpgradeToBellatrix(ctx, state)
|
||||
state, err = execution.UpgradeToBellatrix(state)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, err
|
||||
|
||||
@@ -42,7 +42,7 @@ import (
|
||||
// assert block.state_root == hash_tree_root(state)
|
||||
func ExecuteStateTransitionNoVerifyAnySig(
|
||||
ctx context.Context,
|
||||
state state.BeaconState,
|
||||
st state.BeaconState,
|
||||
signed interfaces.SignedBeaconBlock,
|
||||
) (*bls.SignatureBatch, state.BeaconState, error) {
|
||||
if ctx.Err() != nil {
|
||||
@@ -57,21 +57,21 @@ func ExecuteStateTransitionNoVerifyAnySig(
|
||||
var err error
|
||||
|
||||
interop.WriteBlockToDisk(signed, false /* Has the block failed */)
|
||||
interop.WriteStateToDisk(state)
|
||||
interop.WriteStateToDisk(st)
|
||||
|
||||
state, err = ProcessSlotsUsingNextSlotCache(ctx, state, signed.Block().ParentRoot(), signed.Block().Slot())
|
||||
st, err = ProcessSlotsUsingNextSlotCache(ctx, st, signed.Block().ParentRoot(), signed.Block().Slot())
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not process slots")
|
||||
}
|
||||
|
||||
// Execute per block transition.
|
||||
set, state, err := ProcessBlockNoVerifyAnySig(ctx, state, signed)
|
||||
set, st, err := ProcessBlockNoVerifyAnySig(ctx, st, signed)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not process block")
|
||||
}
|
||||
|
||||
// State root validation.
|
||||
postStateRoot, err := state.HashTreeRoot(ctx)
|
||||
postStateRoot, err := st.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -80,7 +80,7 @@ func ExecuteStateTransitionNoVerifyAnySig(
|
||||
postStateRoot[:], signed.Block().StateRoot())
|
||||
}
|
||||
|
||||
return set, state, nil
|
||||
return set, st, nil
|
||||
}
|
||||
|
||||
// CalculateStateRoot defines the procedure for a state transition function.
|
||||
@@ -155,7 +155,7 @@ func CalculateStateRoot(
|
||||
// process_operations(state, block.body)
|
||||
func ProcessBlockNoVerifyAnySig(
|
||||
ctx context.Context,
|
||||
state state.BeaconState,
|
||||
st state.BeaconState,
|
||||
signed interfaces.SignedBeaconBlock,
|
||||
) (*bls.SignatureBatch, state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "core.state.ProcessBlockNoVerifyAnySig")
|
||||
@@ -164,27 +164,27 @@ func ProcessBlockNoVerifyAnySig(
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if state.Version() != signed.Block().Version() {
|
||||
return nil, nil, fmt.Errorf("state and block are different version. %d != %d", state.Version(), signed.Block().Version())
|
||||
if st.Version() != signed.Block().Version() {
|
||||
return nil, nil, fmt.Errorf("state and block are different version. %d != %d", st.Version(), signed.Block().Version())
|
||||
}
|
||||
|
||||
blk := signed.Block()
|
||||
state, err := ProcessBlockForStateRoot(ctx, state, signed)
|
||||
st, err := ProcessBlockForStateRoot(ctx, st, signed)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
bSet, err := b.BlockSignatureBatch(state, blk.ProposerIndex(), signed.Signature(), blk.HashTreeRoot)
|
||||
bSet, err := b.BlockSignatureBatch(st, blk.ProposerIndex(), signed.Signature(), blk.HashTreeRoot)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, nil, errors.Wrap(err, "could not retrieve block signature set")
|
||||
}
|
||||
rSet, err := b.RandaoSignatureBatch(ctx, state, signed.Block().Body().RandaoReveal())
|
||||
rSet, err := b.RandaoSignatureBatch(ctx, st, signed.Block().Body().RandaoReveal())
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, nil, errors.Wrap(err, "could not retrieve randao signature set")
|
||||
}
|
||||
aSet, err := b.AttestationSignatureBatch(ctx, state, signed.Block().Body().Attestations())
|
||||
aSet, err := b.AttestationSignatureBatch(ctx, st, signed.Block().Body().Attestations())
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not retrieve attestation signature set")
|
||||
}
|
||||
@@ -193,7 +193,7 @@ func ProcessBlockNoVerifyAnySig(
|
||||
set := bls.NewSet()
|
||||
set.Join(bSet).Join(rSet).Join(aSet)
|
||||
|
||||
return set, state, nil
|
||||
return set, st, nil
|
||||
}
|
||||
|
||||
// ProcessOperationsNoVerifyAttsSigs processes the operations in the beacon block and updates beacon state
|
||||
@@ -238,7 +238,7 @@ func ProcessOperationsNoVerifyAttsSigs(
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case version.Altair, version.Bellatrix:
|
||||
case version.Altair, version.Bellatrix, version.BellatrixBlind:
|
||||
state, err = altairOperations(ctx, state, signedBeaconBlock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -290,24 +290,17 @@ func ProcessBlockForStateRoot(
|
||||
return nil, errors.Wrap(err, "could not check if execution is enabled")
|
||||
}
|
||||
if enabled {
|
||||
executionData, err := blk.Body().Execution()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if blk.IsBlinded() {
|
||||
header, err := blk.Body().ExecutionPayloadHeader()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
state, err = b.ProcessPayloadHeader(state, header)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process execution payload header")
|
||||
}
|
||||
state, err = b.ProcessPayloadHeader(state, executionData)
|
||||
} else {
|
||||
payload, err := blk.Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
state, err = b.ProcessPayload(state, payload)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process execution payload")
|
||||
}
|
||||
state, err = b.ProcessPayload(state, executionData)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process execution data")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -348,45 +341,45 @@ func ProcessBlockForStateRoot(
|
||||
// This calls altair block operations.
|
||||
func altairOperations(
|
||||
ctx context.Context,
|
||||
state state.BeaconState,
|
||||
st state.BeaconState,
|
||||
signedBeaconBlock interfaces.SignedBeaconBlock) (state.BeaconState, error) {
|
||||
state, err := b.ProcessProposerSlashings(ctx, state, signedBeaconBlock.Block().Body().ProposerSlashings(), v.SlashValidator)
|
||||
st, err := b.ProcessProposerSlashings(ctx, st, signedBeaconBlock.Block().Body().ProposerSlashings(), v.SlashValidator)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process altair proposer slashing")
|
||||
}
|
||||
state, err = b.ProcessAttesterSlashings(ctx, state, signedBeaconBlock.Block().Body().AttesterSlashings(), v.SlashValidator)
|
||||
st, err = b.ProcessAttesterSlashings(ctx, st, signedBeaconBlock.Block().Body().AttesterSlashings(), v.SlashValidator)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process altair attester slashing")
|
||||
}
|
||||
state, err = altair.ProcessAttestationsNoVerifySignature(ctx, state, signedBeaconBlock)
|
||||
st, err = altair.ProcessAttestationsNoVerifySignature(ctx, st, signedBeaconBlock)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process altair attestation")
|
||||
}
|
||||
if _, err := altair.ProcessDeposits(ctx, state, signedBeaconBlock.Block().Body().Deposits()); err != nil {
|
||||
if _, err := altair.ProcessDeposits(ctx, st, signedBeaconBlock.Block().Body().Deposits()); err != nil {
|
||||
return nil, errors.Wrap(err, "could not process altair deposit")
|
||||
}
|
||||
return b.ProcessVoluntaryExits(ctx, state, signedBeaconBlock.Block().Body().VoluntaryExits())
|
||||
return b.ProcessVoluntaryExits(ctx, st, signedBeaconBlock.Block().Body().VoluntaryExits())
|
||||
}
|
||||
|
||||
// This calls phase 0 block operations.
|
||||
func phase0Operations(
|
||||
ctx context.Context,
|
||||
state state.BeaconState,
|
||||
st state.BeaconState,
|
||||
signedBeaconBlock interfaces.SignedBeaconBlock) (state.BeaconState, error) {
|
||||
state, err := b.ProcessProposerSlashings(ctx, state, signedBeaconBlock.Block().Body().ProposerSlashings(), v.SlashValidator)
|
||||
st, err := b.ProcessProposerSlashings(ctx, st, signedBeaconBlock.Block().Body().ProposerSlashings(), v.SlashValidator)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process block proposer slashings")
|
||||
}
|
||||
state, err = b.ProcessAttesterSlashings(ctx, state, signedBeaconBlock.Block().Body().AttesterSlashings(), v.SlashValidator)
|
||||
st, err = b.ProcessAttesterSlashings(ctx, st, signedBeaconBlock.Block().Body().AttesterSlashings(), v.SlashValidator)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process block attester slashings")
|
||||
}
|
||||
state, err = b.ProcessAttestationsNoVerifySignature(ctx, state, signedBeaconBlock)
|
||||
st, err = b.ProcessAttestationsNoVerifySignature(ctx, st, signedBeaconBlock)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process block attestations")
|
||||
}
|
||||
if _, err := b.ProcessDeposits(ctx, state, signedBeaconBlock.Block().Body().Deposits()); err != nil {
|
||||
if _, err := b.ProcessDeposits(ctx, st, signedBeaconBlock.Block().Body().Deposits()); err != nil {
|
||||
return nil, errors.Wrap(err, "could not process deposits")
|
||||
}
|
||||
return b.ProcessVoluntaryExits(ctx, state, signedBeaconBlock.Block().Body().VoluntaryExits())
|
||||
return b.ProcessVoluntaryExits(ctx, st, signedBeaconBlock.Block().Body().VoluntaryExits())
|
||||
}
|
||||
|
||||
@@ -8,7 +8,7 @@ go_library(
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/db/iface",
|
||||
# Other packages must use github.com/prysmaticlabs/prysm/beacon-chain/db.Database alias.
|
||||
visibility = ["//beacon-chain/db:__subpackages__"],
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/db/filters:go_default_library",
|
||||
"//beacon-chain/slasher/types:go_default_library",
|
||||
|
||||
@@ -53,6 +53,7 @@ type ReadOnlyDatabase interface {
|
||||
PowchainData(ctx context.Context) (*ethpb.ETH1ChainData, error)
|
||||
// Fee recipients operations.
|
||||
FeeRecipientByValidatorID(ctx context.Context, id types.ValidatorIndex) (common.Address, error)
|
||||
RegistrationByValidatorID(ctx context.Context, id types.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
|
||||
// origin checkpoint sync support
|
||||
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
|
||||
BackfillBlockRoot(ctx context.Context) ([32]byte, error)
|
||||
|
||||
@@ -17,6 +17,7 @@ go_library(
|
||||
"log.go",
|
||||
"migration.go",
|
||||
"migration_archived_index.go",
|
||||
"migration_blinded_beacon_blocks.go",
|
||||
"migration_block_slot_index.go",
|
||||
"migration_state_validators.go",
|
||||
"powchain.go",
|
||||
@@ -60,11 +61,11 @@ go_library(
|
||||
"@com_github_dgraph_io_ristretto//:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_ferranbt_fastssz//:go_default_library",
|
||||
"@com_github_golang_snappy//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
"@com_github_prysmaticlabs_prombbolt//:go_default_library",
|
||||
"@com_github_schollz_progressbar_v3//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
|
||||
// LastArchivedSlot from the db.
|
||||
func (s *Store) LastArchivedSlot(ctx context.Context) (types.Slot, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedSlot")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedSlot")
|
||||
defer span.End()
|
||||
var index types.Slot
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
@@ -26,7 +26,7 @@ func (s *Store) LastArchivedSlot(ctx context.Context) (types.Slot, error) {
|
||||
|
||||
// LastArchivedRoot from the db.
|
||||
func (s *Store) LastArchivedRoot(ctx context.Context) [32]byte {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedRoot")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedRoot")
|
||||
defer span.End()
|
||||
|
||||
var blockRoot []byte
|
||||
@@ -44,7 +44,7 @@ func (s *Store) LastArchivedRoot(ctx context.Context) [32]byte {
|
||||
// ArchivedPointRoot returns the block root of an archived point from the DB.
|
||||
// This is essential for cold state management and to restore a cold state.
|
||||
func (s *Store) ArchivedPointRoot(ctx context.Context, slot types.Slot) [32]byte {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.ArchivedPointRoot")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.ArchivedPointRoot")
|
||||
defer span.End()
|
||||
|
||||
var blockRoot []byte
|
||||
@@ -61,7 +61,7 @@ func (s *Store) ArchivedPointRoot(ctx context.Context, slot types.Slot) [32]byte
|
||||
|
||||
// HasArchivedPoint returns true if an archived point exists in DB.
|
||||
func (s *Store) HasArchivedPoint(ctx context.Context, slot types.Slot) bool {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.HasArchivedPoint")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.HasArchivedPoint")
|
||||
defer span.End()
|
||||
var exists bool
|
||||
if err := s.db.View(func(tx *bolt.Tx) error {
|
||||
|
||||
@@ -6,10 +6,11 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
ssz "github.com/ferranbt/fastssz"
|
||||
"github.com/golang/snappy"
|
||||
"github.com/pkg/errors"
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
@@ -53,7 +54,7 @@ func (s *Store) Block(ctx context.Context, blockRoot [32]byte) (interfaces.Signe
|
||||
// at the time the chain was started, used to initialize the database and chain
|
||||
// without syncing from genesis.
|
||||
func (s *Store) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.OriginCheckpointBlockRoot")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.OriginCheckpointBlockRoot")
|
||||
defer span.End()
|
||||
|
||||
var root [32]byte
|
||||
@@ -72,7 +73,7 @@ func (s *Store) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
|
||||
|
||||
// BackfillBlockRoot keeps track of the highest block available before the OriginCheckpointBlockRoot
|
||||
func (s *Store) BackfillBlockRoot(ctx context.Context) ([32]byte, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.BackfillBlockRoot")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.BackfillBlockRoot")
|
||||
defer span.End()
|
||||
|
||||
var root [32]byte
|
||||
@@ -130,7 +131,7 @@ func (s *Store) Blocks(ctx context.Context, f *filters.QueryFilter) ([]interface
|
||||
encoded := bkt.Get(keys[i])
|
||||
blk, err := unmarshalBlock(ctx, encoded)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrapf(err, "could not unmarshal block with key %#x", keys[i])
|
||||
}
|
||||
blocks = append(blocks, blk)
|
||||
blockRoots = append(blockRoots, bytesutil.ToBytes32(keys[i]))
|
||||
@@ -168,7 +169,7 @@ func (s *Store) BlockRoots(ctx context.Context, f *filters.QueryFilter) ([][32]b
|
||||
|
||||
// HasBlock checks if a block by root exists in the db.
|
||||
func (s *Store) HasBlock(ctx context.Context, blockRoot [32]byte) bool {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.HasBlock")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.HasBlock")
|
||||
defer span.End()
|
||||
if v, ok := s.blockCache.Get(string(blockRoot[:])); v != nil && ok {
|
||||
return true
|
||||
@@ -304,6 +305,16 @@ func (s *Store) SaveBlocks(ctx context.Context, blocks []interfaces.SignedBeacon
|
||||
if err := updateValueForIndices(ctx, indicesForBlocks[i], blockRoots[i], tx); err != nil {
|
||||
return errors.Wrap(err, "could not update DB indices")
|
||||
}
|
||||
if features.Get().EnableOnlyBlindedBeaconBlocks {
|
||||
blindedBlock, err := blk.ToBlinded()
|
||||
if err != nil {
|
||||
if !errors.Is(err, wrapper.ErrUnsupportedVersion) {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
blk = blindedBlock
|
||||
}
|
||||
}
|
||||
s.blockCache.Set(string(blockRoots[i]), blk, int64(len(encodedBlocks[i])))
|
||||
if err := bkt.Put(blockRoots[i], encodedBlocks[i]); err != nil {
|
||||
return err
|
||||
@@ -315,7 +326,7 @@ func (s *Store) SaveBlocks(ctx context.Context, blocks []interfaces.SignedBeacon
|
||||
|
||||
// SaveHeadBlockRoot to the db.
|
||||
func (s *Store) SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.SaveHeadBlockRoot")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveHeadBlockRoot")
|
||||
defer span.End()
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
hasStateSummary := s.hasStateSummaryBytes(tx, blockRoot)
|
||||
@@ -366,7 +377,7 @@ func (s *Store) GenesisBlockRoot(ctx context.Context) ([32]byte, error) {
|
||||
|
||||
// SaveGenesisBlockRoot to the db.
|
||||
func (s *Store) SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.SaveGenesisBlockRoot")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveGenesisBlockRoot")
|
||||
defer span.End()
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(blocksBucket)
|
||||
@@ -379,7 +390,7 @@ func (s *Store) SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) er
|
||||
// This value is used by a running beacon chain node to locate the state at the beginning
|
||||
// of the chain history, in places where genesis would typically be used.
|
||||
func (s *Store) SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32]byte) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.SaveOriginCheckpointBlockRoot")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveOriginCheckpointBlockRoot")
|
||||
defer span.End()
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(blocksBucket)
|
||||
@@ -390,7 +401,7 @@ func (s *Store) SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32
|
||||
// SaveBackfillBlockRoot is used to keep track of the most recently backfilled block root when
|
||||
// the node was initialized via checkpoint sync.
|
||||
func (s *Store) SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillBlockRoot")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillBlockRoot")
|
||||
defer span.End()
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(blocksBucket)
|
||||
@@ -489,7 +500,7 @@ func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id types.Validato
|
||||
// SaveFeeRecipientsByValidatorIDs saves the fee recipients for validator ids.
|
||||
// Error is returned if `ids` and `recipients` are not the same length.
|
||||
func (s *Store) SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []types.ValidatorIndex, feeRecipients []common.Address) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.SaveFeeRecipientByValidatorID")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveFeeRecipientByValidatorID")
|
||||
defer span.End()
|
||||
|
||||
if len(ids) != len(feeRecipients) {
|
||||
@@ -527,7 +538,7 @@ func (s *Store) RegistrationByValidatorID(ctx context.Context, id types.Validato
|
||||
// SaveRegistrationsByValidatorIDs saves the validator registrations for validator ids.
|
||||
// Error is returned if `ids` and `registrations` are not the same length.
|
||||
func (s *Store) SaveRegistrationsByValidatorIDs(ctx context.Context, ids []types.ValidatorIndex, regs []*ethpb.ValidatorRegistrationV1) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.SaveRegistrationsByValidatorIDs")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveRegistrationsByValidatorIDs")
|
||||
defer span.End()
|
||||
|
||||
if len(ids) != len(regs) {
|
||||
@@ -614,7 +625,7 @@ func blockRootsBySlotRange(
|
||||
bkt *bolt.Bucket,
|
||||
startSlotEncoded, endSlotEncoded, startEpochEncoded, endEpochEncoded, slotStepEncoded interface{},
|
||||
) ([][]byte, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlotRange")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlotRange")
|
||||
defer span.End()
|
||||
|
||||
// Return nothing when all slot parameters are missing
|
||||
@@ -679,7 +690,7 @@ func blockRootsBySlotRange(
|
||||
|
||||
// blockRootsBySlot retrieves the block roots by slot
|
||||
func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot types.Slot) ([][32]byte, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlot")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlot")
|
||||
defer span.End()
|
||||
|
||||
bkt := tx.Bucket(blockSlotIndicesBucket)
|
||||
@@ -700,7 +711,7 @@ func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot types.Slot) ([][32]
|
||||
// a map of bolt DB index buckets corresponding to each particular key for indices for
|
||||
// data, such as (shard indices bucket -> shard 5).
|
||||
func createBlockIndicesFromBlock(ctx context.Context, block interfaces.BeaconBlock) map[string][]byte {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.createBlockIndicesFromBlock")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.createBlockIndicesFromBlock")
|
||||
defer span.End()
|
||||
indicesByBucket := make(map[string][]byte)
|
||||
// Every index has a unique bucket for fast, binary-search
|
||||
@@ -728,7 +739,7 @@ func createBlockIndicesFromBlock(ctx context.Context, block interfaces.BeaconBlo
|
||||
// objects. If a certain filter criterion does not apply to
|
||||
// blocks, an appropriate error is returned.
|
||||
func createBlockIndicesFromFilters(ctx context.Context, f *filters.QueryFilter) (map[string][]byte, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.createBlockIndicesFromFilters")
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.createBlockIndicesFromFilters")
|
||||
defer span.End()
|
||||
indicesByBucket := make(map[string][]byte)
|
||||
for k, v := range f.Filters() {
|
||||
@@ -758,7 +769,7 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.SignedBeaconBlock
|
||||
var err error
|
||||
enc, err = snappy.Decode(nil, enc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Wrap(err, "could not snappy decode block")
|
||||
}
|
||||
var rawBlock ssz.Unmarshaler
|
||||
switch {
|
||||
@@ -766,23 +777,23 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.SignedBeaconBlock
|
||||
// Marshal block bytes to altair beacon block.
|
||||
rawBlock = &ethpb.SignedBeaconBlockAltair{}
|
||||
if err := rawBlock.UnmarshalSSZ(enc[len(altairKey):]); err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Wrap(err, "could not unmarshal Altair block")
|
||||
}
|
||||
case hasBellatrixKey(enc):
|
||||
rawBlock = &ethpb.SignedBeaconBlockBellatrix{}
|
||||
if err := rawBlock.UnmarshalSSZ(enc[len(bellatrixKey):]); err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Wrap(err, "could not unmarshal Bellatrix block")
|
||||
}
|
||||
case hasBellatrixBlindKey(enc):
|
||||
rawBlock = &ethpb.SignedBlindedBeaconBlockBellatrix{}
|
||||
if err := rawBlock.UnmarshalSSZ(enc[len(bellatrixBlindKey):]); err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Wrap(err, "could not unmarshal blinded Bellatrix block")
|
||||
}
|
||||
default:
|
||||
// Marshal block bytes to phase 0 beacon block.
|
||||
rawBlock = &ethpb.SignedBeaconBlock{}
|
||||
if err := rawBlock.UnmarshalSSZ(enc); err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Wrap(err, "could not unmarshal Phase0 block")
|
||||
}
|
||||
}
|
||||
return wrapper.WrappedSignedBeaconBlock(rawBlock)
|
||||
@@ -790,19 +801,41 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.SignedBeaconBlock
|
||||
|
||||
// marshalBlock marshals a versioned beacon block from its struct type down to bytes.
|
||||
func marshalBlock(_ context.Context, blk interfaces.SignedBeaconBlock) ([]byte, error) {
|
||||
obj, err := blk.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
var encodedBlock []byte
|
||||
var err error
|
||||
blockToSave := blk
|
||||
if features.Get().EnableOnlyBlindedBeaconBlocks {
|
||||
blindedBlock, err := blk.ToBlinded()
|
||||
switch {
|
||||
case errors.Is(err, wrapper.ErrUnsupportedVersion):
|
||||
encodedBlock, err = blk.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not marshal non-blinded block")
|
||||
}
|
||||
case err != nil:
|
||||
return nil, errors.Wrap(err, "could not convert block to blinded format")
|
||||
default:
|
||||
encodedBlock, err = blindedBlock.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not marshal blinded block")
|
||||
}
|
||||
blockToSave = blindedBlock
|
||||
}
|
||||
} else {
|
||||
encodedBlock, err = blk.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
switch blk.Version() {
|
||||
switch blockToSave.Version() {
|
||||
case version.BellatrixBlind:
|
||||
return snappy.Encode(nil, append(bellatrixBlindKey, obj...)), nil
|
||||
return snappy.Encode(nil, append(bellatrixBlindKey, encodedBlock...)), nil
|
||||
case version.Bellatrix:
|
||||
return snappy.Encode(nil, append(bellatrixKey, obj...)), nil
|
||||
return snappy.Encode(nil, append(bellatrixKey, encodedBlock...)), nil
|
||||
case version.Altair:
|
||||
return snappy.Encode(nil, append(altairKey, obj...)), nil
|
||||
return snappy.Encode(nil, append(altairKey, encodedBlock...)), nil
|
||||
case version.Phase0:
|
||||
return snappy.Encode(nil, obj), nil
|
||||
return snappy.Encode(nil, encodedBlock), nil
|
||||
default:
|
||||
return nil, errors.New("Unknown block version")
|
||||
}
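To make the storage format above concrete: every block is stored as its SSZ encoding with a short version key prepended, and the whole thing snappy-compressed; on read, the bytes are decompressed and the key tells the reader which SSZ type to unmarshal into. The sketch below illustrates only that round trip and uses hypothetical key values; the real altairKey/bellatrixKey/bellatrixBlindKey values are defined elsewhere in this package and are not shown in this diff.

package main

import (
    "bytes"
    "fmt"

    "github.com/golang/snappy"
)

// Hypothetical version keys; the actual byte values live in the kv package.
var (
    altairKey    = []byte("altair")
    bellatrixKey = []byte("bellatrix")
)

// encode prepends a version key to the SSZ bytes and snappy-compresses the result.
func encode(key, sszBytes []byte) []byte {
    return snappy.Encode(nil, append(key, sszBytes...))
}

// decode decompresses and strips a recognized version key, returning the SSZ bytes.
func decode(enc []byte) ([]byte, error) {
    dec, err := snappy.Decode(nil, enc)
    if err != nil {
        return nil, err
    }
    switch {
    case bytes.HasPrefix(dec, bellatrixKey):
        return dec[len(bellatrixKey):], nil
    case bytes.HasPrefix(dec, altairKey):
        return dec[len(altairKey):], nil
    default: // phase 0 blocks carry no key
        return dec, nil
    }
}

func main() {
    ssz := []byte{0x01, 0x02, 0x03}
    enc := encode(bellatrixKey, ssz)
    dec, _ := decode(enc)
    fmt.Println(bytes.Equal(dec, ssz)) // true
}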
|
||||
|
||||
@@ -134,11 +134,17 @@ func TestStore_BlocksCRUD(t *testing.T) {
			retrievedBlock, err := db.Block(ctx, blockRoot)
			require.NoError(t, err)
			assert.DeepEqual(t, nil, retrievedBlock, "Expected nil block")

			require.NoError(t, db.SaveBlock(ctx, blk))
			assert.Equal(t, true, db.HasBlock(ctx, blockRoot), "Expected block to exist in the db")
			retrievedBlock, err = db.Block(ctx, blockRoot)
			require.NoError(t, err)
-			assert.Equal(t, true, proto.Equal(blk.Proto(), retrievedBlock.Proto()), "Wanted: %v, received: %v", blk, retrievedBlock)
+			wanted := retrievedBlock
+			if _, err := retrievedBlock.PbBellatrixBlock(); err == nil {
+				wanted, err = retrievedBlock.ToBlinded()
+				require.NoError(t, err)
+			}
+			assert.Equal(t, true, proto.Equal(wanted.Proto(), retrievedBlock.Proto()), "Wanted: %v, received: %v", wanted, retrievedBlock)
		})
	}
}
@@ -314,7 +320,13 @@ func TestStore_BlocksCRUD_NoCache(t *testing.T) {
			assert.Equal(t, true, db.HasBlock(ctx, blockRoot), "Expected block to exist in the db")
			retrievedBlock, err = db.Block(ctx, blockRoot)
			require.NoError(t, err)
-			assert.Equal(t, true, proto.Equal(blk.Proto(), retrievedBlock.Proto()), "Wanted: %v, received: %v", blk, retrievedBlock)

+			wanted := blk
+			if _, err := blk.PbBellatrixBlock(); err == nil {
+				wanted, err = blk.ToBlinded()
+				require.NoError(t, err)
+			}
+			assert.Equal(t, true, proto.Equal(wanted.Proto(), retrievedBlock.Proto()), "Wanted: %v, received: %v", wanted, retrievedBlock)
		})
	}
}
@@ -524,7 +536,12 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
			root := roots[0]
			b, err := db.Block(ctx, root)
			require.NoError(t, err)
-			assert.Equal(t, true, proto.Equal(block1.Proto(), b.Proto()), "Wanted: %v, received: %v", block1, b)
+			wanted := block1
+			if _, err := block1.PbBellatrixBlock(); err == nil {
+				wanted, err = wanted.ToBlinded()
+				require.NoError(t, err)
+			}
+			assert.Equal(t, true, proto.Equal(wanted.Proto(), b.Proto()), "Wanted: %v, received: %v", wanted, b)

			_, roots, err = db.HighestRootsBelowSlot(ctx, 11)
			require.NoError(t, err)
@@ -533,7 +550,12 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
			root = roots[0]
			b, err = db.Block(ctx, root)
			require.NoError(t, err)
-			assert.Equal(t, true, proto.Equal(block2.Proto(), b.Proto()), "Wanted: %v, received: %v", block2, b)
+			wanted2 := block2
+			if _, err := block2.PbBellatrixBlock(); err == nil {
+				wanted2, err = block2.ToBlinded()
+				require.NoError(t, err)
+			}
+			assert.Equal(t, true, proto.Equal(wanted2.Proto(), b.Proto()), "Wanted: %v, received: %v", wanted2, b)

			_, roots, err = db.HighestRootsBelowSlot(ctx, 101)
			require.NoError(t, err)
@@ -542,7 +564,12 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
			root = roots[0]
			b, err = db.Block(ctx, root)
			require.NoError(t, err)
-			assert.Equal(t, true, proto.Equal(block3.Proto(), b.Proto()), "Wanted: %v, received: %v", block3, b)
+			wanted = block3
+			if _, err := block3.PbBellatrixBlock(); err == nil {
+				wanted, err = wanted.ToBlinded()
+				require.NoError(t, err)
+			}
+			assert.Equal(t, true, proto.Equal(wanted.Proto(), b.Proto()), "Wanted: %v, received: %v", wanted, b)
		})
	}
}
@@ -569,7 +596,12 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
			root := roots[0]
			b, err := db.Block(ctx, root)
			require.NoError(t, err)
-			assert.Equal(t, true, proto.Equal(block1.Proto(), b.Proto()), "Wanted: %v, received: %v", block1, b)
+			wanted := block1
+			if _, err := block1.PbBellatrixBlock(); err == nil {
+				wanted, err = block1.ToBlinded()
+				require.NoError(t, err)
+			}
+			assert.Equal(t, true, proto.Equal(wanted.Proto(), b.Proto()), "Wanted: %v, received: %v", wanted, b)

			_, roots, err = db.HighestRootsBelowSlot(ctx, 1)
			require.NoError(t, err)
@@ -577,7 +609,12 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
			root = roots[0]
			b, err = db.Block(ctx, root)
			require.NoError(t, err)
-			assert.Equal(t, true, proto.Equal(genesisBlock.Proto(), b.Proto()), "Wanted: %v, received: %v", genesisBlock, b)
+			wanted = genesisBlock
+			if _, err := genesisBlock.PbBellatrixBlock(); err == nil {
+				wanted, err = genesisBlock.ToBlinded()
+				require.NoError(t, err)
+			}
+			assert.Equal(t, true, proto.Equal(wanted.Proto(), b.Proto()), "Wanted: %v, received: %v", wanted, b)

			_, roots, err = db.HighestRootsBelowSlot(ctx, 0)
			require.NoError(t, err)
@@ -585,7 +622,12 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
			root = roots[0]
			b, err = db.Block(ctx, root)
			require.NoError(t, err)
-			assert.Equal(t, true, proto.Equal(genesisBlock.Proto(), b.Proto()), "Wanted: %v, received: %v", genesisBlock, b)
+			wanted = genesisBlock
+			if _, err := genesisBlock.PbBellatrixBlock(); err == nil {
+				wanted, err = genesisBlock.ToBlinded()
+				require.NoError(t, err)
+			}
+			assert.Equal(t, true, proto.Equal(wanted.Proto(), b.Proto()), "Wanted: %v, received: %v", wanted, b)
		})
	}
}
@@ -671,15 +713,31 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
	assert.Equal(t, 0, len(retrievedBlocks), "Unexpected number of blocks received, expected none")
	retrievedBlocks, err = db.BlocksBySlot(ctx, 20)
	require.NoError(t, err)
-	assert.Equal(t, true, proto.Equal(b1.Proto(), retrievedBlocks[0].Proto()), "Wanted: %v, received: %v", b1, retrievedBlocks[0])

+	wanted := b1
+	if _, err := b1.PbBellatrixBlock(); err == nil {
+		wanted, err = b1.ToBlinded()
+		require.NoError(t, err)
+	}
+	assert.Equal(t, true, proto.Equal(retrievedBlocks[0].Proto(), wanted.Proto()), "Wanted: %v, received: %v", retrievedBlocks[0], wanted)
	assert.Equal(t, true, len(retrievedBlocks) > 0, "Expected to have blocks")
	retrievedBlocks, err = db.BlocksBySlot(ctx, 100)
	require.NoError(t, err)
	if len(retrievedBlocks) != 2 {
		t.Fatalf("Expected 2 blocks, received %d blocks", len(retrievedBlocks))
	}
-	assert.Equal(t, true, proto.Equal(b2.Proto(), retrievedBlocks[0].Proto()), "Wanted: %v, received: %v", b2, retrievedBlocks[0])
-	assert.Equal(t, true, proto.Equal(b3.Proto(), retrievedBlocks[1].Proto()), "Wanted: %v, received: %v", b3, retrievedBlocks[1])
+	wanted = b2
+	if _, err := b2.PbBellatrixBlock(); err == nil {
+		wanted, err = b2.ToBlinded()
+		require.NoError(t, err)
+	}
+	assert.Equal(t, true, proto.Equal(wanted.Proto(), retrievedBlocks[0].Proto()), "Wanted: %v, received: %v", retrievedBlocks[0], wanted)
+	wanted = b3
+	if _, err := b3.PbBellatrixBlock(); err == nil {
+		wanted, err = b3.ToBlinded()
+		require.NoError(t, err)
+	}
+	assert.Equal(t, true, proto.Equal(retrievedBlocks[1].Proto(), wanted.Proto()), "Wanted: %v, received: %v", retrievedBlocks[1], wanted)
	assert.Equal(t, true, len(retrievedBlocks) > 0, "Expected to have blocks")

	hasBlockRoots, retrievedBlockRoots, err := db.BlockRootsBySlot(ctx, 1)

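The same pattern repeats throughout these tests: when the block has a full Bellatrix payload, the value expected back from the database is its blinded form. That could be factored into a small helper like the one sketched below; this is illustrative only and not part of the diff, and the signedBlock interface is a stand-in for Prysm's block interface.

package kv_test

import "testing"

// signedBlock is a stand-in for Prysm's signed beacon block interface.
type signedBlock interface {
	PbBellatrixBlock() (interface{}, error)
	ToBlinded() (signedBlock, error)
}

// blindedWanted is a hypothetical helper capturing the repeated pattern above:
// if the block carries a full Bellatrix payload, the stored value is expected
// to be its blinded form; otherwise the block itself is expected.
func blindedWanted(t *testing.T, blk signedBlock) signedBlock {
	t.Helper()
	if _, err := blk.PbBellatrixBlock(); err != nil {
		return blk // not a full Bellatrix block; stored unchanged
	}
	wanted, err := blk.ToBlinded()
	if err != nil {
		t.Fatalf("could not convert block to blinded format: %v", err)
	}
	return wanted
}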
@@ -12,7 +12,7 @@ import (
// DepositContractAddress returns the address of the deposit contract on the proof-of-work chain.
func (s *Store) DepositContractAddress(ctx context.Context) ([]byte, error) {
-	_, span := trace.StartSpan(ctx, "BeaconDB.DepositContractAddress")
+	ctx, span := trace.StartSpan(ctx, "BeaconDB.DepositContractAddress")
	defer span.End()
	var addr []byte
	if err := s.db.View(func(tx *bolt.Tx) error {
@@ -27,7 +27,7 @@ func (s *Store) DepositContractAddress(ctx context.Context) ([]byte, error) {

// SaveDepositContractAddress saves the deposit contract address to the db. It returns an error if an address has been previously saved.
func (s *Store) SaveDepositContractAddress(ctx context.Context, addr common.Address) error {
-	_, span := trace.StartSpan(ctx, "BeaconDB.VerifyContractAddress")
+	ctx, span := trace.StartSpan(ctx, "BeaconDB.VerifyContractAddress")
	defer span.End()

	return s.db.Update(func(tx *bolt.Tx) error {

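The repeated change from `_, span :=` to `ctx, span :=` in this diff matters for tracing: if the context returned by trace.StartSpan is discarded, spans created further down the call chain are not parented to this one. A minimal illustration, not part of the diff, with function names assumed:

package main

import (
	"context"

	"go.opencensus.io/trace"
)

// parentOp shows the pattern adopted in the diff: keep the returned context so
// that child spans are linked under "BeaconDB.ParentOp".
func parentOp(ctx context.Context) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.ParentOp")
	defer span.End()
	childOp(ctx) // the child span below becomes a child of ParentOp
}

// childOp starts its own span from whatever context it receives.
func childOp(ctx context.Context) {
	_, span := trace.StartSpan(ctx, "BeaconDB.ChildOp")
	defer span.End()
}

func main() {
	parentOp(context.Background())
}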
@@ -5,15 +5,15 @@ import (
	"errors"
	"reflect"

-	fastssz "github.com/ferranbt/fastssz"
	"github.com/golang/snappy"
+	fastssz "github.com/prysmaticlabs/fastssz"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"go.opencensus.io/trace"
	"google.golang.org/protobuf/proto"
)

func decode(ctx context.Context, data []byte, dst proto.Message) error {
-	_, span := trace.StartSpan(ctx, "BeaconDB.decode")
+	ctx, span := trace.StartSpan(ctx, "BeaconDB.decode")
	defer span.End()

	data, err := snappy.Decode(nil, data)
@@ -27,7 +27,7 @@ func decode(ctx context.Context, data []byte, dst proto.Message) error {
}

func encode(ctx context.Context, msg proto.Message) ([]byte, error) {
-	_, span := trace.StartSpan(ctx, "BeaconDB.encode")
+	ctx, span := trace.StartSpan(ctx, "BeaconDB.encode")
	defer span.End()

	if msg == nil || reflect.ValueOf(msg).IsNil() {

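For orientation, the encode/decode helpers above implement a snappy-over-protobuf round trip; the fastssz import suggests the real helpers also take an SSZ path for messages that support it, which is omitted here. The sketch below is illustrative only and not part of the diff; the function names and the wrapperspb example message are assumptions.

package main

import (
	"fmt"

	"github.com/golang/snappy"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

// encodeMsg sketches the storage encoding: protobuf-marshal, then snappy-compress.
func encodeMsg(msg proto.Message) ([]byte, error) {
	raw, err := proto.Marshal(msg)
	if err != nil {
		return nil, err
	}
	return snappy.Encode(nil, raw), nil
}

// decodeMsg sketches the reverse: snappy-decompress, then protobuf-unmarshal.
func decodeMsg(data []byte, dst proto.Message) error {
	raw, err := snappy.Decode(nil, data)
	if err != nil {
		return err
	}
	return proto.Unmarshal(raw, dst)
}

func main() {
	enc, err := encodeMsg(wrapperspb.String("hello"))
	if err != nil {
		panic(err)
	}
	out := &wrapperspb.StringValue{}
	if err := decodeMsg(enc, out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetValue()) // hello
}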
@@ -166,7 +166,7 @@ func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, chec
// Note: beacon blocks from the latest finalized epoch return true, whether or not they are
// considered canonical in the "head view" of the beacon node.
func (s *Store) IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool {
-	_, span := trace.StartSpan(ctx, "BeaconDB.IsFinalizedBlock")
+	ctx, span := trace.StartSpan(ctx, "BeaconDB.IsFinalizedBlock")
	defer span.End()

	var exists bool

@@ -1,6 +1,7 @@
package kv

import (
+	"github.com/prysmaticlabs/prysm/config/features"
	"github.com/prysmaticlabs/prysm/config/params"
)

@@ -9,4 +10,7 @@ func init() {
	if err := params.SetActive(params.MainnetTestConfig()); err != nil {
		panic(err)
	}
+	features.Init(&features.Flags{
+		EnableOnlyBlindedBeaconBlocks: true,
+	})
}

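The test package opts into the blinded-block storage path by initializing the feature flags in init(), as above; production code then reads the same flag through features.Get(). A minimal gating sketch, not part of the diff, with the saveMode helper name assumed:

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/config/features"
)

// saveMode shows how code elsewhere in this diff branches on the flag that the
// test init() above enables.
func saveMode() string {
	if features.Get().EnableOnlyBlindedBeaconBlocks {
		return "store blinded Bellatrix blocks"
	}
	return "store full blocks"
}

func main() {
	// Mirror of the test setup: enable the flag for this process.
	features.Init(&features.Flags{EnableOnlyBlindedBeaconBlocks: true})
	fmt.Println(saveMode())
}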
@@ -4,6 +4,7 @@ package kv

import (
	"context"
+	"fmt"
	"os"
	"path"
	"time"
@@ -14,6 +15,7 @@ import (
	"github.com/prometheus/client_golang/prometheus/promauto"
	prombolt "github.com/prysmaticlabs/prombbolt"
	"github.com/prysmaticlabs/prysm/beacon-chain/db/iface"
+	"github.com/prysmaticlabs/prysm/config/features"
	"github.com/prysmaticlabs/prysm/config/params"
	"github.com/prysmaticlabs/prysm/io/file"
	bolt "go.etcd.io/bbolt"
@@ -183,8 +185,13 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
	}); err != nil {
		return nil, err
	}
-	err = prometheus.Register(createBoltCollector(kv.db))
-	return kv, err
+	if err = prometheus.Register(createBoltCollector(kv.db)); err != nil {
+		return nil, err
+	}
+	if err = kv.checkNeedsResync(); err != nil {
+		return nil, err
+	}
+	return kv, nil
}

// ClearDB removes the previously stored database in the data directory.
@@ -216,6 +223,23 @@ func (s *Store) DatabasePath() string {
	return s.databasePath
}

+func (s *Store) checkNeedsResync() error {
+	return s.db.View(func(tx *bolt.Tx) error {
+		bkt := tx.Bucket(migrationsBucket)
+		hasDisabledFeature := !features.Get().EnableOnlyBlindedBeaconBlocks
+		if hasDisabledFeature && bkt.Get(migrationBlindedBeaconBlocksKey) != nil {
+			return fmt.Errorf(
+				"you have disabled the flag %s, and your node must resync to ensure your "+
+					"database is compatible. If you do not want to resync, please re-enable the %s flag",
+				features.EnableOnlyBlindedBeaconBlocks.Name,
+				features.EnableOnlyBlindedBeaconBlocks.Name,
+			)
+		}
+		return nil
+	})
+}

func createBuckets(tx *bolt.Tx, buckets ...[]byte) error {
	for _, bucket := range buckets {
		if _, err := tx.CreateBucketIfNotExists(bucket); err != nil {

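For context on the new guard: NewKVStore now refuses to open a database that has already been migrated to blinded-block storage if the flag has since been disabled, forcing the operator to either re-enable the flag or resync. A hedged usage sketch, not part of the diff; the directory path, the empty Config literal, and the Close call (assumed from the db interface) are illustrative:

package main

import (
	"context"
	"log"

	"github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
)

func main() {
	// Illustrative only: open (or create) a beacon DB under ./beacondata.
	// If the blinded-blocks migration marker is present but the feature flag
	// is disabled, NewKVStore now returns the "must resync" error shown above.
	store, err := kv.NewKVStore(context.Background(), "./beacondata", &kv.Config{})
	if err != nil {
		log.Fatalf("could not open beacon DB: %v", err)
	}
	defer func() {
		if err := store.Close(); err != nil { // Close assumed from the db interface
			log.Printf("could not close DB: %v", err)
		}
	}()
	log.Printf("beacon DB opened at %s", store.DatabasePath())
}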
Some files were not shown because too many files have changed in this diff.