Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-11 06:18:05 -05:00)

Compare commits: execution- ... unify-slas

18 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 1a03981e29 | |
| | 952d0f2aa3 | |
| | b9d65da9b5 | |
| | ecd87610e5 | |
| | 3557f78746 | |
| | d386571766 | |
| | aeda04290e | |
| | d393d3f0fd | |
| | a9dfbd19f7 | |
| | b5f0465fb4 | |
| | 0df4a695df | |
| | bfc6b49058 | |
| | 9f59201e93 | |
| | 26ff9b15fc | |
| | b4c42013d2 | |
| | 99a1c517bd | |
| | dbb9bb2a1e | |
| | f61efb3c61 | |
@@ -342,9 +342,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "98013b40922e54a64996da49b939e0a88fe2456f68eedc5aee4ceba0f8623f71",
sha256 = "4797a7e594a5b1f4c1c8080701613f3ee451b01ec0861499ea7d9b60877a6b23",
urls = [
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v2.0.0/prysm-web-ui.tar.gz",
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.3/prysm-web-ui.tar.gz",
],
)
@@ -12,13 +12,11 @@ go_library(
deps = [
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//monitoring/tracing:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)

@@ -38,7 +36,6 @@ go_test(
"//proto/prysm/v1alpha1:go_default_library",
"//testing/require:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_golang_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
],
)
@@ -12,13 +12,12 @@ import (
"text/template"
"time"

"github.com/pkg/errors"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/monitoring/tracing"
v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"

"github.com/pkg/errors"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)

const (
@@ -129,51 +128,37 @@ func (c *Client) NodeURL() string {

type reqOption func(*http.Request)

// do is a generic, opinionated request function to reduce boilerplate amongst the methods in this package api/client/builder/types.go.
func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, opts ...reqOption) (res []byte, err error) {
ctx, span := trace.StartSpan(ctx, "builder.client.do")
defer func() {
tracing.AnnotateError(span, err)
span.End()
}()

// do is a generic, opinionated GET function to reduce boilerplate amongst the getters in this packageapi/client/builder/types.go.
func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, opts ...reqOption) ([]byte, error) {
u := c.baseURL.ResolveReference(&url.URL{Path: path})

span.AddAttributes(trace.StringAttribute("url", u.String()),
trace.StringAttribute("method", method))

log.Printf("requesting %s", u.String())
req, err := http.NewRequestWithContext(ctx, method, u.String(), body)
if err != nil {
return
return nil, err
}
for _, o := range opts {
o(req)
}
for _, o := range c.obvs {
if err = o.observe(req); err != nil {
return
if err := o.observe(req); err != nil {
return nil, err
}
}
r, err := c.hc.Do(req)
if err != nil {
return
return nil, err
}
defer func() {
closeErr := r.Body.Close()
if closeErr != nil {
log.WithError(closeErr).Error("Failed to close response body")
}
err = r.Body.Close()
}()
if r.StatusCode != http.StatusOK {
err = non200Err(r)
return
return nil, non200Err(r)
}
res, err = io.ReadAll(r.Body)
b, err := io.ReadAll(r.Body)
if err != nil {
err = errors.Wrap(err, "error reading http response body from builder server")
return
return nil, errors.Wrap(err, "error reading http response body from GetBlock")
}
return
return b, nil
}

var execHeaderTemplate = template.Must(template.New("").Parse(getExecHeaderPath))
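For context, the `reqOption` type introduced here is simply a mutator applied to the outgoing request before `Client.do` sends it. A minimal, self-contained sketch of such an option (hypothetical `withHeader` helper, not part of this change) could look like:

```go
package main

import (
	"fmt"
	"net/http"
)

// reqOption mirrors the unexported option type added in this diff:
// a function that mutates the outgoing request before it is sent.
type reqOption func(*http.Request)

// withHeader is a hypothetical option shown only to illustrate how
// callers of Client.do could compose per-request options.
func withHeader(key, value string) reqOption {
	return func(r *http.Request) {
		r.Header.Set(key, value)
	}
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "http://example.invalid", nil)
	withHeader("Accept", "application/json")(req)
	fmt.Println(req.Header.Get("Accept")) // application/json
}
```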
@@ -216,14 +201,8 @@ func (c *Client) GetHeader(ctx context.Context, slot types.Slot, parentHash [32]
// RegisterValidator encodes the SignedValidatorRegistrationV1 message to json (including hex-encoding the byte
// fields with 0x prefixes) and posts to the builder validator registration endpoint.
func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error {
ctx, span := trace.StartSpan(ctx, "builder.client.RegisterValidator")
defer span.End()
span.AddAttributes(trace.Int64Attribute("num_reqs", int64(len(svr))))

if len(svr) == 0 {
err := errors.Wrap(errMalformedRequest, "empty validator registration list")
tracing.AnnotateError(span, err)
return err
return errors.Wrap(errMalformedRequest, "empty validator registration list")
}
vs := make([]*SignedValidatorRegistration, len(svr))
for i := 0; i < len(svr); i++ {

@@ -231,11 +210,8 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
}
body, err := json.Marshal(vs)
if err != nil {
err := errors.Wrap(err, "error encoding the SignedValidatorRegistration value body in RegisterValidator")
tracing.AnnotateError(span, err)
return err
return errors.Wrap(err, "error encoding the SignedValidatorRegistration value body in RegisterValidator")
}

_, err = c.do(ctx, http.MethodPost, postRegisterValidatorPath, bytes.NewBuffer(body))
return err
}
@@ -31,22 +31,6 @@ func (r *SignedValidatorRegistration) MarshalJSON() ([]byte, error) {
})
}

func (r *SignedValidatorRegistration) UnmarshalJSON(b []byte) error {
if r.SignedValidatorRegistrationV1 == nil {
r.SignedValidatorRegistrationV1 = &eth.SignedValidatorRegistrationV1{}
}
o := struct {
Message *ValidatorRegistration `json:"message,omitempty"`
Signature hexutil.Bytes `json:"signature,omitempty"`
}{}
if err := json.Unmarshal(b, &o); err != nil {
return err
}
r.Message = o.Message.ValidatorRegistrationV1
r.Signature = o.Signature
return nil
}

func (r *ValidatorRegistration) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`

@@ -61,33 +45,6 @@ func (r *ValidatorRegistration) MarshalJSON() ([]byte, error) {
})
}

func (r *ValidatorRegistration) UnmarshalJSON(b []byte) error {
if r.ValidatorRegistrationV1 == nil {
r.ValidatorRegistrationV1 = &eth.ValidatorRegistrationV1{}
}
o := struct {
FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`
GasLimit string `json:"gas_limit,omitempty"`
Timestamp string `json:"timestamp,omitempty"`
Pubkey hexutil.Bytes `json:"pubkey,omitempty"`
}{}
if err := json.Unmarshal(b, &o); err != nil {
return err
}

r.FeeRecipient = o.FeeRecipient
r.Pubkey = o.Pubkey
var err error
if r.GasLimit, err = strconv.ParseUint(o.GasLimit, 10, 64); err != nil {
return errors.Wrap(err, "failed to parse gas limit")
}
if r.Timestamp, err = strconv.ParseUint(o.Timestamp, 10, 64); err != nil {
return errors.Wrap(err, "failed to parse timestamp")
}

return nil
}

type Uint256 struct {
*big.Int
}
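The registration JSON carries gas_limit and timestamp as decimal strings rather than JSON numbers, which is why the removed unmarshaller above went through strconv.ParseUint. A self-contained sketch of that round trip (hypothetical field names, stdlib only, not the Prysm types themselves):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// wireRegistration mirrors the builder API convention of string-encoded integers.
type wireRegistration struct {
	GasLimit string `json:"gas_limit"`
}

func main() {
	var w wireRegistration
	// The value arrives as a JSON string, not a number.
	if err := json.Unmarshal([]byte(`{"gas_limit":"30000000"}`), &w); err != nil {
		panic(err)
	}
	gasLimit, err := strconv.ParseUint(w.GasLimit, 10, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(gasLimit) // 30000000
}
```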
@@ -9,7 +9,6 @@ import (
"testing"

"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/golang/protobuf/proto"
"github.com/prysmaticlabs/go-bitfield"
v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"

@@ -32,8 +31,7 @@ func TestSignedValidatorRegistration_MarshalJSON(t *testing.T) {
},
Signature: make([]byte, 96),
}
a := &SignedValidatorRegistration{SignedValidatorRegistrationV1: svr}
je, err := json.Marshal(a)
je, err := json.Marshal(&SignedValidatorRegistration{SignedValidatorRegistrationV1: svr})
require.NoError(t, err)
// decode with a struct w/ plain strings so we can check the string encoding of the hex fields
un := struct {

@@ -47,14 +45,6 @@ func TestSignedValidatorRegistration_MarshalJSON(t *testing.T) {
require.Equal(t, "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", un.Signature)
require.Equal(t, "0x0000000000000000000000000000000000000000", un.Message.FeeRecipient)
require.Equal(t, "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", un.Message.Pubkey)

t.Run("roundtrip", func(t *testing.T) {
b := &SignedValidatorRegistration{}
if err := json.Unmarshal(je, b); err != nil {
require.NoError(t, err)
}
require.Equal(t, proto.Equal(a.SignedValidatorRegistrationV1, b.SignedValidatorRegistrationV1), true)
})
}

var testExampleHeaderResponse = `{
@@ -3,7 +3,6 @@ package async_test
import (
"context"
"sync"
"sync/atomic"
"testing"
"time"

@@ -17,7 +16,7 @@ func TestDebounce_NoEvents(t *testing.T) {
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(context.Background())
interval := time.Second
timesHandled := int32(0)
timesHandled := 0
wg := &sync.WaitGroup{}
wg.Add(1)
go func() {

@@ -27,21 +26,21 @@
}()
go func() {
async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
atomic.AddInt32(&timesHandled, 1)
timesHandled++
})
wg.Done()
}()
if util.WaitTimeout(wg, interval*2) {
t.Fatalf("Test should have exited by now, timed out")
}
assert.Equal(t, int32(0), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")
assert.Equal(t, 0, timesHandled, "Wrong number of handled calls")
}

func TestDebounce_CtxClosing(t *testing.T) {
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(context.Background())
interval := time.Second
timesHandled := int32(0)
timesHandled := 0
wg := &sync.WaitGroup{}
wg.Add(1)
go func() {

@@ -63,23 +62,23 @@ func TestDebounce_CtxClosing(t *testing.T) {
}()
go func() {
async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
atomic.AddInt32(&timesHandled, 1)
timesHandled++
})
wg.Done()
}()
if util.WaitTimeout(wg, interval*2) {
t.Fatalf("Test should have exited by now, timed out")
}
assert.Equal(t, int32(0), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")
assert.Equal(t, 0, timesHandled, "Wrong number of handled calls")
}

func TestDebounce_SingleHandlerInvocation(t *testing.T) {
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(context.Background())
interval := time.Second
timesHandled := int32(0)
timesHandled := 0
go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
atomic.AddInt32(&timesHandled, 1)
timesHandled++
})
for i := 0; i < 100; i++ {
eventsChan <- struct{}{}

@@ -87,7 +86,7 @@ func TestDebounce_SingleHandlerInvocation(t *testing.T) {
// We should expect 100 rapid fire changes to only have caused
// 1 handler to trigger after the debouncing period.
time.Sleep(interval * 2)
assert.Equal(t, int32(1), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")
assert.Equal(t, 1, timesHandled, "Wrong number of handled calls")
cancel()
}

@@ -95,23 +94,23 @@ func TestDebounce_MultipleHandlerInvocation(t *testing.T) {
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(context.Background())
interval := time.Second
timesHandled := int32(0)
timesHandled := 0
go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
atomic.AddInt32(&timesHandled, 1)
timesHandled++
})
for i := 0; i < 100; i++ {
eventsChan <- struct{}{}
}
require.Equal(t, int32(0), atomic.LoadInt32(&timesHandled), "Events must prevent from handler execution")
require.Equal(t, 0, timesHandled, "Events must prevent from handler execution")

// By this time the first event should be triggered.
time.Sleep(2 * time.Second)
assert.Equal(t, int32(1), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")
assert.Equal(t, 1, timesHandled, "Wrong number of handled calls")

// Second event.
eventsChan <- struct{}{}
time.Sleep(2 * time.Second)
assert.Equal(t, int32(2), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")
assert.Equal(t, 2, timesHandled, "Wrong number of handled calls")

cancel()
}
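The recurring change in these tests swaps a plain int counter for an int32 driven through sync/atomic, because the Debounce handler runs in a separate goroutine from the assertions. A minimal standalone sketch of that pattern (stdlib only, not Prysm code):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var handled int32
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Written from many goroutines, so the increment must be atomic
			// or `go test -race` will flag a data race.
			atomic.AddInt32(&handled, 1)
		}()
	}
	wg.Wait()
	fmt.Println(atomic.LoadInt32(&handled)) // 100
}
```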
@@ -2,7 +2,6 @@ package async_test

import (
"context"
"sync/atomic"
"testing"
"time"

@@ -12,15 +11,15 @@ import (
func TestEveryRuns(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())

i := int32(0)
i := 0
async.RunEvery(ctx, 100*time.Millisecond, func() {
atomic.AddInt32(&i, 1)
i++
})

// Sleep for a bit and ensure the value has increased.
time.Sleep(200 * time.Millisecond)

if atomic.LoadInt32(&i) == 0 {
if i == 0 {
t.Error("Counter failed to increment with ticker")
}

@@ -29,12 +28,12 @@ func TestEveryRuns(t *testing.T) {
// Sleep for a bit to let the cancel take place.
time.Sleep(100 * time.Millisecond)

last := atomic.LoadInt32(&i)
last := i

// Sleep for a bit and ensure the value has not increased.
time.Sleep(200 * time.Millisecond)

if atomic.LoadInt32(&i) != last {
if i != last {
t.Error("Counter incremented after stop")
}
}
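async.RunEvery, as exercised above, keeps invoking the callback on a fixed interval until the context is cancelled. A hedged usage sketch based only on the signature visible in this diff (the import path is an assumption from the repository layout):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/prysmaticlabs/prysm/async" // assumed import path
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	// Assumption: RunEvery schedules fn on a ticker and returns immediately,
	// stopping once ctx is cancelled, as the test above implies.
	async.RunEvery(ctx, 100*time.Millisecond, func() {
		fmt.Println("tick")
	})

	time.Sleep(350 * time.Millisecond)
	cancel()
}
```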
@@ -63,6 +63,7 @@ go_library(
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/forks/bellatrix:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
|
||||
@@ -83,7 +83,6 @@ type FinalizationFetcher interface {
|
||||
CurrentJustifiedCheckpt() *ethpb.Checkpoint
|
||||
PreviousJustifiedCheckpt() *ethpb.Checkpoint
|
||||
VerifyFinalizedBlkDescendant(ctx context.Context, blockRoot [32]byte) error
|
||||
IsFinalized(ctx context.Context, blockRoot [32]byte) bool
|
||||
}
|
||||
|
||||
// OptimisticModeFetcher retrieves information about optimistic status of the node.
|
||||
@@ -308,15 +307,6 @@ func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
|
||||
return s.IsOptimisticForRoot(ctx, s.head.root)
|
||||
}
|
||||
|
||||
// IsFinalized returns true if the input root is finalized.
|
||||
// It first checks latest finalized root then checks finalized root index in DB.
|
||||
func (s *Service) IsFinalized(ctx context.Context, root [32]byte) bool {
|
||||
if s.ForkChoicer().FinalizedCheckpoint().Root == root {
|
||||
return true
|
||||
}
|
||||
return s.cfg.BeaconDB.IsFinalizedBlock(ctx, root)
|
||||
}
|
||||
|
||||
// IsOptimisticForRoot takes the root as argument instead of the current head
|
||||
// and returns true if it is optimistic.
|
||||
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
|
||||
|
||||
@@ -669,25 +669,3 @@ func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
|
||||
require.Equal(t, true, validated)
|
||||
|
||||
}
|
||||
|
||||
func TestService_IsFinalized(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}}
|
||||
r1 := [32]byte{'a'}
|
||||
require.NoError(t, c.ForkChoiceStore().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
|
||||
Root: r1,
|
||||
}))
|
||||
b := util.NewBeaconBlock()
|
||||
br, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, ctx, beaconDB, b)
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: br[:], Slot: 10}))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, br))
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{
|
||||
Root: br[:],
|
||||
}))
|
||||
require.Equal(t, true, c.IsFinalized(ctx, r1))
|
||||
require.Equal(t, true, c.IsFinalized(ctx, br))
|
||||
require.Equal(t, false, c.IsFinalized(ctx, [32]byte{'c'}))
|
||||
}
|
||||
|
||||
@@ -4,9 +4,7 @@ import "github.com/pkg/errors"
|
||||
|
||||
var (
|
||||
// ErrInvalidPayload is returned when the payload is invalid
|
||||
ErrInvalidPayload = invalidBlock{error: errors.New("received an INVALID payload from execution engine")}
|
||||
// ErrInvalidBlockHashPayloadStatus is returned when the payload has invalid block hash.
|
||||
ErrInvalidBlockHashPayloadStatus = invalidBlock{error: errors.New("received an INVALID_BLOCK_HASH payload from execution engine")}
|
||||
ErrInvalidPayload = errors.New("recevied an INVALID payload from execution engine")
|
||||
// ErrUndefinedExecutionEngineError is returned when the execution engine returns an error that is not defined
|
||||
ErrUndefinedExecutionEngineError = errors.New("received an undefined ee error")
|
||||
// errNilFinalizedInStore is returned when a nil finalized checkpt is returned from store.
|
||||
@@ -30,7 +28,7 @@ var (
|
||||
// errWSBlockNotFoundInEpoch is returned when a block is not found in the WS cache or DB within epoch.
|
||||
errWSBlockNotFoundInEpoch = errors.New("weak subjectivity root not found in db within epoch")
|
||||
// errNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
|
||||
errNotDescendantOfFinalized = invalidBlock{error: errors.New("not descendant of finalized checkpoint")}
|
||||
errNotDescendantOfFinalized = invalidBlock{errors.New("not descendant of finalized checkpoint")}
|
||||
)
|
||||
|
||||
// An invalid block is the block that fails state transition based on the core protocol rules.
|
||||
@@ -41,17 +39,16 @@ var (
|
||||
// The block violates certain fork choice rules (before finalized slot, not finalized ancestor)
|
||||
type invalidBlock struct {
|
||||
error
|
||||
root [32]byte
|
||||
}
|
||||
|
||||
type invalidBlockError interface {
|
||||
Error() string
|
||||
BlockRoot() [32]byte
|
||||
InvalidBlock() bool
|
||||
}
|
||||
|
||||
// BlockRoot returns the invalid block root.
|
||||
func (e invalidBlock) BlockRoot() [32]byte {
|
||||
return e.root
|
||||
// InvalidBlock returns true for `invalidBlock`.
|
||||
func (e invalidBlock) InvalidBlock() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// IsInvalidBlock returns true if the error has `invalidBlock`.
|
||||
@@ -59,22 +56,9 @@ func IsInvalidBlock(e error) bool {
|
||||
if e == nil {
|
||||
return false
|
||||
}
|
||||
_, ok := e.(invalidBlockError)
|
||||
d, ok := e.(invalidBlockError)
|
||||
if !ok {
|
||||
return IsInvalidBlock(errors.Unwrap(e))
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// InvalidBlockRoot returns the invalid block root. If the error
|
||||
// doesn't have an invalid blockroot. [32]byte{} is returned.
|
||||
func InvalidBlockRoot(e error) [32]byte {
|
||||
if e == nil {
|
||||
return [32]byte{}
|
||||
}
|
||||
d, ok := e.(invalidBlockError)
|
||||
if !ok {
|
||||
return [32]byte{}
|
||||
}
|
||||
return d.BlockRoot()
|
||||
return d.InvalidBlock()
|
||||
}
|
||||
|
||||
@@ -8,18 +8,10 @@ import (
|
||||
)
|
||||
|
||||
func TestIsInvalidBlock(t *testing.T) {
|
||||
require.Equal(t, true, IsInvalidBlock(ErrInvalidPayload)) // Already wrapped.
|
||||
err := invalidBlock{error: ErrInvalidPayload}
|
||||
require.Equal(t, false, IsInvalidBlock(ErrInvalidPayload))
|
||||
err := invalidBlock{ErrInvalidPayload}
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
|
||||
newErr := errors.Wrap(err, "wrap me")
|
||||
require.Equal(t, true, IsInvalidBlock(newErr))
|
||||
}
|
||||
|
||||
func TestInvalidBlockRoot(t *testing.T) {
|
||||
require.Equal(t, [32]byte{}, InvalidBlockRoot(ErrUndefinedExecutionEngineError))
|
||||
require.Equal(t, [32]byte{}, InvalidBlockRoot(ErrInvalidPayload))
|
||||
|
||||
err := invalidBlock{error: ErrInvalidPayload, root: [32]byte{'a'}}
|
||||
require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(err))
|
||||
}
|
||||
|
||||
@@ -39,27 +39,24 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
|
||||
|
||||
headBlk := arg.headBlock
|
||||
if headBlk == nil || headBlk.IsNil() || headBlk.Body().IsNil() {
|
||||
log.Error("Head block is nil")
|
||||
return nil, nil
|
||||
return nil, errors.New("nil head block")
|
||||
}
|
||||
// Must not call fork choice updated until the transition conditions are met on the Pow network.
|
||||
isExecutionBlk, err := blocks.IsExecutionBlock(headBlk.Body())
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not determine if head block is execution block")
|
||||
return nil, nil
|
||||
return nil, errors.Wrap(err, "could not determine if block is execution block")
|
||||
}
|
||||
if !isExecutionBlk {
|
||||
return nil, nil
|
||||
}
|
||||
headPayload, err := headBlk.Body().Execution()
|
||||
headPayload, err := headBlk.Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get execution payload for head block")
|
||||
return nil, nil
|
||||
return nil, errors.Wrap(err, "could not get execution payload")
|
||||
}
|
||||
finalizedHash := s.ForkChoicer().FinalizedPayloadBlockHash()
|
||||
justifiedHash := s.ForkChoicer().JustifiedPayloadBlockHash()
|
||||
fcs := &enginev1.ForkchoiceState{
|
||||
HeadBlockHash: headPayload.BlockHash(),
|
||||
HeadBlockHash: headPayload.BlockHash,
|
||||
SafeBlockHash: justifiedHash[:],
|
||||
FinalizedBlockHash: finalizedHash[:],
|
||||
}
|
||||
@@ -67,8 +64,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
|
||||
nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
|
||||
hasAttr, attr, proposerId, err := s.getPayloadAttribute(ctx, arg.headState, nextSlot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get head payload attribute")
|
||||
return nil, nil
|
||||
return nil, errors.Wrap(err, "could not get payload attribute")
|
||||
}
|
||||
|
||||
payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, attr)
|
||||
@@ -78,41 +74,32 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
|
||||
forkchoiceUpdatedOptimisticNodeCount.Inc()
|
||||
log.WithFields(logrus.Fields{
|
||||
"headSlot": headBlk.Slot(),
|
||||
"headPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash())),
|
||||
"headPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
|
||||
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash[:])),
|
||||
}).Info("Called fork choice updated with optimistic block")
|
||||
err := s.optimisticCandidateBlock(ctx, headBlk)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Optimistic block failed to be candidate")
|
||||
}
|
||||
return payloadID, nil
|
||||
return payloadID, s.optimisticCandidateBlock(ctx, headBlk)
|
||||
case powchain.ErrInvalidPayloadStatus:
|
||||
newPayloadInvalidNodeCount.Inc()
|
||||
headRoot := arg.headRoot
|
||||
invalidRoots, err := s.ForkChoicer().SetOptimisticToInvalid(ctx, headRoot, bytesutil.ToBytes32(headBlk.ParentRoot()), bytesutil.ToBytes32(lastValidHash))
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not set head root to invalid")
|
||||
return nil, nil
|
||||
return nil, err
|
||||
}
|
||||
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
|
||||
log.WithError(err).Error("Could not remove invalid block and state")
|
||||
return nil, nil
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r, err := s.cfg.ForkChoiceStore.Head(ctx, s.justifiedBalances.balances)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get head root")
|
||||
return nil, nil
|
||||
return nil, err
|
||||
}
|
||||
b, err := s.getBlock(ctx, r)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get head block")
|
||||
return nil, nil
|
||||
return nil, err
|
||||
}
|
||||
st, err := s.cfg.StateGen.StateByRoot(ctx, r)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get head state")
|
||||
return nil, nil
|
||||
return nil, err
|
||||
}
|
||||
pid, err := s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
|
||||
headState: st,
|
||||
@@ -120,32 +107,25 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
|
||||
headBlock: b.Block(),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err // Returning err because it's recursive here.
|
||||
}
|
||||
|
||||
if err := s.saveHead(ctx, r, b, st); err != nil {
|
||||
log.WithError(err).Error("could not save head after pruning invalid blocks")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": headBlk.Slot(),
|
||||
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(headRoot[:])),
|
||||
"blockRoot": fmt.Sprintf("%#x", headRoot),
|
||||
"invalidCount": len(invalidRoots),
|
||||
"newHeadRoot": fmt.Sprintf("%#x", bytesutil.Trunc(r[:])),
|
||||
}).Warn("Pruned invalid blocks")
|
||||
return pid, invalidBlock{error: ErrInvalidPayload, root: arg.headRoot}
|
||||
return pid, ErrInvalidPayload
|
||||
|
||||
default:
|
||||
log.WithError(err).Error(ErrUndefinedExecutionEngineError)
|
||||
return nil, nil
|
||||
return nil, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
|
||||
}
|
||||
}
|
||||
forkchoiceUpdatedValidNodeCount.Inc()
|
||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, arg.headRoot); err != nil {
|
||||
log.WithError(err).Error("Could not set head root to valid")
|
||||
return nil, nil
|
||||
return nil, errors.Wrap(err, "could not set block to valid")
|
||||
}
|
||||
if hasAttr && payloadID != nil { // If the forkchoice update call has an attribute, update the proposer payload ID cache.
|
||||
if hasAttr { // If the forkchoice update call has an attribute, update the proposer payload ID cache.
|
||||
var pId [8]byte
|
||||
copy(pId[:], payloadID[:])
|
||||
s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nextSlot, proposerId, pId)
|
||||
@@ -163,11 +143,11 @@ func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, er
|
||||
if blocks.IsPreBellatrixVersion(blk.Block().Version()) {
|
||||
return params.BeaconConfig().ZeroHash, nil
|
||||
}
|
||||
payload, err := blk.Block().Body().Execution()
|
||||
payload, err := blk.Block().Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not get execution payload")
|
||||
}
|
||||
return bytesutil.ToBytes32(payload.BlockHash()), nil
|
||||
return bytesutil.ToBytes32(payload.BlockHash), nil
|
||||
}
|
||||
|
||||
// notifyForkchoiceUpdate signals execution engine on a new payload.
|
||||
@@ -188,14 +168,14 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
|
||||
body := blk.Block().Body()
|
||||
enabled, err := blocks.IsExecutionEnabledUsingHeader(postStateHeader, body)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(invalidBlock{error: err}, "could not determine if execution is enabled")
|
||||
return false, errors.Wrap(invalidBlock{err}, "could not determine if execution is enabled")
|
||||
}
|
||||
if !enabled {
|
||||
return true, nil
|
||||
}
|
||||
payload, err := body.Execution()
|
||||
payload, err := body.ExecutionPayload()
|
||||
if err != nil {
|
||||
return false, errors.Wrap(invalidBlock{error: err}, "could not get execution payload")
|
||||
return false, errors.Wrap(invalidBlock{err}, "could not get execution payload")
|
||||
}
|
||||
lastValidHash, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
|
||||
switch err {
|
||||
@@ -206,7 +186,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
|
||||
newPayloadOptimisticNodeCount.Inc()
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": blk.Block().Slot(),
|
||||
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
|
||||
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
|
||||
}).Info("Called new payload with optimistic block")
|
||||
return false, s.optimisticCandidateBlock(ctx, blk.Block())
|
||||
case powchain.ErrInvalidPayloadStatus:
|
||||
@@ -227,10 +207,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
|
||||
"blockRoot": fmt.Sprintf("%#x", root),
|
||||
"invalidCount": len(invalidRoots),
|
||||
}).Warn("Pruned invalid blocks")
|
||||
return false, ErrInvalidPayload
|
||||
case powchain.ErrInvalidBlockHashPayloadStatus:
|
||||
newPayloadInvalidNodeCount.Inc()
|
||||
return false, ErrInvalidBlockHashPayloadStatus
|
||||
return false, invalidBlock{ErrInvalidPayload}
|
||||
default:
|
||||
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
|
||||
}
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
gethtypes "github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
@@ -32,7 +31,6 @@ import (
|
||||
)
|
||||
|
||||
func Test_NotifyForkchoiceUpdate(t *testing.T) {
|
||||
params.BeaconConfig().SafeSlotsToImportOptimistically = 0
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
altairBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockAltair())
|
||||
@@ -75,6 +73,10 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
|
||||
newForkchoiceErr error
|
||||
errString string
|
||||
}{
|
||||
{
|
||||
name: "nil block",
|
||||
errString: "nil head block",
|
||||
},
|
||||
{
|
||||
name: "phase0 block",
|
||||
blk: func() interfaces.BeaconBlock {
|
||||
@@ -188,10 +190,6 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
|
||||
_, err = service.notifyForkchoiceUpdate(ctx, arg)
|
||||
if tt.errString != "" {
|
||||
require.ErrorContains(t, tt.errString, err)
|
||||
if tt.errString == ErrInvalidPayload.Error() {
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
require.Equal(t, tt.headRoot, InvalidBlockRoot(err)) // Head root should be invalid. Not block root!
|
||||
}
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -214,7 +212,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
|
||||
// 2. forkchoice removes the weights of these blocks
|
||||
// 3. the blockchain package calls fcu to obtain heads G -> F -> D.
|
||||
|
||||
func Test_NotifyForkchoiceUpdateRecursive_Protoarray(t *testing.T) {
|
||||
func Test_NotifyForkchoiceUpdateRecursive(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
@@ -272,156 +270,14 @@ func Test_NotifyForkchoiceUpdateRecursive_Protoarray(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Insert blocks into forkchoice
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
fcs := protoarray.New()
|
||||
service.cfg.ForkChoiceStore = fcs
|
||||
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
|
||||
|
||||
service.justifiedBalances.balances = []uint64{50, 100, 200}
|
||||
require.NoError(t, err)
|
||||
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 1, bra, [32]byte{}, [32]byte{'A'}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 2, brb, bra, [32]byte{'B'}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 3, brc, brb, [32]byte{'C'}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 4, brd, brc, [32]byte{'D'}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 5, bre, brb, [32]byte{'E'}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 6, brf, bre, [32]byte{'F'}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 7, brg, bre, [32]byte{'G'}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
// Insert Attestations to D, F and G so that they have higher weight than D
|
||||
// Ensure G is head
|
||||
fcs.ProcessAttestation(ctx, []uint64{0}, brd, 1)
|
||||
fcs.ProcessAttestation(ctx, []uint64{1}, brf, 1)
|
||||
fcs.ProcessAttestation(ctx, []uint64{2}, brg, 1)
|
||||
jc := &forkchoicetypes.Checkpoint{Epoch: 0, Root: bra}
|
||||
require.NoError(t, fcs.UpdateJustifiedCheckpoint(jc))
|
||||
headRoot, err := fcs.Head(ctx, []uint64{50, 100, 200})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, brg, headRoot)
|
||||
|
||||
// Prepare Engine Mock to return invalid unless head is D, LVH = E
|
||||
service.cfg.ExecutionEngineCaller = &mockPOW.EngineClient{ErrForkchoiceUpdated: powchain.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: pe[:], OverrideValidHash: [32]byte{'D'}}
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
service.head = &head{
|
||||
state: st,
|
||||
block: wba,
|
||||
}
|
||||
|
||||
require.NoError(t, beaconDB.SaveState(ctx, st, bra))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bra))
|
||||
a := &notifyForkchoiceUpdateArg{
|
||||
headState: st,
|
||||
headBlock: wbg.Block(),
|
||||
headRoot: brg,
|
||||
}
|
||||
_, err = service.notifyForkchoiceUpdate(ctx, a)
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
require.Equal(t, brf, InvalidBlockRoot(err))
|
||||
|
||||
// Ensure Head is D
|
||||
headRoot, err = fcs.Head(ctx, service.justifiedBalances.balances)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, brd, headRoot)
|
||||
|
||||
// Ensure F and G where removed but their parent E wasn't
|
||||
require.Equal(t, false, fcs.HasNode(brf))
|
||||
require.Equal(t, false, fcs.HasNode(brg))
|
||||
require.Equal(t, true, fcs.HasNode(bre))
|
||||
}
|
||||
|
||||
//
|
||||
//
|
||||
// A <- B <- C <- D
|
||||
// \
|
||||
// ---------- E <- F
|
||||
// \
|
||||
// ------ G
|
||||
// D is the current head, attestations for F and G come late, both are invalid.
|
||||
// We switch recursively to F then G and finally to D.
|
||||
//
|
||||
// We test:
|
||||
// 1. forkchoice removes blocks F and G from the forkchoice implementation
|
||||
// 2. forkchoice removes the weights of these blocks
|
||||
// 3. the blockchain package calls fcu to obtain heads G -> F -> D.
|
||||
|
||||
func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
// Prepare blocks
|
||||
ba := util.NewBeaconBlockBellatrix()
|
||||
ba.Block.Body.ExecutionPayload.BlockNumber = 1
|
||||
wba := util.SaveBlock(t, ctx, beaconDB, ba)
|
||||
bra, err := wba.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
bb := util.NewBeaconBlockBellatrix()
|
||||
bb.Block.Body.ExecutionPayload.BlockNumber = 2
|
||||
wbb := util.SaveBlock(t, ctx, beaconDB, bb)
|
||||
brb, err := wbb.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
bc := util.NewBeaconBlockBellatrix()
|
||||
bc.Block.Body.ExecutionPayload.BlockNumber = 3
|
||||
wbc := util.SaveBlock(t, ctx, beaconDB, bc)
|
||||
brc, err := wbc.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
bd := util.NewBeaconBlockBellatrix()
|
||||
pd := [32]byte{'D'}
|
||||
bd.Block.Body.ExecutionPayload.BlockHash = pd[:]
|
||||
bd.Block.Body.ExecutionPayload.BlockNumber = 4
|
||||
wbd := util.SaveBlock(t, ctx, beaconDB, bd)
|
||||
brd, err := wbd.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
be := util.NewBeaconBlockBellatrix()
|
||||
pe := [32]byte{'E'}
|
||||
be.Block.Body.ExecutionPayload.BlockHash = pe[:]
|
||||
be.Block.Body.ExecutionPayload.BlockNumber = 5
|
||||
wbe := util.SaveBlock(t, ctx, beaconDB, be)
|
||||
bre, err := wbe.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
bf := util.NewBeaconBlockBellatrix()
|
||||
pf := [32]byte{'F'}
|
||||
bf.Block.Body.ExecutionPayload.BlockHash = pf[:]
|
||||
bf.Block.Body.ExecutionPayload.BlockNumber = 6
|
||||
bf.Block.ParentRoot = bre[:]
|
||||
wbf := util.SaveBlock(t, ctx, beaconDB, bf)
|
||||
brf, err := wbf.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
bg := util.NewBeaconBlockBellatrix()
|
||||
bg.Block.Body.ExecutionPayload.BlockNumber = 7
|
||||
pg := [32]byte{'G'}
|
||||
bg.Block.Body.ExecutionPayload.BlockHash = pg[:]
|
||||
bg.Block.ParentRoot = bre[:]
|
||||
wbg := util.SaveBlock(t, ctx, beaconDB, bg)
|
||||
brg, err := wbg.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Insert blocks into forkchoice
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
fcs := doublylinkedtree.New()
|
||||
service.cfg.ForkChoiceStore = fcs
|
||||
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
|
||||
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
service.justifiedBalances.balances = []uint64{50, 100, 200}
|
||||
require.NoError(t, err)
|
||||
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
@@ -462,10 +318,6 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
|
||||
// Prepare Engine Mock to return invalid unless head is D, LVH = E
|
||||
service.cfg.ExecutionEngineCaller = &mockPOW.EngineClient{ErrForkchoiceUpdated: powchain.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: pe[:], OverrideValidHash: [32]byte{'D'}}
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
service.head = &head{
|
||||
state: st,
|
||||
block: wba,
|
||||
}
|
||||
|
||||
require.NoError(t, beaconDB.SaveState(ctx, st, bra))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bra))
|
||||
@@ -475,9 +327,7 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
|
||||
headRoot: brg,
|
||||
}
|
||||
_, err = service.notifyForkchoiceUpdate(ctx, a)
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
require.Equal(t, brf, InvalidBlockRoot(err))
|
||||
|
||||
require.ErrorIs(t, ErrInvalidPayload, err)
|
||||
// Ensure Head is D
|
||||
headRoot, err = fcs.Head(ctx, service.justifiedBalances.balances)
|
||||
require.NoError(t, err)
|
||||
@@ -678,40 +528,16 @@ func Test_NotifyNewPayload(t *testing.T) {
|
||||
newPayloadErr: ErrUndefinedExecutionEngineError,
|
||||
errString: ErrUndefinedExecutionEngineError.Error(),
|
||||
},
|
||||
{
|
||||
name: "invalid block hash error from ee",
|
||||
postState: bellatrixState,
|
||||
blk: func() interfaces.SignedBeaconBlock {
|
||||
blk := &ethpb.SignedBeaconBlockBellatrix{
|
||||
Block: &ethpb.BeaconBlockBellatrix{
|
||||
Body: &ethpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: &v1.ExecutionPayload{
|
||||
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
return b
|
||||
}(),
|
||||
newPayloadErr: ErrInvalidBlockHashPayloadStatus,
|
||||
errString: ErrInvalidBlockHashPayloadStatus.Error(),
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
e := &mockPOW.EngineClient{ErrNewPayload: tt.newPayloadErr, BlockByHashMap: map[[32]byte]*v1.ExecutionBlock{}}
|
||||
e.BlockByHashMap[[32]byte{'a'}] = &v1.ExecutionBlock{
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash([]byte("b")),
|
||||
},
|
||||
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
|
||||
TotalDifficulty: "0x2",
|
||||
}
|
||||
e.BlockByHashMap[[32]byte{'b'}] = &v1.ExecutionBlock{
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash([]byte("3")),
|
||||
},
|
||||
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
|
||||
TotalDifficulty: "0x1",
|
||||
}
|
||||
service.cfg.ExecutionEngineCaller = e
|
||||
@@ -764,15 +590,11 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
e := &mockPOW.EngineClient{BlockByHashMap: map[[32]byte]*v1.ExecutionBlock{}}
|
||||
e.BlockByHashMap[[32]byte{'a'}] = &v1.ExecutionBlock{
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash([]byte("b")),
|
||||
},
|
||||
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
|
||||
TotalDifficulty: "0x2",
|
||||
}
|
||||
e.BlockByHashMap[[32]byte{'b'}] = &v1.ExecutionBlock{
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash([]byte("3")),
|
||||
},
|
||||
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
|
||||
TotalDifficulty: "0x1",
|
||||
}
|
||||
service.cfg.ExecutionEngineCaller = e
|
||||
|
||||
@@ -45,16 +45,12 @@ func logStateTransitionData(b interfaces.BeaconBlock) error {
|
||||
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
|
||||
}
|
||||
if b.Version() == version.Bellatrix {
|
||||
p, err := b.Body().Execution()
|
||||
p, err := b.Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
|
||||
txs, err := p.Transactions()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log = log.WithField("txCount", len(txs))
|
||||
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash)))
|
||||
log = log.WithField("txCount", len(p.Transactions))
|
||||
}
|
||||
log.Info("Finished applying state transition")
|
||||
return nil
|
||||
@@ -66,30 +62,24 @@ func logBlockSyncStatus(block interfaces.BeaconBlock, blockRoot [32]byte, justif
|
||||
return err
|
||||
}
|
||||
level := log.Logger.GetLevel()
|
||||
|
||||
log = log.WithField("slot", block.Slot())
|
||||
if level >= logrus.DebugLevel {
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": block.Slot(),
|
||||
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
|
||||
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
|
||||
"epoch": slots.ToEpoch(block.Slot()),
|
||||
"justifiedEpoch": justified.Epoch,
|
||||
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
|
||||
"finalizedEpoch": finalized.Epoch,
|
||||
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
|
||||
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(block.ParentRoot())[:8]),
|
||||
"version": version.String(block.Version()),
|
||||
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
|
||||
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime),
|
||||
}).Debug("Synced new block")
|
||||
} else {
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": block.Slot(),
|
||||
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
|
||||
"finalizedEpoch": finalized.Epoch,
|
||||
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
|
||||
"epoch": slots.ToEpoch(block.Slot()),
|
||||
}).Info("Synced new block")
|
||||
log = log.WithField("slotInEpoch", block.Slot()%params.BeaconConfig().SlotsPerEpoch)
|
||||
log = log.WithField("justifiedEpoch", justified.Epoch)
|
||||
log = log.WithField("justifiedRoot", fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]))
|
||||
log = log.WithField("parentRoot", fmt.Sprintf("0x%s...", hex.EncodeToString(block.ParentRoot())[:8]))
|
||||
log = log.WithField("version", version.String(block.Version()))
|
||||
log = log.WithField("sinceSlotStartTime", prysmTime.Now().Sub(startTime))
|
||||
log = log.WithField("chainServiceProcessedTime", prysmTime.Now().Sub(receivedTime))
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
|
||||
"epoch": slots.ToEpoch(block.Slot()),
|
||||
"finalizedEpoch": finalized.Epoch,
|
||||
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
|
||||
}).Info("Synced new block")
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -102,18 +92,18 @@ func logPayload(block interfaces.BeaconBlock) error {
|
||||
if !isExecutionBlk {
|
||||
return nil
|
||||
}
|
||||
payload, err := block.Body().Execution()
|
||||
payload, err := block.Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if payload.GasLimit() == 0 {
|
||||
if payload.GasLimit == 0 {
|
||||
return errors.New("gas limit should not be 0")
|
||||
}
|
||||
gasUtilized := float64(payload.GasUsed()) / float64(payload.GasLimit())
|
||||
gasUtilized := float64(payload.GasUsed) / float64(payload.GasLimit)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
|
||||
"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.ParentHash())),
|
||||
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
|
||||
"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.ParentHash)),
|
||||
"blockNumber": payload.BlockNumber,
|
||||
"gasUtilized": fmt.Sprintf("%.2f", gasUtilized),
|
||||
}).Debug("Synced new payload")
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
@@ -40,17 +41,17 @@ func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.SignedBea
|
||||
if err := wrapper.BeaconBlockIsNil(b); err != nil {
|
||||
return err
|
||||
}
|
||||
payload, err := b.Block().Body().Execution()
|
||||
payload, err := b.Block().Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if payload.IsNil() {
|
||||
if payload == nil {
|
||||
return errors.New("nil execution payload")
|
||||
}
|
||||
if err := validateTerminalBlockHash(b.Block().Slot(), payload); err != nil {
|
||||
return errors.Wrap(err, "could not validate terminal block hash")
|
||||
}
|
||||
mergeBlockParentHash, mergeBlockTD, err := s.getBlkParentHashAndTD(ctx, payload.ParentHash())
|
||||
mergeBlockParentHash, mergeBlockTD, err := s.getBlkParentHashAndTD(ctx, payload.ParentHash)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get merge block parent hash and total difficulty")
|
||||
}
|
||||
@@ -65,12 +66,12 @@ func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.SignedBea
|
||||
if !valid {
|
||||
err := fmt.Errorf("invalid TTD, configTTD: %s, currentTTD: %s, parentTTD: %s",
|
||||
params.BeaconConfig().TerminalTotalDifficulty, mergeBlockTD, mergeBlockParentTD)
|
||||
return invalidBlock{error: err}
|
||||
return invalidBlock{err}
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": b.Block().Slot(),
|
||||
"mergeBlockHash": common.BytesToHash(payload.ParentHash()).String(),
|
||||
"mergeBlockHash": common.BytesToHash(payload.ParentHash).String(),
|
||||
"mergeBlockParentHash": common.BytesToHash(mergeBlockParentHash).String(),
|
||||
"terminalTotalDifficulty": params.BeaconConfig().TerminalTotalDifficulty,
|
||||
"mergeBlockTotalDifficulty": mergeBlockTD,
|
||||
@@ -99,7 +100,7 @@ func (s *Service) getBlkParentHashAndTD(ctx context.Context, blkHash []byte) ([]
|
||||
if overflows {
|
||||
return nil, nil, errors.New("total difficulty overflows")
|
||||
}
|
||||
return blk.ParentHash[:], blkTDUint256, nil
|
||||
return blk.ParentHash, blkTDUint256, nil
|
||||
}
|
||||
|
||||
// validateTerminalBlockHash validates if the merge block is a valid terminal PoW block.
|
||||
@@ -109,14 +110,14 @@ func (s *Service) getBlkParentHashAndTD(ctx context.Context, blkHash []byte) ([]
|
||||
// assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
|
||||
// assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH
|
||||
// return
|
||||
func validateTerminalBlockHash(blkSlot types.Slot, payload interfaces.ExecutionData) error {
|
||||
func validateTerminalBlockHash(blkSlot types.Slot, payload *enginev1.ExecutionPayload) error {
|
||||
if bytesutil.ToBytes32(params.BeaconConfig().TerminalBlockHash.Bytes()) == [32]byte{} {
|
||||
return nil
|
||||
}
|
||||
if params.BeaconConfig().TerminalBlockHashActivationEpoch > slots.ToEpoch(blkSlot) {
|
||||
return errors.New("terminal block hash activation epoch not reached")
|
||||
}
|
||||
if !bytes.Equal(payload.ParentHash(), params.BeaconConfig().TerminalBlockHash.Bytes()) {
|
||||
if !bytes.Equal(payload.ParentHash, params.BeaconConfig().TerminalBlockHash.Bytes()) {
|
||||
return errors.New("parent hash does not match terminal block hash")
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -6,12 +6,12 @@ import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
gethtypes "github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/holiman/uint256"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
mocks "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
@@ -120,19 +120,12 @@ func Test_validateMergeBlock(t *testing.T) {
|
||||
|
||||
engine := &mocks.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}}
|
||||
service.cfg.ExecutionEngineCaller = engine
|
||||
a := [32]byte{'a'}
|
||||
b := [32]byte{'b'}
|
||||
mergeBlockParentHash := [32]byte{'3'}
|
||||
engine.BlockByHashMap[a] = &enginev1.ExecutionBlock{
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: b,
|
||||
},
|
||||
engine.BlockByHashMap[[32]byte{'a'}] = &enginev1.ExecutionBlock{
|
||||
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
|
||||
TotalDifficulty: "0x2",
|
||||
}
|
||||
engine.BlockByHashMap[b] = &enginev1.ExecutionBlock{
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: mergeBlockParentHash,
|
||||
},
|
||||
engine.BlockByHashMap[[32]byte{'b'}] = &enginev1.ExecutionBlock{
|
||||
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
|
||||
TotalDifficulty: "0x1",
|
||||
}
|
||||
blk := &ethpb.SignedBeaconBlockBellatrix{
|
||||
@@ -140,18 +133,18 @@ func Test_validateMergeBlock(t *testing.T) {
|
||||
Slot: 1,
|
||||
Body: ðpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: &enginev1.ExecutionPayload{
|
||||
ParentHash: a[:],
|
||||
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
bk, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.validateMergeBlock(ctx, bk))
|
||||
require.NoError(t, service.validateMergeBlock(ctx, b))
|
||||
|
||||
cfg.TerminalTotalDifficulty = "1"
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
err = service.validateMergeBlock(ctx, bk)
|
||||
err = service.validateMergeBlock(ctx, b)
|
||||
require.ErrorContains(t, "invalid TTD, configTTD: 1, currentTTD: 2, parentTTD: 1", err)
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
}
|
||||
@@ -174,9 +167,7 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
|
||||
p := [32]byte{'b'}
|
||||
td := "0x1"
|
||||
engine.BlockByHashMap[h] = &enginev1.ExecutionBlock{
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: p,
|
||||
},
|
||||
ParentHash: p[:],
|
||||
TotalDifficulty: td,
|
||||
}
|
||||
parentHash, totalDifficulty, err := service.getBlkParentHashAndTD(ctx, h[:])
|
||||
@@ -192,18 +183,14 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
|
||||
require.ErrorContains(t, "pow block is nil", err)
|
||||
|
||||
engine.BlockByHashMap[h] = &enginev1.ExecutionBlock{
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: p,
|
||||
},
|
||||
ParentHash: p[:],
|
||||
TotalDifficulty: "1",
|
||||
}
|
||||
_, _, err = service.getBlkParentHashAndTD(ctx, h[:])
|
||||
require.ErrorContains(t, "could not decode merge block total difficulty: hex string without 0x prefix", err)
|
||||
|
||||
engine.BlockByHashMap[h] = &enginev1.ExecutionBlock{
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: p,
|
||||
},
|
||||
ParentHash: p[:],
|
||||
TotalDifficulty: "0XFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
|
||||
}
|
||||
_, _, err = service.getBlkParentHashAndTD(ctx, h[:])
|
||||
@@ -211,22 +198,16 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
|
||||
}
|
||||
|
||||
func Test_validateTerminalBlockHash(t *testing.T) {
	wrapped, err := wrapper.WrappedExecutionPayload(&enginev1.ExecutionPayload{})
	require.NoError(t, err)
	require.NoError(t, validateTerminalBlockHash(1, wrapped))
	require.NoError(t, validateTerminalBlockHash(1, &enginev1.ExecutionPayload{}))

	cfg := params.BeaconConfig()
	cfg.TerminalBlockHash = [32]byte{0x01}
	params.OverrideBeaconConfig(cfg)
	require.ErrorContains(t, "terminal block hash activation epoch not reached", validateTerminalBlockHash(1, wrapped))
	require.ErrorContains(t, "terminal block hash activation epoch not reached", validateTerminalBlockHash(1, &enginev1.ExecutionPayload{}))

	cfg.TerminalBlockHashActivationEpoch = 0
	params.OverrideBeaconConfig(cfg)
	require.ErrorContains(t, "parent hash does not match terminal block hash", validateTerminalBlockHash(1, wrapped))
	require.ErrorContains(t, "parent hash does not match terminal block hash", validateTerminalBlockHash(1, &enginev1.ExecutionPayload{}))

	wrapped, err = wrapper.WrappedExecutionPayload(&enginev1.ExecutionPayload{
		ParentHash: cfg.TerminalBlockHash.Bytes(),
	})
	require.NoError(t, err)
	require.NoError(t, validateTerminalBlockHash(1, wrapped))
	require.NoError(t, validateTerminalBlockHash(1, &enginev1.ExecutionPayload{ParentHash: cfg.TerminalBlockHash.Bytes()}))
}
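These cases cover the TERMINAL_BLOCK_HASH override: when a non-zero terminal block hash is configured, the transition block must reference exactly that hash as its parent, and only once the activation epoch has been reached. A hedged, self-contained sketch of the rule (the config struct is a stand-in for Prysm's params package):

package main

import (
	"bytes"
	"errors"
	"fmt"
)

type terminalConfig struct {
	TerminalBlockHash                [32]byte
	TerminalBlockHashActivationEpoch uint64
}

// validateTerminalBlockHash applies the override rule exercised above.
func validateTerminalBlockHash(cfg terminalConfig, epoch uint64, parentHash []byte) error {
	if cfg.TerminalBlockHash == ([32]byte{}) {
		return nil // no override configured
	}
	if epoch < cfg.TerminalBlockHashActivationEpoch {
		return errors.New("terminal block hash activation epoch not reached")
	}
	if !bytes.Equal(parentHash, cfg.TerminalBlockHash[:]) {
		return errors.New("parent hash does not match terminal block hash")
	}
	return nil
}

func main() {
	cfg := terminalConfig{TerminalBlockHash: [32]byte{0x01}}
	fmt.Println(validateTerminalBlockHash(cfg, 1, make([]byte, 32)))         // mismatch error
	fmt.Println(validateTerminalBlockHash(cfg, 1, cfg.TerminalBlockHash[:])) // <nil>
}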
@@ -17,6 +17,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/forks/bellatrix"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
@@ -96,7 +97,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.onBlock")
|
||||
defer span.End()
|
||||
if err := wrapper.BeaconBlockIsNil(signed); err != nil {
|
||||
return invalidBlock{error: err}
|
||||
return invalidBlock{err}
|
||||
}
|
||||
b := signed.Block()
|
||||
|
||||
@@ -105,19 +106,13 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
|
||||
return err
|
||||
}
|
||||
|
||||
// Save current justified and finalized epochs for future use.
|
||||
currStoreJustifiedEpoch := s.ForkChoicer().JustifiedCheckpoint().Epoch
|
||||
currStoreFinalizedEpoch := s.ForkChoicer().FinalizedCheckpoint().Epoch
|
||||
preStateFinalizedEpoch := preState.FinalizedCheckpoint().Epoch
|
||||
preStateJustifiedEpoch := preState.CurrentJustifiedCheckpoint().Epoch
|
||||
|
||||
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
|
||||
if err != nil {
|
||||
return invalidBlock{error: err}
|
||||
return invalidBlock{err}
|
||||
}
|
||||
postStateVersion, postStateHeader, err := getStateVersionAndPayload(postState)
|
||||
if err != nil {
|
||||
@@ -125,7 +120,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
|
||||
}
|
||||
isValidPayload, err := s.notifyNewPayload(ctx, postStateVersion, postStateHeader, signed)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not validate new payload")
|
||||
return fmt.Errorf("could not verify new payload: %v", err)
|
||||
}
|
||||
if isValidPayload {
|
||||
if err := s.validateMergeTransitionBlock(ctx, preStateVersion, preStateHeader, signed); err != nil {
|
||||
@@ -135,6 +130,9 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
|
||||
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
|
||||
return err
|
||||
}
|
||||
// save current justified and finalized epochs for future use
|
||||
currJustifiedEpoch := s.ForkChoicer().JustifiedCheckpoint().Epoch
|
||||
currFinalizedEpoch := s.ForkChoicer().FinalizedCheckpoint().Epoch
|
||||
|
||||
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
|
||||
return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
|
||||
@@ -183,9 +181,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
|
||||
if err != nil {
|
||||
log.WithError(err).Warn("Could not update head")
|
||||
}
|
||||
if err := s.notifyEngineIfChangedHead(ctx, headRoot); err != nil {
|
||||
return err
|
||||
}
|
||||
s.notifyEngineIfChangedHead(ctx, headRoot)
|
||||
|
||||
if err := s.pruneCanonicalAttsFromPool(ctx, blockRoot, signed); err != nil {
|
||||
return err
|
||||
@@ -215,20 +211,16 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
|
||||
}()
|
||||
|
||||
// Save justified check point to db.
|
||||
postStateJustifiedEpoch := postState.CurrentJustifiedCheckpoint().Epoch
|
||||
if justified.Epoch > currStoreJustifiedEpoch || (justified.Epoch == postStateJustifiedEpoch && justified.Epoch > preStateJustifiedEpoch) {
|
||||
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, ðpb.Checkpoint{
|
||||
Epoch: justified.Epoch, Root: justified.Root[:],
|
||||
}); err != nil {
|
||||
if justified.Epoch > currJustifiedEpoch {
|
||||
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, postState.CurrentJustifiedCheckpoint()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Save finalized check point to db and more.
|
||||
postStateFinalizedEpoch := postState.FinalizedCheckpoint().Epoch
|
||||
// Update finalized check point.
|
||||
finalized := s.ForkChoicer().FinalizedCheckpoint()
|
||||
if finalized.Epoch > currStoreFinalizedEpoch || (finalized.Epoch == postStateFinalizedEpoch && finalized.Epoch > preStateFinalizedEpoch) {
|
||||
if err := s.updateFinalized(ctx, ðpb.Checkpoint{Epoch: finalized.Epoch, Root: finalized.Root[:]}); err != nil {
|
||||
if finalized.Epoch > currFinalizedEpoch {
|
||||
if err := s.updateFinalized(ctx, postState.FinalizedCheckpoint()); err != nil {
|
||||
return err
|
||||
}
|
||||
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(finalized.Root)
|
||||
@@ -294,7 +286,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
|
||||
}
|
||||
|
||||
if err := wrapper.BeaconBlockIsNil(blks[0]); err != nil {
|
||||
return invalidBlock{error: err}
|
||||
return invalidBlock{err}
|
||||
}
|
||||
b := blks[0].Block()
|
||||
|
||||
@@ -342,7 +334,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
|
||||
|
||||
set, preState, err = transition.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, b)
|
||||
if err != nil {
|
||||
return invalidBlock{error: err}
|
||||
return invalidBlock{err}
|
||||
}
|
||||
// Save potential boundary states.
|
||||
if slots.IsEpochStart(preState.Slot()) {
|
||||
@@ -363,7 +355,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
|
||||
}
|
||||
verify, err := sigSet.Verify()
|
||||
if err != nil {
|
||||
return invalidBlock{error: err}
|
||||
return invalidBlock{err}
|
||||
}
|
||||
if !verify {
|
||||
return errors.New("batch block signature verification failed")
|
||||
@@ -501,15 +493,9 @@ func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Contex
	ctx, span := trace.StartSpan(ctx, "blockChain.insertBlockAndAttestationsToForkChoiceStore")
	defer span.End()

	if !s.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(blk.ParentRoot())) {
		fCheckpoint := st.FinalizedCheckpoint()
		jCheckpoint := st.CurrentJustifiedCheckpoint()
		if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
			return err
		}
	}

	if err := s.cfg.ForkChoiceStore.InsertNode(ctx, st, root); err != nil {
	fCheckpoint := st.FinalizedCheckpoint()
	jCheckpoint := st.CurrentJustifiedCheckpoint()
	if err := s.insertBlockToForkChoiceStore(ctx, blk, root, st, fCheckpoint, jCheckpoint); err != nil {
		return err
	}
	// Feed in block's attestations to fork choice store.
@@ -527,6 +513,13 @@ func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Contex
	return nil
}

func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk interfaces.BeaconBlock, root [32]byte, st state.BeaconState, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
	if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
		return err
	}
	return s.cfg.ForkChoiceStore.InsertNode(ctx, st, root)
}
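The refactor folds the ancestor backfill and the node insertion into one helper. The two steps travel together because a fork choice store keyed by root cannot accept a node whose parent it has never seen; a toy illustration (not Prysm's implementation):

package main

import (
	"errors"
	"fmt"
)

type node struct{ parent [32]byte }

type toyStore struct{ nodes map[[32]byte]*node }

// insert refuses nodes with an unknown parent, which is exactly why the
// caller fills in missing ancestor blocks before calling InsertNode.
func (s *toyStore) insert(root, parent [32]byte) error {
	if _, ok := s.nodes[root]; ok {
		return nil // already known, nothing to do
	}
	if _, ok := s.nodes[parent]; !ok && parent != ([32]byte{}) {
		return errors.New("unknown parent: fill in missing ancestors first")
	}
	s.nodes[root] = &node{parent: parent}
	return nil
}

func main() {
	s := &toyStore{nodes: map[[32]byte]*node{{}: {}}}
	fmt.Println(s.insert([32]byte{'b'}, [32]byte{'a'})) // error, ancestor 'a' missing
	fmt.Println(s.insert([32]byte{'a'}, [32]byte{}))    // backfill the ancestor
	fmt.Println(s.insert([32]byte{'b'}, [32]byte{'a'})) // now succeeds
}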
// InsertSlashingsToForkChoiceStore inserts attester slashing indices to fork choice store.
|
||||
// To call this function, it is the caller's responsibility to ensure the slashing object is valid.
|
||||
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []*ethpb.AttesterSlashing) {
|
||||
@@ -586,15 +579,11 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
	}

	// Skip validation if block has an empty payload.
	payload, err := blk.Block().Body().Execution()
	payload, err := blk.Block().Body().ExecutionPayload()
	if err != nil {
		return invalidBlock{error: err}
		return invalidBlock{err}
	}
	isEmpty, err := wrapper.IsEmptyExecutionData(payload)
	if err != nil {
		return err
	}
	if isEmpty {
	if bellatrix.IsEmptyPayload(payload) {
		return nil
	}
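bellatrix.IsEmptyPayload replaces the error-returning wrapper check; a payload counts as empty when every field is still its zero value, which is how pre-merge Bellatrix blocks look and why they skip merge-transition validation. An abridged sketch with a trimmed-down field set:

package main

import (
	"bytes"
	"fmt"
)

type executionPayload struct {
	ParentHash   []byte
	FeeRecipient []byte
	BlockHash    []byte
	Transactions [][]byte
}

func allZero(b []byte) bool {
	return len(bytes.TrimLeft(b, "\x00")) == 0
}

// isEmptyPayload reports whether every (abridged) field is zero-valued.
func isEmptyPayload(p executionPayload) bool {
	return allZero(p.ParentHash) && allZero(p.FeeRecipient) &&
		allZero(p.BlockHash) && len(p.Transactions) == 0
}

func main() {
	fmt.Println(isEmptyPayload(executionPayload{}))                        // true: pre-merge block
	fmt.Println(isEmptyPayload(executionPayload{BlockHash: []byte{0x01}})) // false
}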
@@ -114,7 +114,7 @@ func (s *Service) VerifyFinalizedBlkDescendant(ctx context.Context, root [32]byt
|
||||
bytesutil.Trunc(root[:]), fSlot, bytesutil.Trunc(bFinalizedRoot),
|
||||
bytesutil.Trunc(fRoot[:]))
|
||||
tracing.AnnotateError(span, err)
|
||||
return invalidBlock{error: err}
|
||||
return invalidBlock{err}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -129,7 +129,7 @@ func (s *Service) verifyBlkFinalizedSlot(b interfaces.BeaconBlock) error {
|
||||
}
|
||||
if finalizedSlot >= b.Slot() {
|
||||
err = fmt.Errorf("block is equal or earlier than finalized block, slot %d < slot %d", b.Slot(), finalizedSlot)
|
||||
return invalidBlock{error: err}
|
||||
return invalidBlock{err}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -9,8 +9,6 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
gethtypes "github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/pkg/errors"
|
||||
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
@@ -1150,55 +1148,6 @@ func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
|
||||
require.Equal(t, 3*params.BeaconConfig().SlotsPerEpoch, service.nextEpochBoundarySlot)
|
||||
}
|
||||
|
||||
func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New()
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
WithDepositCache(depositCache),
|
||||
WithStateNotifier(&mock.MockStateNotifier{}),
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
gs, keys := util.DeterministicGenesisState(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
|
||||
testState := gs.Copy()
|
||||
for i := types.Slot(1); i <= 4*params.BeaconConfig().SlotsPerEpoch; i++ {
|
||||
blk, err := util.GenerateFullBlock(testState, keys, util.DefaultBlockGenConfig(), i)
|
||||
require.NoError(t, err)
|
||||
r, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.NewSlot(ctx, i))
|
||||
require.NoError(t, service.onBlock(ctx, wsb, r))
|
||||
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
cp := service.CurrentJustifiedCheckpt()
|
||||
require.Equal(t, types.Epoch(3), cp.Epoch)
|
||||
cp = service.FinalizedCheckpt()
|
||||
require.Equal(t, types.Epoch(2), cp.Epoch)
|
||||
|
||||
// The update should persist in DB.
|
||||
j, err := service.cfg.BeaconDB.JustifiedCheckpoint(ctx)
|
||||
require.NoError(t, err)
|
||||
cp = service.CurrentJustifiedCheckpt()
|
||||
require.Equal(t, j.Epoch, cp.Epoch)
|
||||
f, err := service.cfg.BeaconDB.FinalizedCheckpoint(ctx)
|
||||
require.NoError(t, err)
|
||||
cp = service.FinalizedCheckpt()
|
||||
require.Equal(t, f.Epoch, cp.Epoch)
|
||||
}
|
||||
|
||||
func TestOnBlock_CanFinalize(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
@@ -1489,11 +1438,9 @@ func Test_getStateVersionAndPayload(t *testing.T) {
|
||||
name: "bellatrix state",
|
||||
st: func() state.BeaconState {
|
||||
s, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(&enginev1.ExecutionPayloadHeader{
|
||||
require.NoError(t, s.SetLatestExecutionPayloadHeader(&enginev1.ExecutionPayloadHeader{
|
||||
BlockNumber: 1,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.SetLatestExecutionPayloadHeader(wrappedHeader))
|
||||
}))
|
||||
return s
|
||||
}(),
|
||||
version: version.Bellatrix,
|
||||
@@ -1531,9 +1478,6 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
aHash := common.BytesToHash([]byte("a"))
|
||||
bHash := common.BytesToHash([]byte("b"))
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
stateVersion int
|
||||
@@ -1545,7 +1489,6 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
|
||||
name: "state older than Bellatrix, nil payload",
|
||||
stateVersion: 1,
|
||||
payload: nil,
|
||||
errString: "attempted to wrap nil",
|
||||
},
|
||||
{
|
||||
name: "state older than Bellatrix, empty payload",
|
||||
@@ -1565,14 +1508,13 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
|
||||
name: "state older than Bellatrix, non empty payload",
|
||||
stateVersion: 1,
|
||||
payload: &enginev1.ExecutionPayload{
|
||||
ParentHash: aHash[:],
|
||||
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "state is Bellatrix, nil payload",
|
||||
stateVersion: 2,
|
||||
payload: nil,
|
||||
errString: "attempted to wrap nil",
|
||||
},
|
||||
{
|
||||
name: "state is Bellatrix, empty payload",
|
||||
@@ -1592,7 +1534,7 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
|
||||
name: "state is Bellatrix, non empty payload, empty header",
|
||||
stateVersion: 2,
|
||||
payload: &enginev1.ExecutionPayload{
|
||||
ParentHash: aHash[:],
|
||||
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
|
||||
},
|
||||
header: &enginev1.ExecutionPayloadHeader{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
@@ -1610,7 +1552,7 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
|
||||
name: "state is Bellatrix, non empty payload, non empty header",
|
||||
stateVersion: 2,
|
||||
payload: &enginev1.ExecutionPayload{
|
||||
ParentHash: aHash[:],
|
||||
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
|
||||
},
|
||||
header: &enginev1.ExecutionPayloadHeader{
|
||||
BlockNumber: 1,
|
||||
@@ -1620,7 +1562,7 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
|
||||
name: "state is Bellatrix, non empty payload, nil header",
|
||||
stateVersion: 2,
|
||||
payload: &enginev1.ExecutionPayload{
|
||||
ParentHash: aHash[:],
|
||||
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
|
||||
},
|
||||
errString: "nil header or block body",
|
||||
},
|
||||
@@ -1628,16 +1570,12 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
e := &mockPOW.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}}
|
||||
e.BlockByHashMap[aHash] = &enginev1.ExecutionBlock{
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: bHash,
|
||||
},
|
||||
e.BlockByHashMap[[32]byte{'a'}] = &enginev1.ExecutionBlock{
|
||||
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
|
||||
TotalDifficulty: "0x2",
|
||||
}
|
||||
e.BlockByHashMap[bHash] = &enginev1.ExecutionBlock{
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash([]byte("3")),
|
||||
},
|
||||
e.BlockByHashMap[[32]byte{'b'}] = &enginev1.ExecutionBlock{
|
||||
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
|
||||
TotalDifficulty: "0x1",
|
||||
}
|
||||
service.cfg.ExecutionEngineCaller = e
|
||||
|
||||
@@ -166,35 +166,33 @@ func (s *Service) UpdateHead(ctx context.Context) error {
|
||||
}).Debug("Head changed due to attestations")
|
||||
}
|
||||
s.headLock.RUnlock()
|
||||
if err := s.notifyEngineIfChangedHead(ctx, newHeadRoot); err != nil {
|
||||
return err
|
||||
}
|
||||
s.notifyEngineIfChangedHead(ctx, newHeadRoot)
|
||||
return nil
|
||||
}
|
||||
|
||||
// This calls notify Forkchoice Update in the event that the head has changed
|
||||
func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32]byte) error {
|
||||
func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32]byte) {
|
||||
s.headLock.RLock()
|
||||
if newHeadRoot == [32]byte{} || s.headRoot() == newHeadRoot {
|
||||
s.headLock.RUnlock()
|
||||
return nil
|
||||
return
|
||||
}
|
||||
s.headLock.RUnlock()
|
||||
|
||||
if !s.hasBlockInInitSyncOrDB(ctx, newHeadRoot) {
|
||||
log.Debug("New head does not exist in DB. Do nothing")
|
||||
return nil // We don't have the block, don't notify the engine and update head.
|
||||
return // We don't have the block, don't notify the engine and update head.
|
||||
}
|
||||
|
||||
newHeadBlock, err := s.getBlock(ctx, newHeadRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get new head block")
|
||||
return nil
|
||||
return
|
||||
}
|
||||
headState, err := s.cfg.StateGen.StateByRoot(ctx, newHeadRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get state from db")
|
||||
return nil
|
||||
return
|
||||
}
|
||||
arg := ¬ifyForkchoiceUpdateArg{
|
||||
headState: headState,
|
||||
@@ -203,12 +201,11 @@ func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32
	}
	_, err = s.notifyForkchoiceUpdate(s.ctx, arg)
	if err != nil {
		return err
		log.WithError(err).Error("could not notify forkchoice update")
	}
	if err := s.saveHead(ctx, newHeadRoot, newHeadBlock, headState); err != nil {
		log.WithError(err).Error("could not save head")
	}
	return nil
}
|
||||
|
||||
// This processes fork choice attestations from the pool to account for validator votes and fork choice.
|
||||
|
||||
@@ -131,7 +131,7 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
|
||||
require.NoError(t, service.notifyEngineIfChangedHead(ctx, service.headRoot()))
|
||||
service.notifyEngineIfChangedHead(ctx, service.headRoot())
|
||||
hookErr := "could not notify forkchoice update"
|
||||
invalidStateErr := "Could not get state from db"
|
||||
require.LogsDoNotContain(t, hook, invalidStateErr)
|
||||
@@ -139,7 +139,7 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
|
||||
gb, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.saveInitSyncBlock(ctx, [32]byte{'a'}, gb))
|
||||
require.NoError(t, service.notifyEngineIfChangedHead(ctx, [32]byte{'a'}))
|
||||
service.notifyEngineIfChangedHead(ctx, [32]byte{'a'})
|
||||
require.LogsContain(t, hook, invalidStateErr)
|
||||
|
||||
hook.Reset()
|
||||
@@ -164,7 +164,7 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
|
||||
state: st,
|
||||
}
|
||||
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1})
|
||||
require.NoError(t, service.notifyEngineIfChangedHead(ctx, r1))
|
||||
service.notifyEngineIfChangedHead(ctx, r1)
|
||||
require.LogsDoNotContain(t, hook, invalidStateErr)
|
||||
require.LogsDoNotContain(t, hook, hookErr)
|
||||
|
||||
@@ -182,7 +182,7 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
|
||||
state: st,
|
||||
}
|
||||
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1})
|
||||
require.NoError(t, service.notifyEngineIfChangedHead(ctx, r1))
|
||||
service.notifyEngineIfChangedHead(ctx, r1)
|
||||
require.LogsDoNotContain(t, hook, invalidStateErr)
|
||||
require.LogsDoNotContain(t, hook, hookErr)
|
||||
vId, payloadID, has := service.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(2)
|
||||
@@ -192,7 +192,7 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
|
||||
|
||||
// Test zero headRoot returns immediately.
|
||||
headRoot := service.headRoot()
|
||||
require.NoError(t, service.notifyEngineIfChangedHead(ctx, [32]byte{}))
|
||||
service.notifyEngineIfChangedHead(ctx, [32]byte{})
|
||||
require.Equal(t, service.headRoot(), headRoot)
|
||||
}
|
||||
|
||||
|
||||
@@ -63,7 +63,6 @@ type ChainService struct {
|
||||
ForkChoiceStore forkchoice.ForkChoicer
|
||||
ReceiveBlockMockErr error
|
||||
OptimisticCheckRootReceived [32]byte
|
||||
FinalizedRoots map[[32]byte]bool
|
||||
}
|
||||
|
||||
// ForkChoicer mocks the same method in the chain service
|
||||
@@ -459,8 +458,3 @@ func (s *ChainService) UpdateHead(_ context.Context) error { return nil }
|
||||
|
||||
// ReceiveAttesterSlashing mocks the same method in the chain service.
|
||||
func (s *ChainService) ReceiveAttesterSlashing(context.Context, *ethpb.AttesterSlashing) {}
|
||||
|
||||
// IsFinalized mocks the same method in the chain service.
|
||||
func (s *ChainService) IsFinalized(_ context.Context, blockRoot [32]byte) bool {
|
||||
return s.FinalizedRoots[blockRoot]
|
||||
}
|
||||
|
||||
@@ -31,6 +31,7 @@ go_library(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/forks/bellatrix:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
@@ -88,6 +89,7 @@ go_test(
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/forks/bellatrix:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/forks/bellatrix"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
@@ -38,15 +39,7 @@ func IsMergeTransitionComplete(st state.BeaconState) (bool, error) {
	if err != nil {
		return false, err
	}
	wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(h)
	if err != nil {
		return false, err
	}
	isEmpty, err := wrapper.IsEmptyExecutionData(wrappedHeader)
	if err != nil {
		return false, err
	}
	return !isEmpty, nil
	return !bellatrix.IsEmptyHeader(h), nil
}
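Merge-transition completeness reduces to whether the state's latest execution payload header is still the default value, so the wrapper round-trip and its error paths can be dropped. A spec-level sketch with a cut-down header type:

package main

import "fmt"

type payloadHeader struct {
	BlockHash   [32]byte
	BlockNumber uint64
	StateRoot   [32]byte
}

// isMergeTransitionComplete is true once any payload has been processed, i.e.
// the stored header differs from the zero value.
func isMergeTransitionComplete(latest payloadHeader) bool {
	return latest != (payloadHeader{})
}

func main() {
	fmt.Println(isMergeTransitionComplete(payloadHeader{}))               // false: still pre-merge
	fmt.Println(isMergeTransitionComplete(payloadHeader{BlockNumber: 1})) // true
}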
// IsMergeTransitionBlockUsingPreStatePayloadHeader returns true if the input block is the terminal merge block.
|
||||
@@ -56,15 +49,7 @@ func IsMergeTransitionBlockUsingPreStatePayloadHeader(h *enginev1.ExecutionPaylo
|
||||
if h == nil || body == nil {
|
||||
return false, errors.New("nil header or block body")
|
||||
}
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(h)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
isEmpty, err := wrapper.IsEmptyExecutionData(wrappedHeader)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !isEmpty {
|
||||
if !bellatrix.IsEmptyHeader(h) {
|
||||
return false, nil
|
||||
}
|
||||
return IsExecutionBlock(body)
|
||||
@@ -79,7 +64,7 @@ func IsExecutionBlock(body interfaces.BeaconBlockBody) (bool, error) {
|
||||
if body == nil {
|
||||
return false, errors.New("nil block body")
|
||||
}
|
||||
payload, err := body.Execution()
|
||||
payload, err := body.ExecutionPayload()
|
||||
switch {
|
||||
case errors.Is(err, wrapper.ErrUnsupportedField):
|
||||
return false, nil
|
||||
@@ -87,11 +72,7 @@ func IsExecutionBlock(body interfaces.BeaconBlockBody) (bool, error) {
|
||||
return false, err
|
||||
default:
|
||||
}
|
||||
isEmpty, err := wrapper.IsEmptyExecutionData(payload)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return !isEmpty, nil
|
||||
return !bellatrix.IsEmptyPayload(payload), nil
|
||||
}
|
||||
|
||||
// IsExecutionEnabled returns true if the beacon chain can begin executing.
|
||||
@@ -117,15 +98,7 @@ func IsExecutionEnabled(st state.BeaconState, body interfaces.BeaconBlockBody) (
|
||||
// IsExecutionEnabledUsingHeader returns true if the execution is enabled using post processed payload header and block body.
|
||||
// This is an optimized version of IsExecutionEnabled where beacon state is not required as an argument.
|
||||
func IsExecutionEnabledUsingHeader(header *enginev1.ExecutionPayloadHeader, body interfaces.BeaconBlockBody) (bool, error) {
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(header)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
isEmpty, err := wrapper.IsEmptyExecutionData(wrappedHeader)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !isEmpty {
|
||||
if !bellatrix.IsEmptyHeader(header) {
|
||||
return true, nil
|
||||
}
|
||||
return IsExecutionBlock(body)
|
||||
@@ -143,7 +116,7 @@ func IsPreBellatrixVersion(v int) bool {
|
||||
// # Verify consistency of the parent hash with respect to the previous execution payload header
|
||||
// if is_merge_complete(state):
|
||||
// assert payload.parent_hash == state.latest_execution_payload_header.block_hash
|
||||
func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload interfaces.ExecutionData) error {
|
||||
func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload *enginev1.ExecutionPayload) error {
|
||||
complete, err := IsMergeTransitionComplete(st)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -156,7 +129,7 @@ func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload interfaces.
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(payload.ParentHash(), header.BlockHash) {
|
||||
if !bytes.Equal(payload.ParentHash, header.BlockHash) {
|
||||
return errors.New("incorrect block hash")
|
||||
}
|
||||
return nil
|
||||
@@ -170,20 +143,20 @@ func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload interfaces.
// assert payload.random == get_randao_mix(state, get_current_epoch(state))
// # Verify timestamp
// assert payload.timestamp == compute_timestamp_at_slot(state, state.slot)
func ValidatePayload(st state.BeaconState, payload interfaces.ExecutionData) error {
func ValidatePayload(st state.BeaconState, payload *enginev1.ExecutionPayload) error {
	random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
	if err != nil {
		return err
	}

	if !bytes.Equal(payload.PrevRandao(), random) {
	if !bytes.Equal(payload.PrevRandao, random) {
		return ErrInvalidPayloadPrevRandao
	}
	t, err := slots.ToTime(st.GenesisTime(), st.Slot())
	if err != nil {
		return err
	}
	if payload.Timestamp() != uint64(t.Unix()) {
	if payload.Timestamp != uint64(t.Unix()) {
		return ErrInvalidPayloadTimeStamp
	}
	return nil
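The two assertions quoted in the comment boil down to simple equalities: the payload's prev_randao must match the state's randao mix for the current epoch, and the timestamp must equal genesis_time + slot * SECONDS_PER_SLOT. A worked sketch with the usual 12-second mainnet slot (the real code goes through slots.ToTime and helpers.RandaoMix):

package main

import (
	"bytes"
	"errors"
	"fmt"
)

const secondsPerSlot = 12

func validatePayloadFields(prevRandao, randaoMix []byte, timestamp, genesisTime, slot uint64) error {
	if !bytes.Equal(prevRandao, randaoMix) {
		return errors.New("invalid payload prev randao")
	}
	if timestamp != genesisTime+slot*secondsPerSlot {
		return errors.New("invalid payload timestamp")
	}
	return nil
}

func main() {
	mix := []byte{0xaa}
	// Slot 10 with genesis at 1606824023 must carry timestamp 1606824023+120.
	fmt.Println(validatePayloadFields(mix, mix, 1606824143, 1606824023, 10)) // <nil>
	fmt.Println(validatePayloadFields(mix, mix, 1606824144, 1606824023, 10)) // timestamp error
}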
@@ -221,29 +194,27 @@ func ValidatePayload(st state.BeaconState, payload interfaces.ExecutionData) err
|
||||
// block_hash=payload.block_hash,
|
||||
// transactions_root=hash_tree_root(payload.transactions),
|
||||
// )
|
||||
func ProcessPayload(st state.BeaconState, payload interfaces.ExecutionData) (state.BeaconState, error) {
|
||||
func ProcessPayload(st state.BeaconState, payload *enginev1.ExecutionPayload) (state.BeaconState, error) {
|
||||
if err := ValidatePayloadWhenMergeCompletes(st, payload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := ValidatePayload(st, payload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
header, err := wrapper.PayloadToHeader(payload)
|
||||
|
||||
header, err := bellatrix.PayloadToHeader(payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(header)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := st.SetLatestExecutionPayloadHeader(wrappedHeader); err != nil {
|
||||
if err := st.SetLatestExecutionPayloadHeader(header); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return st, nil
|
||||
}
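ProcessPayload ends by converting the full payload into a header before storing it on the state; bellatrix.PayloadToHeader keeps the scalar fields and replaces the transactions list with a single root so the state only holds fixed-size data. A hedged sketch of that conversion (sha256 stands in for the SSZ transactions root used by the real code):

package main

import (
	"crypto/sha256"
	"fmt"
)

type payload struct {
	ParentHash   [32]byte
	BlockNumber  uint64
	Transactions [][]byte
}

type header struct {
	ParentHash       [32]byte
	BlockNumber      uint64
	TransactionsRoot [32]byte
}

// payloadToHeader copies the scalar fields and summarizes the transactions.
func payloadToHeader(p payload) header {
	h := sha256.New()
	for _, tx := range p.Transactions {
		h.Write(tx) // placeholder for ssz.TransactionsRoot
	}
	var root [32]byte
	copy(root[:], h.Sum(nil))
	return header{ParentHash: p.ParentHash, BlockNumber: p.BlockNumber, TransactionsRoot: root}
}

func main() {
	p := payload{BlockNumber: 1, Transactions: [][]byte{{0x01}}}
	fmt.Printf("%x\n", payloadToHeader(p).TransactionsRoot)
}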
// ValidatePayloadHeaderWhenMergeCompletes validates the payload header when the merge completes.
|
||||
func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header interfaces.ExecutionData) error {
|
||||
func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header *enginev1.ExecutionPayloadHeader) error {
|
||||
// Skip validation if the state is not merge compatible.
|
||||
complete, err := IsMergeTransitionComplete(st)
|
||||
if err != nil {
|
||||
@@ -257,20 +228,20 @@ func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header interf
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(header.ParentHash(), h.BlockHash) {
|
||||
if !bytes.Equal(header.ParentHash, h.BlockHash) {
|
||||
return ErrInvalidPayloadBlockHash
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidatePayloadHeader validates the payload header.
|
||||
func ValidatePayloadHeader(st state.BeaconState, header interfaces.ExecutionData) error {
|
||||
func ValidatePayloadHeader(st state.BeaconState, header *enginev1.ExecutionPayloadHeader) error {
|
||||
// Validate header's random mix matches with state in current epoch
|
||||
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(header.PrevRandao(), random) {
|
||||
if !bytes.Equal(header.PrevRandao, random) {
|
||||
return ErrInvalidPayloadPrevRandao
|
||||
}
|
||||
|
||||
@@ -279,20 +250,22 @@ func ValidatePayloadHeader(st state.BeaconState, header interfaces.ExecutionData
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if header.Timestamp() != uint64(t.Unix()) {
|
||||
if header.Timestamp != uint64(t.Unix()) {
|
||||
return ErrInvalidPayloadTimeStamp
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProcessPayloadHeader processes the payload header.
|
||||
func ProcessPayloadHeader(st state.BeaconState, header interfaces.ExecutionData) (state.BeaconState, error) {
|
||||
func ProcessPayloadHeader(st state.BeaconState, header *enginev1.ExecutionPayloadHeader) (state.BeaconState, error) {
|
||||
if err := ValidatePayloadHeaderWhenMergeCompletes(st, header); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := ValidatePayloadHeader(st, header); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := st.SetLatestExecutionPayloadHeader(header); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -305,9 +278,9 @@ func GetBlockPayloadHash(blk interfaces.BeaconBlock) ([32]byte, error) {
|
||||
if IsPreBellatrixVersion(blk.Version()) {
|
||||
return payloadHash, nil
|
||||
}
|
||||
payload, err := blk.Body().Execution()
|
||||
payload, err := blk.Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return payloadHash, err
|
||||
}
|
||||
return bytesutil.ToBytes32(payload.BlockHash()), nil
|
||||
return bytesutil.ToBytes32(payload.BlockHash), nil
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/forks/bellatrix"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/encoding/ssz"
|
||||
@@ -159,9 +160,7 @@ func Test_IsMergeComplete(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(tt.payload)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(wrappedHeader))
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(tt.payload))
|
||||
got, err := blocks.IsMergeTransitionComplete(st)
|
||||
require.NoError(t, err)
|
||||
if got != tt.want {
|
||||
@@ -443,9 +442,7 @@ func Test_IsExecutionEnabled(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(tt.header)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(wrappedHeader))
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(tt.header))
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Body.ExecutionPayload = tt.payload
|
||||
body, err := wrapper.WrappedBeaconBlockBody(blk.Block.Body)
|
||||
@@ -570,12 +567,8 @@ func Test_ValidatePayloadWhenMergeCompletes(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(tt.header)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(wrappedHeader))
|
||||
wrappedPayload, err := wrapper.WrappedExecutionPayload(tt.payload)
|
||||
require.NoError(t, err)
|
||||
err = blocks.ValidatePayloadWhenMergeCompletes(st, wrappedPayload)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(tt.header))
|
||||
err := blocks.ValidatePayloadWhenMergeCompletes(st, tt.payload)
|
||||
if err != nil {
|
||||
require.Equal(t, tt.err.Error(), err.Error())
|
||||
} else {
|
||||
@@ -623,9 +616,7 @@ func Test_ValidatePayload(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
wrappedPayload, err := wrapper.WrappedExecutionPayload(tt.payload)
|
||||
require.NoError(t, err)
|
||||
err = blocks.ValidatePayload(st, wrappedPayload)
|
||||
err := blocks.ValidatePayload(st, tt.payload)
|
||||
if err != nil {
|
||||
require.Equal(t, tt.err.Error(), err.Error())
|
||||
} else {
|
||||
@@ -673,14 +664,12 @@ func Test_ProcessPayload(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
wrappedPayload, err := wrapper.WrappedExecutionPayload(tt.payload)
|
||||
require.NoError(t, err)
|
||||
st, err := blocks.ProcessPayload(st, wrappedPayload)
|
||||
st, err := blocks.ProcessPayload(st, tt.payload)
|
||||
if err != nil {
|
||||
require.Equal(t, tt.err.Error(), err.Error())
|
||||
} else {
|
||||
require.Equal(t, tt.err, err)
|
||||
want, err := wrapper.PayloadToHeader(wrappedPayload)
|
||||
want, err := bellatrix.PayloadToHeader(tt.payload)
|
||||
require.Equal(t, tt.err, err)
|
||||
got, err := st.LatestExecutionPayloadHeader()
|
||||
require.NoError(t, err)
|
||||
@@ -728,9 +717,7 @@ func Test_ProcessPayloadHeader(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(tt.header)
|
||||
require.NoError(t, err)
|
||||
st, err := blocks.ProcessPayloadHeader(st, wrappedHeader)
|
||||
st, err := blocks.ProcessPayloadHeader(st, tt.header)
|
||||
if err != nil {
|
||||
require.Equal(t, tt.err.Error(), err.Error())
|
||||
} else {
|
||||
@@ -781,9 +768,7 @@ func Test_ValidatePayloadHeader(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(tt.header)
|
||||
require.NoError(t, err)
|
||||
err = blocks.ValidatePayloadHeader(st, wrappedHeader)
|
||||
err := blocks.ValidatePayloadHeader(st, tt.header)
|
||||
require.Equal(t, tt.err, err)
|
||||
})
|
||||
}
|
||||
@@ -792,9 +777,7 @@ func Test_ValidatePayloadHeader(t *testing.T) {
|
||||
func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
emptySt := st.Copy()
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(&enginev1.ExecutionPayloadHeader{BlockHash: []byte{'a'}})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(wrappedHeader))
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(&enginev1.ExecutionPayloadHeader{BlockHash: []byte{'a'}}))
|
||||
tests := []struct {
|
||||
name string
|
||||
state state.BeaconState
|
||||
@@ -833,9 +816,7 @@ func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(tt.header)
|
||||
require.NoError(t, err)
|
||||
err = blocks.ValidatePayloadHeaderWhenMergeCompletes(tt.state, wrappedHeader)
|
||||
err := blocks.ValidatePayloadHeaderWhenMergeCompletes(tt.state, tt.header)
|
||||
require.Equal(t, tt.err, err)
|
||||
})
|
||||
}
|
||||
@@ -843,9 +824,7 @@ func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
|
||||
|
||||
func Test_PayloadToHeader(t *testing.T) {
|
||||
p := emptyPayload()
|
||||
wrappedPayload, err := wrapper.WrappedExecutionPayload(p)
|
||||
require.NoError(t, err)
|
||||
h, err := wrapper.PayloadToHeader(wrappedPayload)
|
||||
h, err := bellatrix.PayloadToHeader(p)
|
||||
require.NoError(t, err)
|
||||
txRoot, err := ssz.TransactionsRoot(p.Transactions)
|
||||
require.NoError(t, err)
|
||||
@@ -884,9 +863,7 @@ func Test_PayloadToHeader(t *testing.T) {
|
||||
|
||||
func BenchmarkBellatrixComplete(b *testing.B) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(b, 1)
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(emptyPayloadHeader())
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, st.SetLatestExecutionPayloadHeader(wrappedHeader))
|
||||
require.NoError(b, st.SetLatestExecutionPayloadHeader(emptyPayloadHeader()))
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
@@ -20,12 +19,6 @@ func UnrealizedCheckpoints(st state.BeaconState) (*ethpb.Checkpoint, *ethpb.Chec
|
||||
return nil, nil, errNilState
|
||||
}
|
||||
|
||||
if slots.ToEpoch(st.Slot()) <= params.BeaconConfig().GenesisEpoch+1 {
|
||||
jc := st.CurrentJustifiedCheckpoint()
|
||||
fc := st.FinalizedCheckpoint()
|
||||
return jc, fc, nil
|
||||
}
|
||||
|
||||
activeBalance, prevTarget, currentTarget, err := st.UnrealizedCheckpointBalances()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
|
||||
@@ -290,17 +290,24 @@ func ProcessBlockForStateRoot(
|
||||
return nil, errors.Wrap(err, "could not check if execution is enabled")
|
||||
}
|
||||
if enabled {
|
||||
executionData, err := blk.Body().Execution()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if blk.IsBlinded() {
|
||||
state, err = b.ProcessPayloadHeader(state, executionData)
|
||||
header, err := blk.Body().ExecutionPayloadHeader()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
state, err = b.ProcessPayloadHeader(state, header)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process execution payload header")
|
||||
}
|
||||
} else {
|
||||
state, err = b.ProcessPayload(state, executionData)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process execution data")
|
||||
payload, err := blk.Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
state, err = b.ProcessPayload(state, payload)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process execution payload")
|
||||
}
|
||||
}
|
||||
}
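The branch added here distinguishes blinded (builder) blocks, which only carry an execution payload header, from full blocks, which carry the payload itself. A toy illustration of the dispatch (types are stand-ins for Prysm's interfaces):

package main

import (
	"errors"
	"fmt"
)

type blockBody struct {
	blinded bool
	header  string // stand-in for *enginev1.ExecutionPayloadHeader
	payload string // stand-in for *enginev1.ExecutionPayload
}

// processExecution picks the header path for blinded blocks and the payload
// path otherwise, mirroring the blk.IsBlinded() branch above.
func processExecution(b blockBody) (string, error) {
	if b.blinded {
		if b.header == "" {
			return "", errors.New("could not process execution payload header")
		}
		return "processed header " + b.header, nil
	}
	if b.payload == "" {
		return "", errors.New("could not process execution payload")
	}
	return "processed payload " + b.payload, nil
}

func main() {
	fmt.Println(processExecution(blockBody{blinded: true, header: "h1"}))
	fmt.Println(processExecution(blockBody{payload: "p1"}))
}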
@@ -53,7 +53,6 @@ type ReadOnlyDatabase interface {
|
||||
PowchainData(ctx context.Context) (*ethpb.ETH1ChainData, error)
|
||||
// Fee recipient operations.
|
||||
FeeRecipientByValidatorID(ctx context.Context, id types.ValidatorIndex) (common.Address, error)
|
||||
RegistrationByValidatorID(ctx context.Context, id types.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
|
||||
// origin checkpoint sync support
|
||||
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
|
||||
BackfillBlockRoot(ctx context.Context) ([32]byte, error)
|
||||
|
||||
@@ -22,11 +22,9 @@ go_library(
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/forkchoice:go_default_library",
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
@@ -62,7 +60,6 @@ go_test(
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/v3:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
|
||||
@@ -8,6 +8,7 @@ var errInvalidProposerBoostRoot = errors.New("invalid proposer boost root")
|
||||
var errUnknownFinalizedRoot = errors.New("unknown finalized root")
|
||||
var errUnknownJustifiedRoot = errors.New("unknown justified root")
|
||||
var errInvalidOptimisticStatus = errors.New("invalid optimistic status")
|
||||
var errUnknownPayloadHash = errors.New("unknown payload hash")
|
||||
var errInvalidNilCheckpoint = errors.New("invalid nil checkpoint")
|
||||
var errInvalidUnrealizedJustifiedEpoch = errors.New("invalid unrealized justified epoch")
|
||||
var errInvalidUnrealizedFinalizedEpoch = errors.New("invalid unrealized finalized epoch")
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
@@ -24,17 +23,15 @@ import (
|
||||
// New initializes a new fork choice store.
|
||||
func New() *ForkChoice {
|
||||
s := &Store{
|
||||
justifiedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
bestJustifiedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
unrealizedJustifiedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
unrealizedFinalizedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
prevJustifiedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
proposerBoostRoot: [32]byte{},
|
||||
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
|
||||
nodeByPayload: make(map[[fieldparams.RootLength]byte]*Node),
|
||||
slashedIndices: make(map[types.ValidatorIndex]bool),
|
||||
pruneThreshold: defaultPruneThreshold,
|
||||
justifiedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
bestJustifiedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
prevJustifiedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
proposerBoostRoot: [32]byte{},
|
||||
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
|
||||
nodeByPayload: make(map[[fieldparams.RootLength]byte]*Node),
|
||||
slashedIndices: make(map[types.ValidatorIndex]bool),
|
||||
pruneThreshold: defaultPruneThreshold,
|
||||
}
|
||||
|
||||
b := make([]uint64, 0)
|
||||
@@ -115,7 +112,7 @@ func (f *ForkChoice) ProcessAttestation(ctx context.Context, validatorIndices []
|
||||
}
|
||||
|
||||
// InsertNode processes a new block by inserting it to the fork choice store.
|
||||
func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, root [32]byte) error {
|
||||
func (f *ForkChoice) InsertNode(ctx context.Context, state state.ReadOnlyBeaconState, root [32]byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.InsertNode")
|
||||
defer span.End()
|
||||
|
||||
@@ -145,14 +142,10 @@ func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, ro
|
||||
return errInvalidNilCheckpoint
|
||||
}
|
||||
finalizedEpoch := fc.Epoch
|
||||
node, err := f.store.insert(ctx, slot, root, parentRoot, payloadHash, justifiedEpoch, finalizedEpoch)
|
||||
err := f.store.insert(ctx, slot, root, parentRoot, payloadHash, justifiedEpoch, finalizedEpoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if features.Get().EnablePullTips {
|
||||
jc, fc = f.store.pullTips(state, node, jc, fc)
|
||||
}
|
||||
return f.updateCheckpoints(ctx, jc, fc)
|
||||
}
|
||||
|
||||
@@ -553,7 +546,7 @@ func (f *ForkChoice) InsertOptimisticChain(ctx context.Context, chain []*forkcho
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := f.store.insert(ctx,
|
||||
if err := f.store.insert(ctx,
|
||||
b.Slot(), r, parentRoot, payloadHash,
|
||||
chain[i].JustifiedCheckpoint.Epoch, chain[i].FinalizedCheckpoint.Epoch); err != nil {
|
||||
return err
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
@@ -65,8 +64,5 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot types.Slot) error {
|
||||
f.store.justifiedCheckpoint = bjcp
|
||||
}
|
||||
}
|
||||
if features.Get().EnablePullTips {
|
||||
f.updateUnrealizedCheckpoints()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -32,6 +32,12 @@ func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, pa
|
||||
return invalidRoots, errInvalidParentRoot
|
||||
}
|
||||
}
|
||||
// Check if last valid hash is an ancestor of the passed node.
|
||||
lastValid, ok := s.nodeByPayload[payloadHash]
|
||||
if !ok || lastValid == nil {
|
||||
s.nodesLock.Unlock()
|
||||
return invalidRoots, errUnknownPayloadHash
|
||||
}
|
||||
firstInvalid := node
|
||||
for ; firstInvalid.parent != nil && firstInvalid.parent.payloadHash != payloadHash; firstInvalid = firstInvalid.parent {
|
||||
if ctx.Err() != nil {
|
||||
|
||||
@@ -30,23 +30,6 @@ func TestPruneInvalid(t *testing.T) {
|
||||
wantedRoots [][32]byte
|
||||
wantedErr error
|
||||
}{
|
||||
{ // Bogus LVH, root not in forkchoice
|
||||
[32]byte{'x'},
|
||||
[32]byte{'i'},
|
||||
[32]byte{'R'},
|
||||
13,
|
||||
[][32]byte{},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
// Bogus LVH
|
||||
[32]byte{'i'},
|
||||
[32]byte{'h'},
|
||||
[32]byte{'R'},
|
||||
12,
|
||||
[][32]byte{{'i'}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[32]byte{'j'},
|
||||
[32]byte{'b'},
|
||||
|
||||
@@ -108,7 +108,7 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
|
||||
func (s *Store) insert(ctx context.Context,
|
||||
slot types.Slot,
|
||||
root, parentRoot, payloadHash [fieldparams.RootLength]byte,
|
||||
justifiedEpoch, finalizedEpoch types.Epoch) (*Node, error) {
|
||||
justifiedEpoch, finalizedEpoch types.Epoch) error {
|
||||
_, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.insert")
|
||||
defer span.End()
|
||||
|
||||
@@ -116,8 +116,8 @@ func (s *Store) insert(ctx context.Context,
|
||||
defer s.nodesLock.Unlock()
|
||||
|
||||
// Return if the block has been inserted into Store before.
|
||||
if n, ok := s.nodeByRoot[root]; ok {
|
||||
return n, nil
|
||||
if _, ok := s.nodeByRoot[root]; ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
parent := s.nodeByRoot[parentRoot]
|
||||
@@ -141,14 +141,14 @@ func (s *Store) insert(ctx context.Context,
|
||||
s.treeRootNode = n
|
||||
s.headNode = n
|
||||
} else {
|
||||
return n, errInvalidParentRoot
|
||||
return errInvalidParentRoot
|
||||
}
|
||||
} else {
|
||||
parent.children = append(parent.children, n)
|
||||
// Apply proposer boost
|
||||
timeNow := uint64(time.Now().Unix())
|
||||
if timeNow < s.genesisTime {
|
||||
return n, nil
|
||||
return nil
|
||||
}
|
||||
secondsIntoSlot := (timeNow - s.genesisTime) % params.BeaconConfig().SecondsPerSlot
|
||||
currentSlot := slots.CurrentSlot(s.genesisTime)
|
||||
@@ -162,14 +162,14 @@ func (s *Store) insert(ctx context.Context,
		// Update best descendants
		if err := s.treeRootNode.updateBestDescendant(ctx,
			s.justifiedCheckpoint.Epoch, s.finalizedCheckpoint.Epoch); err != nil {
			return n, err
			return err
		}
	}
	// Update metrics.
	processedBlockCount.Inc()
	nodeCount.Set(float64(len(s.nodeByRoot)))

	return n, nil
	return nil
}
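Inside insert, the proposer boost is granted only to a block for the current slot that arrives within the first interval of the slot (with mainnet values, within the first 12/3 = 4 seconds). A hedged sketch of that timeliness computation, with the constants stated explicitly:

package main

import "fmt"

const (
	secondsPerSlot   = 12
	intervalsPerSlot = 3
)

// boostEligible reports whether a block for blockSlot received at time now is
// timely enough to earn the proposer boost.
func boostEligible(genesisTime, now, blockSlot uint64) bool {
	if now < genesisTime {
		return false
	}
	secondsIntoSlot := (now - genesisTime) % secondsPerSlot
	currentSlot := (now - genesisTime) / secondsPerSlot
	return blockSlot == currentSlot && secondsIntoSlot < secondsPerSlot/intervalsPerSlot
}

func main() {
	genesis := uint64(1_000_000)
	fmt.Println(boostEligible(genesis, genesis+12*5+2, 5)) // true: 2s into slot 5
	fmt.Println(boostEligible(genesis, genesis+12*5+7, 5)) // false: too late in the slot
}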
// pruneFinalizedNodeByRootMap prunes the `nodeByRoot` map
|
||||
|
||||
@@ -141,8 +141,7 @@ func TestStore_Insert(t *testing.T) {
|
||||
fc := &forkchoicetypes.Checkpoint{Epoch: 0}
|
||||
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, nodeByPayload: nodeByPayload, justifiedCheckpoint: jc, finalizedCheckpoint: fc}
|
||||
payloadHash := [32]byte{'a'}
|
||||
_, err := s.insert(context.Background(), 100, indexToHash(100), indexToHash(0), payloadHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.insert(context.Background(), 100, indexToHash(100), indexToHash(0), payloadHash, 1, 1))
|
||||
assert.Equal(t, 2, len(s.nodeByRoot), "Did not insert block")
|
||||
assert.Equal(t, (*Node)(nil), treeRootNode.parent, "Incorrect parent")
|
||||
assert.Equal(t, 1, len(treeRootNode.children), "Incorrect children number")
|
||||
|
||||
@@ -18,26 +18,24 @@ type ForkChoice struct {
|
||||
|
||||
// Store defines the fork choice store which includes block nodes and the last view of checkpoint information.
|
||||
type Store struct {
|
||||
justifiedCheckpoint *forkchoicetypes.Checkpoint // latest justified epoch in store.
|
||||
bestJustifiedCheckpoint *forkchoicetypes.Checkpoint // best justified checkpoint in store.
|
||||
unrealizedJustifiedCheckpoint *forkchoicetypes.Checkpoint // best unrealized justified checkpoint in store.
|
||||
unrealizedFinalizedCheckpoint *forkchoicetypes.Checkpoint // best unrealized finalized checkpoint in store.
|
||||
prevJustifiedCheckpoint *forkchoicetypes.Checkpoint // previous justified checkpoint in store.
|
||||
finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized epoch in store.
|
||||
pruneThreshold uint64 // do not prune tree unless threshold is reached.
|
||||
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
|
||||
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
|
||||
previousProposerBoostScore uint64 // previous proposer boosted root score.
|
||||
treeRootNode *Node // the root node of the store tree.
|
||||
headNode *Node // last head Node
|
||||
nodeByRoot map[[fieldparams.RootLength]byte]*Node // nodes indexed by roots.
|
||||
nodeByPayload map[[fieldparams.RootLength]byte]*Node // nodes indexed by payload Hash
|
||||
slashedIndices map[types.ValidatorIndex]bool // the list of equivocating validator indices
|
||||
originRoot [fieldparams.RootLength]byte // The genesis block root
|
||||
nodesLock sync.RWMutex
|
||||
proposerBoostLock sync.RWMutex
|
||||
checkpointsLock sync.RWMutex
|
||||
genesisTime uint64
|
||||
justifiedCheckpoint *forkchoicetypes.Checkpoint // latest justified epoch in store.
|
||||
bestJustifiedCheckpoint *forkchoicetypes.Checkpoint // best justified checkpoint in store.
|
||||
prevJustifiedCheckpoint *forkchoicetypes.Checkpoint // previous justified checkpoint in store.
|
||||
finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized epoch in store.
|
||||
pruneThreshold uint64 // do not prune tree unless threshold is reached.
|
||||
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
|
||||
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
|
||||
previousProposerBoostScore uint64 // previous proposer boosted root score.
|
||||
treeRootNode *Node // the root node of the store tree.
|
||||
headNode *Node // last head Node
|
||||
nodeByRoot map[[fieldparams.RootLength]byte]*Node // nodes indexed by roots.
|
||||
nodeByPayload map[[fieldparams.RootLength]byte]*Node // nodes indexed by payload Hash
|
||||
slashedIndices map[types.ValidatorIndex]bool // the list of equivocating validator indices
|
||||
originRoot [fieldparams.RootLength]byte // The genesis block root
|
||||
nodesLock sync.RWMutex
|
||||
proposerBoostLock sync.RWMutex
|
||||
checkpointsLock sync.RWMutex
|
||||
genesisTime uint64
|
||||
}
|
||||
|
||||
// Node defines the individual block which includes its block parent, ancestor and how much weight accounted for it.
|
||||
|
||||
@@ -2,14 +2,7 @@ package doublylinkedtree
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
|
||||
func (s *Store) setUnrealizedJustifiedEpoch(root [32]byte, epoch types.Epoch) error {
|
||||
@@ -42,78 +35,20 @@ func (s *Store) setUnrealizedFinalizedEpoch(root [32]byte, epoch types.Epoch) er
	return nil
}

// updateUnrealizedCheckpoints "realizes" the unrealized justified and finalized
// epochs stored within nodes. It should be called at the beginning of each epoch.
func (f *ForkChoice) updateUnrealizedCheckpoints() {
// UpdateUnrealizedCheckpoints "realizes" the unrealized justified and finalized
// epochs stored within nodes. It should be called at the beginning of each
// epoch
func (f *ForkChoice) UpdateUnrealizedCheckpoints() {
	f.store.nodesLock.Lock()
	defer f.store.nodesLock.Unlock()
	for _, node := range f.store.nodeByRoot {
		node.justifiedEpoch = node.unrealizedJustifiedEpoch
		node.finalizedEpoch = node.unrealizedFinalizedEpoch
		if node.justifiedEpoch > f.store.justifiedCheckpoint.Epoch {
			f.store.justifiedCheckpoint = f.store.unrealizedJustifiedCheckpoint
			if node.justifiedEpoch > f.store.bestJustifiedCheckpoint.Epoch {
				f.store.bestJustifiedCheckpoint = f.store.unrealizedJustifiedCheckpoint
			}
			f.store.justifiedCheckpoint.Epoch = node.justifiedEpoch
		}
		if node.finalizedEpoch > f.store.finalizedCheckpoint.Epoch {
			f.store.justifiedCheckpoint = f.store.unrealizedJustifiedCheckpoint
			f.store.finalizedCheckpoint = f.store.unrealizedFinalizedCheckpoint
			f.store.finalizedCheckpoint.Epoch = node.finalizedEpoch
		}
	}
}
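Exporting UpdateUnrealizedCheckpoints lets callers outside the package trigger the realization step. Conceptually it promotes each node's unrealized justified and finalized epochs to its effective ones and lifts the store-wide checkpoints when a node exceeds them; a condensed, self-contained sketch:

package main

import "fmt"

type toyNode struct {
	justifiedEpoch, finalizedEpoch                     uint64
	unrealizedJustifiedEpoch, unrealizedFinalizedEpoch uint64
}

type toyForkchoice struct {
	justifiedEpoch, finalizedEpoch uint64
	nodes                          []*toyNode
}

// updateUnrealizedCheckpoints promotes unrealized epochs at the epoch boundary.
func (f *toyForkchoice) updateUnrealizedCheckpoints() {
	for _, n := range f.nodes {
		n.justifiedEpoch = n.unrealizedJustifiedEpoch
		n.finalizedEpoch = n.unrealizedFinalizedEpoch
		if n.justifiedEpoch > f.justifiedEpoch {
			f.justifiedEpoch = n.justifiedEpoch
		}
		if n.finalizedEpoch > f.finalizedEpoch {
			f.finalizedEpoch = n.finalizedEpoch
		}
	}
}

func main() {
	f := &toyForkchoice{nodes: []*toyNode{{unrealizedJustifiedEpoch: 2, unrealizedFinalizedEpoch: 1}}}
	f.updateUnrealizedCheckpoints()
	fmt.Println(f.justifiedEpoch, f.finalizedEpoch) // 2 1
}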
func (s *Store) pullTips(state state.BeaconState, node *Node, jc, fc *ethpb.Checkpoint) (*ethpb.Checkpoint, *ethpb.Checkpoint) {
|
||||
s.nodesLock.Lock()
|
||||
defer s.nodesLock.Unlock()
|
||||
|
||||
if node.parent == nil { // Nothing to do if the parent is nil.
|
||||
return jc, fc
|
||||
}
|
||||
|
||||
s.checkpointsLock.Lock()
|
||||
defer s.checkpointsLock.Unlock()
|
||||
|
||||
currentEpoch := slots.ToEpoch(slots.CurrentSlot(s.genesisTime))
|
||||
stateSlot := state.Slot()
|
||||
stateEpoch := slots.ToEpoch(stateSlot)
|
||||
currJustified := node.parent.unrealizedJustifiedEpoch == currentEpoch
|
||||
prevJustified := node.parent.unrealizedJustifiedEpoch+1 == currentEpoch
|
||||
tooEarlyForCurr := slots.SinceEpochStarts(stateSlot)*3 < params.BeaconConfig().SlotsPerEpoch*2
|
||||
// Exit early if it's justified or too early to be justified.
|
||||
if currJustified || (stateEpoch == currentEpoch && prevJustified && tooEarlyForCurr) {
|
||||
node.unrealizedJustifiedEpoch = node.parent.unrealizedJustifiedEpoch
|
||||
node.unrealizedFinalizedEpoch = node.parent.unrealizedFinalizedEpoch
|
||||
return jc, fc
|
||||
}
|
||||
|
||||
uj, uf, err := precompute.UnrealizedCheckpoints(state)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("could not compute unrealized checkpoints")
|
||||
uj, uf = jc, fc
|
||||
}
|
||||
|
||||
// Update store's unrealized checkpoints.
|
||||
if uj.Epoch > s.unrealizedJustifiedCheckpoint.Epoch {
|
||||
s.unrealizedJustifiedCheckpoint = &forkchoicetypes.Checkpoint{
|
||||
Epoch: uj.Epoch, Root: bytesutil.ToBytes32(uj.Root),
|
||||
}
|
||||
}
|
||||
if uf.Epoch > s.unrealizedFinalizedCheckpoint.Epoch {
|
||||
s.unrealizedJustifiedCheckpoint = &forkchoicetypes.Checkpoint{
|
||||
Epoch: uj.Epoch, Root: bytesutil.ToBytes32(uj.Root),
|
||||
}
|
||||
s.unrealizedFinalizedCheckpoint = &forkchoicetypes.Checkpoint{
|
||||
Epoch: uf.Epoch, Root: bytesutil.ToBytes32(uf.Root),
|
||||
}
|
||||
}
|
||||
|
||||
// Update node's checkpoints.
|
||||
node.unrealizedJustifiedEpoch, node.unrealizedFinalizedEpoch = uj.Epoch, uf.Epoch
|
||||
if stateEpoch < currentEpoch {
|
||||
jc, fc = uj, uf
|
||||
node.justifiedEpoch = uj.Epoch
|
||||
node.finalizedEpoch = uf.Epoch
|
||||
}
|
||||
return jc, fc
|
||||
}
|
||||
|
||||
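The early-exit condition in `pullTips` above is the key heuristic: the parent's unrealized checkpoints are copied only when the state is still within the first two thirds of the current epoch, since before that point the current epoch cannot yet have gathered enough attestations to change justification. A minimal standalone sketch of that arithmetic (plain integers instead of Prysm's `slots` and `params` packages; the `slotsPerEpoch` constant here is the mainnet value, assumed for illustration):

```go
package main

import "fmt"

const slotsPerEpoch = 32 // mainnet value, assumed for illustration

// tooEarlyForCurrentEpoch mirrors the check
// slots.SinceEpochStarts(stateSlot)*3 < params.BeaconConfig().SlotsPerEpoch*2,
// i.e. fewer than 2/3 of the epoch's slots have elapsed at stateSlot.
func tooEarlyForCurrentEpoch(stateSlot uint64) bool {
	sinceEpochStart := stateSlot % slotsPerEpoch
	return sinceEpochStart*3 < slotsPerEpoch*2
}

func main() {
	fmt.Println(tooEarlyForCurrentEpoch(96))  // slot 0 of its epoch -> true
	fmt.Println(tooEarlyForCurrentEpoch(127)) // slot 31 of its epoch -> false
}
```

These two inputs match the "too early" and "not too early" cases exercised by the tests that follow.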
@@ -5,7 +5,6 @@ import (
"testing"

forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/testing/require"
@@ -98,7 +97,7 @@ func TestStore_LongFork(t *testing.T) {
require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'c'}].weight)

// Update unrealized justification, c becomes head
f.updateUnrealizedCheckpoints()
f.UpdateUnrealizedCheckpoints()
headRoot, err = f.Head(ctx, []uint64{100})
require.NoError(t, err)
require.Equal(t, [32]byte{'c'}, headRoot)
@@ -148,8 +147,6 @@ func TestStore_NoDeadLock(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'g'}, 2))
require.NoError(t, f.store.setUnrealizedFinalizedEpoch([32]byte{'g'}, 1))
f.store.unrealizedJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 2}
f.store.unrealizedFinalizedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 1}
state, blkRoot, err = prepareForkchoiceState(ctx, 107, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
@@ -179,7 +176,7 @@ func TestStore_NoDeadLock(t *testing.T) {
require.Equal(t, types.Epoch(0), f.FinalizedCheckpoint().Epoch)

// Realized Justified checkpoints, H becomes head
f.updateUnrealizedCheckpoints()
f.UpdateUnrealizedCheckpoints()
headRoot, err = f.Head(ctx, []uint64{100})
require.NoError(t, err)
require.Equal(t, [32]byte{'h'}, headRoot)
@@ -239,8 +236,7 @@ func TestStore_ForkNextEpoch(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'d'}, 1))
f.store.unrealizedJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 1}
f.updateUnrealizedCheckpoints()
f.UpdateUnrealizedCheckpoints()
headRoot, err = f.Head(ctx, []uint64{100})
require.NoError(t, err)
require.Equal(t, [32]byte{'d'}, headRoot)
@@ -248,90 +244,3 @@ func TestStore_ForkNextEpoch(t *testing.T) {
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'h'}].weight)
}

func TestStore_PullTips_Heuristics(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnablePullTips: true,
})
defer resetCfg()
ctx := context.Background()
t.Run("Current epoch is justified", func(tt *testing.T) {
f := setup(1, 1)
st, root, err := prepareForkchoiceState(ctx, 65, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = types.Epoch(2)
driftGenesisTime(f, 66, 0)

st, root, err = prepareForkchoiceState(ctx, 66, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
require.Equal(tt, types.Epoch(2), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
require.Equal(tt, types.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedFinalizedEpoch)
})

t.Run("Previous Epoch is justified and too early for current", func(tt *testing.T) {
f := setup(1, 1)
st, root, err := prepareForkchoiceState(ctx, 95, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = types.Epoch(2)
driftGenesisTime(f, 96, 0)

st, root, err = prepareForkchoiceState(ctx, 96, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
require.Equal(tt, types.Epoch(2), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
require.Equal(tt, types.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedFinalizedEpoch)
})
t.Run("Previous Epoch is justified and not too early for current", func(tt *testing.T) {
f := setup(1, 1)
st, root, err := prepareForkchoiceState(ctx, 95, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = types.Epoch(2)
driftGenesisTime(f, 127, 0)

st, root, err = prepareForkchoiceState(ctx, 127, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
// Check that the justification point is not the parent's.
// This tests that the heuristics in pullTips did not apply and
// the test continues to compute a bogus unrealized
// justification
require.Equal(tt, types.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
})
t.Run("Block from previous Epoch", func(tt *testing.T) {
f := setup(1, 1)
st, root, err := prepareForkchoiceState(ctx, 94, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = types.Epoch(2)
driftGenesisTime(f, 96, 0)

st, root, err = prepareForkchoiceState(ctx, 95, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
// Check that the justification point is not the parent's.
// This tests that the heuristics in pullTips did not apply and
// the test continues to compute a bogus unrealized
// justification
require.Equal(tt, types.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
})
t.Run("Previous Epoch is not justified", func(tt *testing.T) {
f := setup(1, 1)
st, root, err := prepareForkchoiceState(ctx, 128, [32]byte{'p'}, [32]byte{}, [32]byte{}, 2, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
driftGenesisTime(f, 129, 0)

st, root, err = prepareForkchoiceState(ctx, 129, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 2, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
// Check that the justification point is not the parent's.
// This tests that the heuristics in pullTips did not apply and
// the test continues to compute a bogus unrealized
// justification
require.Equal(tt, types.Epoch(2), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
})
}

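These heuristics tests lean on the `driftGenesisTime` helper, whose implementation is not part of this diff. The sketch below is only an assumption of what such a helper computes (a genesis timestamp that places the wall clock at the desired slot); the helper name and signature here are hypothetical:

```go
package main

import (
	"fmt"
	"time"
)

const secondsPerSlot = 12 // mainnet SECONDS_PER_SLOT, assumed for illustration

// genesisTimeForSlot returns the genesis timestamp a test would install so
// that "now" falls `offset` seconds into `slot`. This mirrors what a helper
// like driftGenesisTime is presumed to do; the real helper is not shown here.
func genesisTimeForSlot(now time.Time, slot, offset uint64) uint64 {
	return uint64(now.Unix()) - slot*secondsPerSlot - offset
}

func main() {
	g := genesisTimeForSlot(time.Now(), 96, 0)
	fmt.Println((uint64(time.Now().Unix()) - g) / secondsPerSlot) // ≈ 96
}
```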
@@ -30,7 +30,7 @@ type HeadRetriever interface {

// BlockProcessor processes the block that's used for accounting fork choice.
type BlockProcessor interface {
InsertNode(context.Context, state.BeaconState, [32]byte) error
InsertNode(context.Context, state.ReadOnlyBeaconState, [32]byte) error
InsertOptimisticChain(context.Context, []*forkchoicetypes.BlockAndCheckpoints) error
}

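Narrowing `InsertNode` from `state.BeaconState` to `state.ReadOnlyBeaconState` documents that fork choice only ever reads from the state it is handed. The general Go pattern, in a self-contained sketch with hypothetical names (not Prysm's actual interfaces):

```go
package main

import "fmt"

// ReadOnlyState is a hypothetical read-only view: any state implementation
// can be passed in, but the consumer cannot mutate it through this type.
type ReadOnlyState interface {
	Slot() uint64
}

type beaconState struct{ slot uint64 }

func (b *beaconState) Slot() uint64     { return b.slot }
func (b *beaconState) SetSlot(s uint64) { b.slot = s } // writable API, invisible below

// insertNode accepts the narrowest interface it actually needs.
func insertNode(st ReadOnlyState) {
	fmt.Println("inserting node for slot", st.Slot())
}

func main() {
	insertNode(&beaconState{slot: 107})
}
```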
@@ -22,11 +22,9 @@ go_library(
],
deps = [
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
@@ -63,7 +61,6 @@ go_test(
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v3:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",

@@ -17,4 +17,3 @@ var errInvalidNilCheckpoint = errors.New("invalid nil checkpoint")
var errInvalidUnrealizedJustifiedEpoch = errors.New("invalid unrealized justified epoch")
var errInvalidUnrealizedFinalizedEpoch = errors.New("invalid unrealized finalized epoch")
var errNilBlockHeader = errors.New("invalid nil block header")
var errInvalidParentRoot = errors.New("invalid parent root")

@@ -13,7 +13,6 @@ import (
// It returns a list of deltas that represents the difference between old balances and new balances.
func computeDeltas(
ctx context.Context,
count int,
blockIndices map[[32]byte]uint64,
votes []Vote,
oldBalances, newBalances []uint64,
@@ -22,7 +21,7 @@ func computeDeltas(
_, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.computeDeltas")
defer span.End()

deltas := make([]int, count)
deltas := make([]int, len(blockIndices))

for validatorIndex, vote := range votes {
// Skip if validator has been slashed

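Dropping the separate `count` argument removes a redundancy: the number of tracked blocks is already `len(blockIndices)`, so the delta slice can be sized directly from the map. A stripped-down sketch of that shape (simplified types, not the full Prysm implementation):

```go
package main

import "fmt"

type vote struct {
	currentRoot [32]byte
	nextRoot    [32]byte
}

// computeDeltasSketch highlights only the sizing change: deltas is allocated
// from len(blockIndices) instead of a caller-supplied count.
func computeDeltasSketch(blockIndices map[[32]byte]uint64, votes []vote, oldBalances, newBalances []uint64) []int {
	deltas := make([]int, len(blockIndices))
	for i, v := range votes {
		if idx, ok := blockIndices[v.nextRoot]; ok {
			deltas[idx] += int(newBalances[i]) // add the new vote's weight
		}
		if idx, ok := blockIndices[v.currentRoot]; ok {
			deltas[idx] -= int(oldBalances[i]) // remove the old vote's weight
		}
	}
	return deltas
}

func main() {
	root := [32]byte{'a'}
	indices := map[[32]byte]uint64{root: 0}
	fmt.Println(computeDeltasSketch(indices, []vote{{nextRoot: root}}, []uint64{0}, []uint64{10})) // [10]
}
```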
@@ -27,7 +27,7 @@ func TestComputeDelta_ZeroHash(t *testing.T) {
}

slashedIndices := make(map[types.ValidatorIndex]bool)
delta, _, err := computeDeltas(context.Background(), len(indices), indices, votes, oldBalances, newBalances, slashedIndices)
delta, _, err := computeDeltas(context.Background(), indices, votes, oldBalances, newBalances, slashedIndices)
require.NoError(t, err)
assert.Equal(t, int(validatorCount), len(delta))

@@ -55,7 +55,7 @@ func TestComputeDelta_AllVoteTheSame(t *testing.T) {
}

slashedIndices := make(map[types.ValidatorIndex]bool)
delta, _, err := computeDeltas(context.Background(), len(indices), indices, votes, oldBalances, newBalances, slashedIndices)
delta, _, err := computeDeltas(context.Background(), indices, votes, oldBalances, newBalances, slashedIndices)
require.NoError(t, err)
assert.Equal(t, int(validatorCount), len(delta))

@@ -88,7 +88,7 @@ func TestComputeDelta_DifferentVotes(t *testing.T) {
}

slashedIndices := make(map[types.ValidatorIndex]bool)
delta, _, err := computeDeltas(context.Background(), len(indices), indices, votes, oldBalances, newBalances, slashedIndices)
delta, _, err := computeDeltas(context.Background(), indices, votes, oldBalances, newBalances, slashedIndices)
require.NoError(t, err)
assert.Equal(t, int(validatorCount), len(delta))

@@ -118,7 +118,7 @@ func TestComputeDelta_MovingVotes(t *testing.T) {
}

slashedIndices := make(map[types.ValidatorIndex]bool)
delta, _, err := computeDeltas(context.Background(), len(indices), indices, votes, oldBalances, newBalances, slashedIndices)
delta, _, err := computeDeltas(context.Background(), indices, votes, oldBalances, newBalances, slashedIndices)
require.NoError(t, err)
assert.Equal(t, int(validatorCount), len(delta))

@@ -151,7 +151,7 @@ func TestComputeDelta_MoveOutOfTree(t *testing.T) {
Vote{indexToHash(1), [32]byte{'A'}, 0})

slashedIndices := make(map[types.ValidatorIndex]bool)
delta, _, err := computeDeltas(context.Background(), len(indices), indices, votes, oldBalances, newBalances, slashedIndices)
delta, _, err := computeDeltas(context.Background(), indices, votes, oldBalances, newBalances, slashedIndices)
require.NoError(t, err)
assert.Equal(t, 1, len(delta))
assert.Equal(t, 0-2*int(balance), delta[0])
@@ -180,7 +180,7 @@ func TestComputeDelta_ChangingBalances(t *testing.T) {
}

slashedIndices := make(map[types.ValidatorIndex]bool)
delta, _, err := computeDeltas(context.Background(), len(indices), indices, votes, oldBalances, newBalances, slashedIndices)
delta, _, err := computeDeltas(context.Background(), indices, votes, oldBalances, newBalances, slashedIndices)
require.NoError(t, err)
assert.Equal(t, 16, len(delta))

@@ -214,7 +214,7 @@ func TestComputeDelta_ValidatorAppear(t *testing.T) {
Vote{indexToHash(1), indexToHash(2), 0})

slashedIndices := make(map[types.ValidatorIndex]bool)
delta, _, err := computeDeltas(context.Background(), len(indices), indices, votes, oldBalances, newBalances, slashedIndices)
delta, _, err := computeDeltas(context.Background(), indices, votes, oldBalances, newBalances, slashedIndices)
require.NoError(t, err)
assert.Equal(t, 2, len(delta))
assert.Equal(t, 0-int(balance), delta[0])
@@ -240,7 +240,7 @@ func TestComputeDelta_ValidatorDisappears(t *testing.T) {
Vote{indexToHash(1), indexToHash(2), 0})

slashedIndices := make(map[types.ValidatorIndex]bool)
delta, _, err := computeDeltas(context.Background(), len(indices), indices, votes, oldBalances, newBalances, slashedIndices)
delta, _, err := computeDeltas(context.Background(), indices, votes, oldBalances, newBalances, slashedIndices)
require.NoError(t, err)
assert.Equal(t, 2, len(delta))
assert.Equal(t, 0-2*int(balance), delta[0])

@@ -4,7 +4,6 @@ import (
"context"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/features"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/time/slots"
)
@@ -65,8 +64,5 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot types.Slot) error {
f.store.justifiedCheckpoint = bjcp
}
}
if features.Get().EnablePullTips {
f.UpdateUnrealizedCheckpoints()
}
return nil
}

@@ -56,8 +56,8 @@ func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, parentRoo
defer f.store.nodesLock.Unlock()
invalidRoots := make([][32]byte, 0)
lastValidIndex, ok := f.store.payloadIndices[payloadHash]
if !ok {
lastValidIndex = uint64(len(f.store.nodes))
if !ok || lastValidIndex == NonExistentNode {
return invalidRoots, errInvalidFinalizedNode
}

invalidIndex, ok := f.store.nodesIndices[root]

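The behavioural change in `SetOptimisticToInvalid` is that an unknown last-valid payload hash is now rejected rather than silently treated as the end of the node list. The lookup pattern in isolation, as a sketch with simplified types (the error message string is an assumption, not copied from the package):

```go
package main

import (
	"errors"
	"fmt"
)

var errInvalidFinalizedNode = errors.New("invalid last valid hash") // assumed message

const nonExistentNode = ^uint64(0)

// lastValidIndex errors on an unknown payload hash instead of defaulting to
// the end of the node list, as the old code did.
func lastValidIndex(payloadIndices map[[32]byte]uint64, payloadHash [32]byte) (uint64, error) {
	idx, ok := payloadIndices[payloadHash]
	if !ok || idx == nonExistentNode {
		return 0, errInvalidFinalizedNode
	}
	return idx, nil
}

func main() {
	_, err := lastValidIndex(map[[32]byte]uint64{}, [32]byte{'R'})
	fmt.Println(err)
}
```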
@@ -385,6 +385,8 @@ func TestSetOptimisticToInvalid_InvalidRoots(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
_, err = f.SetOptimisticToInvalid(ctx, [32]byte{'p'}, [32]byte{'p'}, [32]byte{'B'})
require.ErrorIs(t, ErrUnknownNodeRoot, err)
_, err = f.SetOptimisticToInvalid(ctx, [32]byte{'a'}, [32]byte{}, [32]byte{'p'})
require.ErrorIs(t, errInvalidFinalizedNode, err)
}

// This is a regression test (10445)
@@ -415,40 +417,3 @@ func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) {
require.DeepEqual(t, params.BeaconConfig().ZeroHash, f.store.previousProposerBoostRoot)
f.store.proposerBoostLock.RUnlock()
}

// This is a regression test (10996)
func TestSetOptimisticToInvalid_BogusLVH(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)

state, root, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, [32]byte{}, [32]byte{'A'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, root))

state, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, root))

invalidRoots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'R'})
require.NoError(t, err)
require.Equal(t, 1, len(invalidRoots))
require.Equal(t, [32]byte{'b'}, invalidRoots[0])
}

// This is a regression test (10996)
func TestSetOptimisticToInvalid_BogusLVH_RotNotImported(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)

state, root, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, [32]byte{}, [32]byte{'A'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, root))

state, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, root))

invalidRoots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'R'})
require.NoError(t, err)
require.Equal(t, 0, len(invalidRoots))
}

@@ -11,7 +11,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/features"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
@@ -31,19 +30,17 @@ const defaultPruneThreshold = 256
// New initializes a new fork choice store.
func New() *ForkChoice {
s := &Store{
justifiedCheckpoint: &forkchoicetypes.Checkpoint{},
bestJustifiedCheckpoint: &forkchoicetypes.Checkpoint{},
unrealizedJustifiedCheckpoint: &forkchoicetypes.Checkpoint{},
prevJustifiedCheckpoint: &forkchoicetypes.Checkpoint{},
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
unrealizedFinalizedCheckpoint: &forkchoicetypes.Checkpoint{},
proposerBoostRoot: [32]byte{},
nodes: make([]*Node, 0),
nodesIndices: make(map[[32]byte]uint64),
payloadIndices: make(map[[32]byte]uint64),
canonicalNodes: make(map[[32]byte]bool),
slashedIndices: make(map[types.ValidatorIndex]bool),
pruneThreshold: defaultPruneThreshold,
justifiedCheckpoint: &forkchoicetypes.Checkpoint{},
bestJustifiedCheckpoint: &forkchoicetypes.Checkpoint{},
prevJustifiedCheckpoint: &forkchoicetypes.Checkpoint{},
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
proposerBoostRoot: [32]byte{},
nodes: make([]*Node, 0),
nodesIndices: make(map[[32]byte]uint64),
payloadIndices: make(map[[32]byte]uint64),
canonicalNodes: make(map[[32]byte]bool),
slashedIndices: make(map[types.ValidatorIndex]bool),
pruneThreshold: defaultPruneThreshold,
}

b := make([]uint64, 0)
@@ -65,7 +62,7 @@ func (f *ForkChoice) Head(ctx context.Context, justifiedStateBalances []uint64)
// Using the write lock here because `updateCanonicalNodes` that gets called subsequently requires a write operation.
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
deltas, newVotes, err := computeDeltas(ctx, len(f.store.nodes), f.store.nodesIndices, f.votes, f.balances, newBalances, f.store.slashedIndices)
deltas, newVotes, err := computeDeltas(ctx, f.store.nodesIndices, f.votes, f.balances, newBalances, f.store.slashedIndices)
if err != nil {
return [32]byte{}, errors.Wrap(err, "Could not compute deltas")
}
@@ -120,7 +117,7 @@ func (f *ForkChoice) ProposerBoost() [fieldparams.RootLength]byte {
}

// InsertNode processes a new block by inserting it to the fork choice store.
func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, root [32]byte) error {
func (f *ForkChoice) InsertNode(ctx context.Context, state state.ReadOnlyBeaconState, root [32]byte) error {
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.InsertNode")
defer span.End()

@@ -150,14 +147,10 @@ func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, ro
return errInvalidNilCheckpoint
}
finalizedEpoch := fc.Epoch
node, err := f.store.insert(ctx, slot, root, parentRoot, payloadHash, justifiedEpoch, finalizedEpoch)
err := f.store.insert(ctx, slot, root, parentRoot, payloadHash, justifiedEpoch, finalizedEpoch)
if err != nil {
return err
}

if features.Get().EnablePullTips {
jc, fc = f.store.pullTips(state, node, jc, fc)
}
return f.updateCheckpoints(ctx, jc, fc)
}

@@ -469,7 +462,7 @@ func (s *Store) updateCanonicalNodes(ctx context.Context, root [32]byte) error {
func (s *Store) insert(ctx context.Context,
slot types.Slot,
root, parent, payloadHash [32]byte,
justifiedEpoch, finalizedEpoch types.Epoch) (*Node, error) {
justifiedEpoch, finalizedEpoch types.Epoch) error {
_, span := trace.StartSpan(ctx, "protoArrayForkChoice.insert")
defer span.End()

@@ -477,8 +470,8 @@ func (s *Store) insert(ctx context.Context,
defer s.nodesLock.Unlock()

// Return if the block has been inserted into Store before.
if idx, ok := s.nodesIndices[root]; ok {
return s.nodes[idx], nil
if _, ok := s.nodesIndices[root]; ok {
return nil
}

index := uint64(len(s.nodes))
@@ -509,7 +502,7 @@ func (s *Store) insert(ctx context.Context,
// Apply proposer boost
timeNow := uint64(time.Now().Unix())
if timeNow < s.genesisTime {
return n, nil
return nil
}
secondsIntoSlot := (timeNow - s.genesisTime) % params.BeaconConfig().SecondsPerSlot
currentSlot := slots.CurrentSlot(s.genesisTime)
@@ -523,7 +516,7 @@ func (s *Store) insert(ctx context.Context,
// Update parent with the best child and descendant only if it's available.
if n.parent != NonExistentNode {
if err := s.updateBestChildAndDescendant(parentIndex, index); err != nil {
return n, err
return err
}
}

@@ -531,7 +524,7 @@ func (s *Store) insert(ctx context.Context,
processedBlockCount.Inc()
nodeCount.Set(float64(len(s.nodes)))

return n, nil
return nil
}

// applyWeightChanges iterates backwards through the nodes in store. It checks all nodes parent
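Since `InsertNode` no longer feeds the freshly inserted node into `pullTips`, the store's `insert` can stop returning the `*Node` and report only an error, staying idempotent for roots it already tracks. A minimal sketch of that shape with simplified types (not the actual Prysm store):

```go
package main

import "fmt"

type node struct{ slot uint64 }

type store struct {
	nodes        []*node
	nodesIndices map[[32]byte]uint64
}

// insert is idempotent: re-inserting a known root is a no-op, and the caller
// now receives only an error rather than the *node back.
func (s *store) insert(root [32]byte, slot uint64) error {
	if _, ok := s.nodesIndices[root]; ok {
		return nil // already tracked
	}
	s.nodesIndices[root] = uint64(len(s.nodes))
	s.nodes = append(s.nodes, &node{slot: slot})
	return nil
}

func main() {
	s := &store{nodesIndices: make(map[[32]byte]uint64)}
	_ = s.insert([32]byte{'a'}, 1)
	_ = s.insert([32]byte{'a'}, 1)
	fmt.Println(len(s.nodes)) // 1
}
```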
@@ -998,7 +991,7 @@ func (f *ForkChoice) InsertOptimisticChain(ctx context.Context, chain []*forkcho
if err != nil {
return err
}
if _, err := f.store.insert(ctx,
if err := f.store.insert(ctx,
b.Slot(), r, parentRoot, payloadHash,
chain[i].JustifiedCheckpoint.Epoch, chain[i].FinalizedCheckpoint.Epoch); err != nil {
return err

@@ -114,8 +114,7 @@ func TestStore_Head_ContextCancelled(t *testing.T) {
func TestStore_Insert_UnknownParent(t *testing.T) {
// The new node does not have a parent.
s := &Store{nodesIndices: make(map[[32]byte]uint64), payloadIndices: make(map[[32]byte]uint64)}
_, err := s.insert(context.Background(), 100, [32]byte{'A'}, [32]byte{'B'}, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, s.insert(context.Background(), 100, [32]byte{'A'}, [32]byte{'B'}, params.BeaconConfig().ZeroHash, 1, 1))
assert.Equal(t, 1, len(s.nodes), "Did not insert block")
assert.Equal(t, 1, len(s.nodesIndices), "Did not insert block")
assert.Equal(t, NonExistentNode, s.nodes[0].parent, "Incorrect parent")
@@ -134,8 +133,7 @@ func TestStore_Insert_KnownParent(t *testing.T) {
payloadHash := [32]byte{'c'}
s.justifiedCheckpoint = &forkchoicetypes.Checkpoint{}
s.finalizedCheckpoint = &forkchoicetypes.Checkpoint{}
_, err := s.insert(context.Background(), 100, [32]byte{'A'}, p, payloadHash, 1, 1)
require.NoError(t, err)
require.NoError(t, s.insert(context.Background(), 100, [32]byte{'A'}, p, payloadHash, 1, 1))
assert.Equal(t, 2, len(s.nodes), "Did not insert block")
assert.Equal(t, 2, len(s.nodesIndices), "Did not insert block")
assert.Equal(t, uint64(0), s.nodes[1].parent, "Incorrect parent")

@@ -18,27 +18,25 @@ type ForkChoice struct {

// Store defines the fork choice store which includes block nodes and the last view of checkpoint information.
type Store struct {
pruneThreshold uint64 // do not prune tree unless threshold is reached.
justifiedCheckpoint *forkchoicetypes.Checkpoint // latest justified checkpoint in store.
bestJustifiedCheckpoint *forkchoicetypes.Checkpoint // best justified checkpoint in store.
unrealizedJustifiedCheckpoint *forkchoicetypes.Checkpoint // best justified checkpoint in store.
unrealizedFinalizedCheckpoint *forkchoicetypes.Checkpoint // best justified checkpoint in store.
prevJustifiedCheckpoint *forkchoicetypes.Checkpoint // previous justified checkpoint in store.
finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized checkpoint in store.
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
previousProposerBoostScore uint64 // previous proposer boosted root score.
nodes []*Node // list of block nodes, each node is a representation of one block.
nodesIndices map[[fieldparams.RootLength]byte]uint64 // the root of block node and the nodes index in the list.
canonicalNodes map[[fieldparams.RootLength]byte]bool // the canonical block nodes.
payloadIndices map[[fieldparams.RootLength]byte]uint64 // the payload hash of block node and the index in the list
slashedIndices map[types.ValidatorIndex]bool // The list of equivocating validators
originRoot [fieldparams.RootLength]byte // The genesis block root
lastHeadRoot [fieldparams.RootLength]byte // The last cached head block root
nodesLock sync.RWMutex
proposerBoostLock sync.RWMutex
checkpointsLock sync.RWMutex
genesisTime uint64
pruneThreshold uint64 // do not prune tree unless threshold is reached.
justifiedCheckpoint *forkchoicetypes.Checkpoint // latest justified checkpoint in store.
bestJustifiedCheckpoint *forkchoicetypes.Checkpoint // best justified checkpoint in store.
prevJustifiedCheckpoint *forkchoicetypes.Checkpoint // previous justified checkpoint in store.
finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized checkpoint in store.
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
previousProposerBoostScore uint64 // previous proposer boosted root score.
nodes []*Node // list of block nodes, each node is a representation of one block.
nodesIndices map[[fieldparams.RootLength]byte]uint64 // the root of block node and the nodes index in the list.
canonicalNodes map[[fieldparams.RootLength]byte]bool // the canonical block nodes.
payloadIndices map[[fieldparams.RootLength]byte]uint64 // the payload hash of block node and the index in the list
slashedIndices map[types.ValidatorIndex]bool // The list of equivocating validators
originRoot [fieldparams.RootLength]byte // The genesis block root
lastHeadRoot [fieldparams.RootLength]byte // The last cached head block root
nodesLock sync.RWMutex
proposerBoostLock sync.RWMutex
checkpointsLock sync.RWMutex
genesisTime uint64
}

// Node defines the individual block which includes its block parent, ancestor and how much weight accounted for it.

@@ -1,14 +1,7 @@
package protoarray

import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/time/slots"
)

func (s *Store) setUnrealizedJustifiedEpoch(root [32]byte, epoch types.Epoch) error {
@@ -63,70 +56,10 @@ func (f *ForkChoice) UpdateUnrealizedCheckpoints() {
node.justifiedEpoch = node.unrealizedJustifiedEpoch
node.finalizedEpoch = node.unrealizedFinalizedEpoch
if node.justifiedEpoch > f.store.justifiedCheckpoint.Epoch {
if node.justifiedEpoch > f.store.bestJustifiedCheckpoint.Epoch {
f.store.bestJustifiedCheckpoint = f.store.unrealizedJustifiedCheckpoint
}
f.store.justifiedCheckpoint = f.store.unrealizedJustifiedCheckpoint
f.store.justifiedCheckpoint.Epoch = node.justifiedEpoch
}
if node.finalizedEpoch > f.store.finalizedCheckpoint.Epoch {
f.store.justifiedCheckpoint = f.store.unrealizedJustifiedCheckpoint
f.store.finalizedCheckpoint = f.store.unrealizedFinalizedCheckpoint
f.store.finalizedCheckpoint.Epoch = node.finalizedEpoch
}
}
}

func (s *Store) pullTips(state state.BeaconState, node *Node, jc, fc *ethpb.Checkpoint) (*ethpb.Checkpoint, *ethpb.Checkpoint) {
s.nodesLock.Lock()
defer s.nodesLock.Unlock()

if node.parent == NonExistentNode { // Nothing to do if the parent is nil.
return jc, fc
}

currentEpoch := slots.ToEpoch(slots.CurrentSlot(s.genesisTime))
stateSlot := state.Slot()
stateEpoch := slots.ToEpoch(stateSlot)

parent := s.nodes[node.parent]
currJustified := parent.unrealizedJustifiedEpoch == currentEpoch
prevJustified := parent.unrealizedJustifiedEpoch+1 == currentEpoch
tooEarlyForCurr := slots.SinceEpochStarts(stateSlot)*3 < params.BeaconConfig().SlotsPerEpoch*2
if currJustified || (stateEpoch == currentEpoch && prevJustified && tooEarlyForCurr) {
node.unrealizedJustifiedEpoch = parent.unrealizedJustifiedEpoch
node.unrealizedFinalizedEpoch = parent.unrealizedFinalizedEpoch
return jc, fc
}

uj, uf, err := precompute.UnrealizedCheckpoints(state)
if err != nil {
log.WithError(err).Debug("could not compute unrealized checkpoints")
uj, uf = jc, fc
}

// Update store's unrealized checkpoints.
s.checkpointsLock.Lock()
if uj.Epoch > s.unrealizedJustifiedCheckpoint.Epoch {
s.unrealizedJustifiedCheckpoint = &forkchoicetypes.Checkpoint{
Epoch: uj.Epoch, Root: bytesutil.ToBytes32(uj.Root),
}
}
if uf.Epoch > s.unrealizedFinalizedCheckpoint.Epoch {
s.unrealizedJustifiedCheckpoint = &forkchoicetypes.Checkpoint{
Epoch: uj.Epoch, Root: bytesutil.ToBytes32(uj.Root),
}
s.unrealizedFinalizedCheckpoint = &forkchoicetypes.Checkpoint{
Epoch: uf.Epoch, Root: bytesutil.ToBytes32(uf.Root),
}
}
s.checkpointsLock.Unlock()

// Update node's checkpoints.
node.unrealizedJustifiedEpoch, node.unrealizedFinalizedEpoch = uj.Epoch, uf.Epoch
if stateEpoch < currentEpoch {
jc, fc = uj, uf
node.justifiedEpoch = uj.Epoch
node.finalizedEpoch = uf.Epoch
}

return jc, fc
}

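Unlike the doubly linked tree version earlier in this diff, protoarray nodes refer to their parent by index into `s.nodes`, so "no parent" is the sentinel `NonExistentNode` rather than a nil pointer, and the parent must be resolved through the slice before its unrealized epochs can be read. A minimal illustration of the two lookups (simplified types, not the package's actual structs):

```go
package main

import "fmt"

const nonExistentNode = ^uint64(0) // sentinel index, mirroring NonExistentNode

type arrayNode struct{ parent uint64 }
type treeNode struct{ parent *treeNode }

// parentOfArrayNode resolves the parent through an index into the node slice.
func parentOfArrayNode(nodes []*arrayNode, n *arrayNode) *arrayNode {
	if n.parent == nonExistentNode {
		return nil // nothing to pull tips from
	}
	return nodes[n.parent]
}

// parentOfTreeNode follows a pointer directly; nil means no parent.
func parentOfTreeNode(n *treeNode) *treeNode {
	return n.parent
}

func main() {
	root := &arrayNode{parent: nonExistentNode}
	child := &arrayNode{parent: 0}
	nodes := []*arrayNode{root, child}
	fmt.Println(parentOfArrayNode(nodes, root) == nil, parentOfArrayNode(nodes, child) == root)
	fmt.Println(parentOfTreeNode(&treeNode{}) == nil)
}
```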
@@ -5,7 +5,6 @@ import (
"testing"

forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/testing/require"
@@ -148,8 +147,6 @@ func TestStore_NoDeadLock(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'g'}, 2))
require.NoError(t, f.store.setUnrealizedFinalizedEpoch([32]byte{'g'}, 1))
f.store.unrealizedJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 2}
f.store.unrealizedFinalizedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 1}
state, blkRoot, err = prepareForkchoiceState(ctx, 107, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
@@ -239,7 +236,6 @@ func TestStore_ForkNextEpoch(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'d'}, 1))
f.store.unrealizedJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 1}
f.UpdateUnrealizedCheckpoints()
headRoot, err = f.Head(ctx, []uint64{100})
require.NoError(t, err)
@@ -249,90 +245,3 @@ func TestStore_ForkNextEpoch(t *testing.T) {
require.Equal(t, uint64(0), f.store.nodes[8].weight)
require.Equal(t, uint64(100), f.store.nodes[7].weight)
}

func TestStore_PullTips_Heuristics(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnablePullTips: true,
})
defer resetCfg()
ctx := context.Background()
t.Run("Current epoch is justified", func(tt *testing.T) {
f := setup(1, 1)
st, root, err := prepareForkchoiceState(ctx, 65, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
f.store.nodes[1].unrealizedJustifiedEpoch = types.Epoch(2)
driftGenesisTime(f, 66, 0)

st, root, err = prepareForkchoiceState(ctx, 66, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
require.Equal(tt, types.Epoch(2), f.store.nodes[2].unrealizedJustifiedEpoch)
require.Equal(tt, types.Epoch(1), f.store.nodes[2].unrealizedFinalizedEpoch)
})

t.Run("Previous Epoch is justified and too early for current", func(tt *testing.T) {
f := setup(1, 1)
st, root, err := prepareForkchoiceState(ctx, 95, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
f.store.nodes[1].unrealizedJustifiedEpoch = types.Epoch(2)
driftGenesisTime(f, 96, 0)

st, root, err = prepareForkchoiceState(ctx, 96, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
require.Equal(tt, types.Epoch(2), f.store.nodes[2].unrealizedJustifiedEpoch)
require.Equal(tt, types.Epoch(1), f.store.nodes[2].unrealizedFinalizedEpoch)
})
t.Run("Previous Epoch is justified and not too early for current", func(tt *testing.T) {
f := setup(1, 1)
st, root, err := prepareForkchoiceState(ctx, 95, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
f.store.nodes[1].unrealizedJustifiedEpoch = types.Epoch(2)
driftGenesisTime(f, 127, 0)

st, root, err = prepareForkchoiceState(ctx, 127, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
// Check that the justification point is not the parent's.
// This tests that the heuristics in pullTips did not apply and
// the test continues to compute a bogus unrealized
// justification
require.Equal(tt, types.Epoch(1), f.store.nodes[2].unrealizedJustifiedEpoch)
})
t.Run("Block from previous Epoch", func(tt *testing.T) {
f := setup(1, 1)
st, root, err := prepareForkchoiceState(ctx, 94, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
f.store.nodes[1].unrealizedJustifiedEpoch = types.Epoch(2)
driftGenesisTime(f, 96, 0)

st, root, err = prepareForkchoiceState(ctx, 95, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
// Check that the justification point is not the parent's.
// This tests that the heuristics in pullTips did not apply and
// the test continues to compute a bogus unrealized
// justification
require.Equal(tt, types.Epoch(1), f.store.nodes[2].unrealizedJustifiedEpoch)
})
t.Run("Previous Epoch is not justified", func(tt *testing.T) {
f := setup(1, 1)
st, root, err := prepareForkchoiceState(ctx, 128, [32]byte{'p'}, [32]byte{}, [32]byte{}, 2, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
driftGenesisTime(f, 129, 0)

st, root, err = prepareForkchoiceState(ctx, 129, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 2, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
// Check that the justification point is not the parent's.
// This tests that the heuristics in pullTips did not apply and
// the test continues to compute a bogus unrealized
// justification
require.Equal(tt, types.Epoch(2), f.store.nodes[2].unrealizedJustifiedEpoch)
})
}

@@ -57,7 +57,6 @@ go_library(
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",
"//crypto/ecdsa:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//io/file:go_default_library",
@@ -71,6 +70,7 @@ go_library(
"//runtime/version:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//crypto:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
@@ -79,8 +79,6 @@ go_library(
"@com_github_libp2p_go_libp2p//:go_default_library",
"@com_github_libp2p_go_libp2p//config:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/protocol/identify:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/security/noise:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/transport/tcp:go_default_library",
"@com_github_libp2p_go_libp2p_core//connmgr:go_default_library",
"@com_github_libp2p_go_libp2p_core//control:go_default_library",
"@com_github_libp2p_go_libp2p_core//crypto:go_default_library",
@@ -88,8 +86,10 @@ go_library(
"@com_github_libp2p_go_libp2p_core//network:go_default_library",
"@com_github_libp2p_go_libp2p_core//peer:go_default_library",
"@com_github_libp2p_go_libp2p_core//protocol:go_default_library",
"@com_github_libp2p_go_libp2p_noise//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library",
"@com_github_libp2p_go_tcp_transport//:go_default_library",
"@com_github_multiformats_go_multiaddr//:go_default_library",
"@com_github_multiformats_go_multiaddr//net:go_default_library",
"@com_github_pkg_errors//:go_default_library",
@@ -149,7 +149,6 @@ go_test(
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",
"//crypto/ecdsa:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network:go_default_library",
@@ -169,15 +168,15 @@ go_test(
"@com_github_golang_snappy//:go_default_library",
"@com_github_kevinms_leakybucket_go//:go_default_library",
"@com_github_libp2p_go_libp2p//:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/host/blank:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/net/swarm/testing:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/security/noise:go_default_library",
"@com_github_libp2p_go_libp2p_blankhost//:go_default_library",
"@com_github_libp2p_go_libp2p_core//crypto:go_default_library",
"@com_github_libp2p_go_libp2p_core//host:go_default_library",
"@com_github_libp2p_go_libp2p_core//network:go_default_library",
"@com_github_libp2p_go_libp2p_core//peer:go_default_library",
"@com_github_libp2p_go_libp2p_noise//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library",
"@com_github_libp2p_go_libp2p_swarm//testing:go_default_library",
"@com_github_multiformats_go_multiaddr//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",

@@ -5,8 +5,8 @@ import (
"fmt"
"testing"

bh "github.com/libp2p/go-libp2p/p2p/host/blank"
swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
bh "github.com/libp2p/go-libp2p-blankhost"
swarmt "github.com/libp2p/go-libp2p-swarm/testing"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)

@@ -16,7 +16,6 @@ import (
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/config/params"
ecdsaprysm "github.com/prysmaticlabs/prysm/crypto/ecdsa"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/prysmaticlabs/prysm/time/slots"
)
@@ -392,10 +391,7 @@ func convertToAddrInfo(node *enode.Node) (*peer.AddrInfo, ma.Multiaddr, error) {

func convertToSingleMultiAddr(node *enode.Node) (ma.Multiaddr, error) {
pubkey := node.Pubkey()
assertedKey, err := ecdsaprysm.ConvertToInterfacePubkey(pubkey)
if err != nil {
return nil, errors.Wrap(err, "could not get pubkey")
}
assertedKey := convertToInterfacePubkey(pubkey)
id, err := peer.IDFromPublicKey(assertedKey)
if err != nil {
return nil, errors.Wrap(err, "could not get peer id")
@@ -405,10 +401,7 @@ func convertToSingleMultiAddr(node *enode.Node) (ma.Multiaddr, error) {

func convertToUdpMultiAddr(node *enode.Node) ([]ma.Multiaddr, error) {
pubkey := node.Pubkey()
assertedKey, err := ecdsaprysm.ConvertToInterfacePubkey(pubkey)
if err != nil {
return nil, errors.Wrap(err, "could not get pubkey")
}
assertedKey := convertToInterfacePubkey(pubkey)
id, err := peer.IDFromPublicKey(assertedKey)
if err != nil {
return nil, errors.Wrap(err, "could not get peer id")

@@ -7,11 +7,10 @@ import (

"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p-core/peer"
noise "github.com/libp2p/go-libp2p/p2p/security/noise"
"github.com/libp2p/go-libp2p/p2p/transport/tcp"
noise "github.com/libp2p/go-libp2p-noise"
"github.com/libp2p/go-tcp-transport"
ma "github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
ecdsaprysm "github.com/prysmaticlabs/prysm/crypto/ecdsa"
"github.com/prysmaticlabs/prysm/runtime/version"
)

@@ -31,10 +30,7 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Opt
log.Fatalf("Failed to p2p listen: %v", err)
}
}
ifaceKey, err := ecdsaprysm.ConvertToInterfacePrivkey(priKey)
if err != nil {
log.Fatalf("Failed to retrieve private key: %v", err)
}
ifaceKey := convertToInterfacePrivkey(priKey)
id, err := peer.IDFromPublicKey(ifaceKey.GetPublic())
if err != nil {
log.Fatalf("Failed to retrieve peer id: %v", err)
@@ -117,11 +113,7 @@ func multiAddressBuilderWithID(ipAddr, protocol string, port uint, id peer.ID) (
// private key contents cannot be marshaled, an exception is thrown.
func privKeyOption(privkey *ecdsa.PrivateKey) libp2p.Option {
return func(cfg *libp2p.Config) error {
ifaceKey, err := ecdsaprysm.ConvertToInterfacePrivkey(privkey)
if err != nil {
return err
}
log.Debug("ECDSA private key generated")
return cfg.Apply(libp2p.Identity(ifaceKey))
return cfg.Apply(libp2p.Identity(convertToInterfacePrivkey(privkey)))
}
}

@@ -12,7 +12,6 @@ import (
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/libp2p/go-libp2p-core/crypto"
"github.com/prysmaticlabs/prysm/config/params"
ecdsaprysm "github.com/prysmaticlabs/prysm/crypto/ecdsa"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)
@@ -37,8 +36,7 @@ func TestPrivateKeyLoading(t *testing.T) {
}
pKey, err := privKey(cfg)
require.NoError(t, err, "Could not apply option")
newPkey, err := ecdsaprysm.ConvertToInterfacePrivkey(pKey)
require.NoError(t, err)
newPkey := convertToInterfacePrivkey(pKey)
rawBytes, err := key.Raw()
require.NoError(t, err)
newRaw, err := newPkey.Raw()

@@ -13,7 +13,7 @@ import (
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p-core/host"
"github.com/libp2p/go-libp2p-core/peer"
noise "github.com/libp2p/go-libp2p/p2p/security/noise"
noise "github.com/libp2p/go-libp2p-noise"
"github.com/multiformats/go-multiaddr"
"github.com/prysmaticlabs/prysm/async/event"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"

@@ -19,7 +19,6 @@ import (
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
ecdsaprysm "github.com/prysmaticlabs/prysm/crypto/ecdsa"
pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
@@ -164,7 +163,7 @@ func Test_AttSubnets(t *testing.T) {
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
convertedKey := convertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
localNode = initializeAttSubnets(localNode)
@@ -180,7 +179,7 @@ func Test_AttSubnets(t *testing.T) {
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
convertedKey := convertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
entry := enr.WithEntry(attSubnetEnrKey, []byte{})
@@ -198,7 +197,7 @@ func Test_AttSubnets(t *testing.T) {
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
convertedKey := convertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
entry := enr.WithEntry(attSubnetEnrKey, make([]byte, 4))
@@ -216,7 +215,7 @@ func Test_AttSubnets(t *testing.T) {
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
convertedKey := convertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
entry := enr.WithEntry(attSubnetEnrKey, make([]byte, byteCount(int(attestationSubnetCount))+1))
@@ -234,7 +233,7 @@ func Test_AttSubnets(t *testing.T) {
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
convertedKey := convertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
entry := enr.WithEntry(attSubnetEnrKey, make([]byte, byteCount(int(attestationSubnetCount))+100))
@@ -252,7 +251,7 @@ func Test_AttSubnets(t *testing.T) {
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
convertedKey := convertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
bitV := bitfield.NewBitvector64()
@@ -271,7 +270,7 @@ func Test_AttSubnets(t *testing.T) {
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
convertedKey := convertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
bitV := bitfield.NewBitvector64()
@@ -299,7 +298,7 @@ func Test_AttSubnets(t *testing.T) {
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
convertedKey := convertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
bitV := bitfield.NewBitvector64()
@@ -349,7 +348,7 @@ func Test_SyncSubnets(t *testing.T) {
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
convertedKey := convertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
localNode = initializeSyncCommSubnets(localNode)
@@ -365,7 +364,7 @@ func Test_SyncSubnets(t *testing.T) {
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
convertedKey := convertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
entry := enr.WithEntry(syncCommsSubnetEnrKey, []byte{})
@@ -383,7 +382,7 @@ func Test_SyncSubnets(t *testing.T) {
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
convertedKey := convertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
entry := enr.WithEntry(syncCommsSubnetEnrKey, make([]byte, byteCount(int(syncCommsSubnetCount))+1))
@@ -401,7 +400,7 @@ func Test_SyncSubnets(t *testing.T) {
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
convertedKey := convertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
entry := enr.WithEntry(syncCommsSubnetEnrKey, make([]byte, byteCount(int(syncCommsSubnetCount))+100))
@@ -419,7 +418,7 @@ func Test_SyncSubnets(t *testing.T) {
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
convertedKey := convertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
bitV := bitfield.Bitvector4{byte(0x00)}
@@ -438,7 +437,7 @@ func Test_SyncSubnets(t *testing.T) {
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
convertedKey := convertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
bitV := bitfield.Bitvector4{byte(0x00)}
@@ -464,7 +463,7 @@ func Test_SyncSubnets(t *testing.T) {
assert.NoError(t, err)
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
assert.NoError(t, err)
convertedKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(priv)
convertedKey := convertFromInterfacePrivKey(priv)
assert.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
bitV := bitfield.Bitvector4{byte(0x00)}

@@ -25,8 +25,7 @@ go_library(
"@com_github_ethereum_go_ethereum//crypto:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/host/blank:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/net/swarm/testing:go_default_library",
"@com_github_libp2p_go_libp2p_blankhost//:go_default_library",
"@com_github_libp2p_go_libp2p_core//:go_default_library",
"@com_github_libp2p_go_libp2p_core//connmgr:go_default_library",
"@com_github_libp2p_go_libp2p_core//control:go_default_library",
@@ -37,6 +36,7 @@ go_library(
"@com_github_libp2p_go_libp2p_core//peerstore:go_default_library",
"@com_github_libp2p_go_libp2p_core//protocol:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
"@com_github_libp2p_go_libp2p_swarm//testing:go_default_library",
"@com_github_multiformats_go_multiaddr//:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",

@@ -10,6 +10,7 @@ import (
"time"

"github.com/ethereum/go-ethereum/p2p/enr"
bhost "github.com/libp2p/go-libp2p-blankhost"
core "github.com/libp2p/go-libp2p-core"
"github.com/libp2p/go-libp2p-core/control"
"github.com/libp2p/go-libp2p-core/host"
@@ -17,8 +18,7 @@ import (
"github.com/libp2p/go-libp2p-core/peer"
"github.com/libp2p/go-libp2p-core/protocol"
pubsub "github.com/libp2p/go-libp2p-pubsub"
bhost "github.com/libp2p/go-libp2p/p2p/host/blank"
swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
swarmt "github.com/libp2p/go-libp2p-swarm/testing"
"github.com/multiformats/go-multiaddr"
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder"

@@ -12,12 +12,12 @@ import (
"path"
"time"

gcrypto "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/libp2p/go-libp2p-core/crypto"
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
ecdsaprysm "github.com/prysmaticlabs/prysm/crypto/ecdsa"
"github.com/prysmaticlabs/prysm/io/file"
"github.com/prysmaticlabs/prysm/network"
pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
@@ -44,6 +44,22 @@ func SerializeENR(record *enr.Record) (string, error) {
return enrString, nil
}

func convertFromInterfacePrivKey(privkey crypto.PrivKey) *ecdsa.PrivateKey {
typeAssertedKey := (*ecdsa.PrivateKey)(privkey.(*crypto.Secp256k1PrivateKey))
typeAssertedKey.Curve = gcrypto.S256() // Temporary hack, so libp2p Secp256k1 is recognized as geth Secp256k1 in disc v5.1.
return typeAssertedKey
}

func convertToInterfacePrivkey(privkey *ecdsa.PrivateKey) crypto.PrivKey {
typeAssertedKey := crypto.PrivKey((*crypto.Secp256k1PrivateKey)(privkey))
return typeAssertedKey
}

func convertToInterfacePubkey(pubkey *ecdsa.PublicKey) crypto.PubKey {
typeAssertedKey := crypto.PubKey((*crypto.Secp256k1PublicKey)(pubkey))
return typeAssertedKey
}

// Determines a private key for p2p networking from the p2p service's
// configuration struct. If no key is found, it generates a new one.
func privKey(cfg *Config) (*ecdsa.PrivateKey, error) {
@@ -61,7 +77,8 @@ func privKey(cfg *Config) (*ecdsa.PrivateKey, error) {
if err != nil {
return nil, err
}
return ecdsaprysm.ConvertFromInterfacePrivKey(priv)
|
||||
convertedKey := convertFromInterfacePrivKey(priv)
|
||||
return convertedKey, nil
|
||||
}
|
||||
if defaultKeysExist && privateKeyPath == "" {
|
||||
privateKeyPath = defaultKeyPath
|
||||
@@ -85,7 +102,7 @@ func privKeyFromFile(path string) (*ecdsa.PrivateKey, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ecdsaprysm.ConvertFromInterfacePrivKey(unmarshalledKey)
|
||||
return convertFromInterfacePrivKey(unmarshalledKey), nil
|
||||
}
|
||||
|
||||
// Retrieves node p2p metadata from a set of configuration values
|
||||
|
||||
@@ -39,8 +39,6 @@ go_library(
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
"//contracts/deposit:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
@@ -79,7 +77,6 @@ go_test(
|
||||
"block_reader_test.go",
|
||||
"check_transition_config_test.go",
|
||||
"deposit_test.go",
|
||||
"engine_client_fuzz_test.go",
|
||||
"engine_client_test.go",
|
||||
"init_test.go",
|
||||
"log_processing_test.go",
|
||||
@@ -88,7 +85,6 @@ go_test(
|
||||
"provider_test.go",
|
||||
"service_test.go",
|
||||
],
|
||||
data = glob(["testdata/**"]),
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//async/event:go_default_library",
|
||||
@@ -125,7 +121,6 @@ go_test(
|
||||
"@com_github_ethereum_go_ethereum//accounts/abi/bind/backends:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//core/beacon:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//trie:go_default_library",
|
||||
|
||||
@@ -4,14 +4,12 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
gethtypes "github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/holiman/uint256"
|
||||
mockChain "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
@@ -163,27 +161,7 @@ func TestService_logTtdStatus(t *testing.T) {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
|
||||
resp := &pb.ExecutionBlock{
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.Hash{},
|
||||
UncleHash: common.Hash{},
|
||||
Coinbase: common.Address{},
|
||||
Root: common.Hash{},
|
||||
TxHash: common.Hash{},
|
||||
ReceiptHash: common.Hash{},
|
||||
Bloom: gethtypes.Bloom{},
|
||||
Difficulty: big.NewInt(1),
|
||||
Number: big.NewInt(2),
|
||||
GasLimit: 3,
|
||||
GasUsed: 4,
|
||||
Time: 5,
|
||||
Extra: nil,
|
||||
MixDigest: common.Hash{},
|
||||
Nonce: gethtypes.BlockNonce{},
|
||||
BaseFee: big.NewInt(6),
|
||||
},
|
||||
TotalDifficulty: "0x12345678",
|
||||
}
|
||||
resp := &pb.ExecutionBlock{TotalDifficulty: "0x12345678"}
|
||||
respJSON := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
|
||||
@@ -14,8 +14,6 @@ import (
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -48,19 +46,10 @@ type ForkchoiceUpdatedResponse struct {
|
||||
PayloadId *pb.PayloadIDBytes `json:"payloadId"`
|
||||
}
|
||||
|
||||
// ExecutionPayloadReconstructor defines a service that can reconstruct a full beacon
|
||||
// block with an execution payload from a signed beacon block and a connection
|
||||
// to an execution client's engine API.
|
||||
type ExecutionPayloadReconstructor interface {
|
||||
ReconstructFullBellatrixBlock(
|
||||
ctx context.Context, blindedBlock interfaces.SignedBeaconBlock,
|
||||
) (interfaces.SignedBeaconBlock, error)
|
||||
}
|
||||
|
||||
// EngineCaller defines a client that can interact with an Ethereum
|
||||
// execution node's engine service via JSON-RPC.
|
||||
type EngineCaller interface {
|
||||
NewPayload(ctx context.Context, payload interfaces.ExecutionData) ([]byte, error)
|
||||
NewPayload(ctx context.Context, payload *pb.ExecutionPayload) ([]byte, error)
|
||||
ForkchoiceUpdated(
|
||||
ctx context.Context, state *pb.ForkchoiceState, attrs *pb.PayloadAttributes,
|
||||
) (*pb.PayloadIDBytes, []byte, error)
|
||||
@@ -73,7 +62,7 @@ type EngineCaller interface {
|
||||
}
|
||||
|
||||
// NewPayload calls the engine_newPayloadV1 method via JSON-RPC.
|
||||
func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionData) ([]byte, error) {
|
||||
func (s *Service) NewPayload(ctx context.Context, payload *pb.ExecutionPayload) ([]byte, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.NewPayload")
|
||||
defer span.End()
|
||||
start := time.Now()
|
||||
@@ -84,18 +73,14 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa
|
||||
ctx, cancel := context.WithDeadline(ctx, d)
|
||||
defer cancel()
|
||||
result := &pb.PayloadStatus{}
|
||||
payloadPb, ok := payload.Proto().(*pb.ExecutionPayload)
|
||||
if !ok {
|
||||
return nil, errors.New("execution data must be an execution payload")
|
||||
}
|
||||
err := s.rpcClient.CallContext(ctx, result, NewPayloadMethod, payloadPb)
|
||||
err := s.rpcClient.CallContext(ctx, result, NewPayloadMethod, payload)
|
||||
if err != nil {
|
||||
return nil, handleRPCError(err)
|
||||
}
|
||||
|
||||
switch result.Status {
|
||||
case pb.PayloadStatus_INVALID_BLOCK_HASH:
|
||||
return nil, ErrInvalidBlockHashPayloadStatus
|
||||
return nil, fmt.Errorf("could not validate block hash: %v", result.ValidationError)
|
||||
case pb.PayloadStatus_ACCEPTED, pb.PayloadStatus_SYNCING:
|
||||
return nil, ErrAcceptedSyncingPayloadStatus
|
||||
case pb.PayloadStatus_INVALID:
|
||||
@@ -243,8 +228,8 @@ func (s *Service) GetTerminalBlockHash(ctx context.Context) ([]byte, bool, error
|
||||
}
|
||||
blockReachedTTD := currentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
|
||||
|
||||
parentHash := blk.ParentHash
|
||||
if parentHash == params.BeaconConfig().ZeroHash {
|
||||
parentHash := bytesutil.ToBytes32(blk.ParentHash)
|
||||
if len(blk.ParentHash) == 0 || parentHash == params.BeaconConfig().ZeroHash {
|
||||
return nil, false, nil
|
||||
}
|
||||
parentBlk, err := s.ExecutionBlockByHash(ctx, parentHash)
|
||||
@@ -263,12 +248,12 @@ func (s *Service) GetTerminalBlockHash(ctx context.Context) ([]byte, bool, error
|
||||
if !parentReachedTTD {
|
||||
log.WithFields(logrus.Fields{
|
||||
"number": blk.Number,
|
||||
"hash": fmt.Sprintf("%#x", bytesutil.Trunc(blk.Hash[:])),
|
||||
"hash": fmt.Sprintf("%#x", bytesutil.Trunc(blk.Hash)),
|
||||
"td": blk.TotalDifficulty,
|
||||
"parentTd": parentBlk.TotalDifficulty,
|
||||
"ttd": terminalTotalDifficulty,
|
||||
}).Info("Retrieved terminal block hash")
|
||||
return blk.Hash[:], true, nil
|
||||
return blk.Hash, true, nil
|
||||
}
|
||||
} else {
|
||||
return nil, false, nil
|
||||
@@ -305,80 +290,6 @@ func (s *Service) ExecutionBlockByHash(ctx context.Context, hash common.Hash) (*
|
||||
return result, handleRPCError(err)
|
||||
}
|
||||
|
||||
// ReconstructFullBellatrixBlock takes in a blinded beacon block and reconstructs
|
||||
// a beacon block with a full execution payload via the engine API.
|
||||
func (s *Service) ReconstructFullBellatrixBlock(
|
||||
ctx context.Context, blindedBlock interfaces.SignedBeaconBlock,
|
||||
) (interfaces.SignedBeaconBlock, error) {
|
||||
if err := wrapper.BeaconBlockIsNil(blindedBlock); err != nil {
|
||||
return nil, errors.Wrap(err, "cannot reconstruct bellatrix block from nil data")
|
||||
}
|
||||
if !blindedBlock.Block().IsBlinded() {
|
||||
return nil, errors.New("can only reconstruct block from blinded block format")
|
||||
}
|
||||
header, err := blindedBlock.Block().Body().Execution()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
executionBlockHash := common.BytesToHash(header.BlockHash())
|
||||
executionBlock, err := s.ExecutionBlockByHash(ctx, executionBlockHash)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not fetch execution block with txs by hash %#x: %v", executionBlockHash, err)
|
||||
}
|
||||
if executionBlock == nil {
|
||||
return nil, fmt.Errorf("received nil execution block for request by hash %#x", executionBlockHash)
|
||||
}
|
||||
payload, err := fullPayloadFromExecutionBlock(header, executionBlock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fullBlock, err := wrapper.BuildSignedBeaconBlockFromExecutionPayload(blindedBlock, payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
reconstructedExecutionPayloadCount.Add(1)
|
||||
return fullBlock, nil
|
||||
}
|
||||
|
||||
func fullPayloadFromExecutionBlock(
|
||||
header interfaces.ExecutionData, block *pb.ExecutionBlock,
|
||||
) (*pb.ExecutionPayload, error) {
|
||||
if header.IsNil() || block == nil {
|
||||
return nil, errors.New("execution block and header cannot be nil")
|
||||
}
|
||||
if !bytes.Equal(header.BlockHash(), block.Hash[:]) {
|
||||
return nil, fmt.Errorf(
|
||||
"block hash field in execution header %#x does not match execution block hash %#x",
|
||||
header.BlockHash(),
|
||||
block.Hash,
|
||||
)
|
||||
}
|
||||
txs := make([][]byte, len(block.Transactions))
|
||||
for i, tx := range block.Transactions {
|
||||
txBin, err := tx.MarshalBinary()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
txs[i] = txBin
|
||||
}
|
||||
return &pb.ExecutionPayload{
|
||||
ParentHash: header.ParentHash(),
|
||||
FeeRecipient: header.FeeRecipient(),
|
||||
StateRoot: header.StateRoot(),
|
||||
ReceiptsRoot: header.ReceiptsRoot(),
|
||||
LogsBloom: header.LogsBloom(),
|
||||
PrevRandao: header.PrevRandao(),
|
||||
BlockNumber: header.BlockNumber(),
|
||||
GasLimit: header.GasLimit(),
|
||||
GasUsed: header.GasUsed(),
|
||||
Timestamp: header.Timestamp(),
|
||||
ExtraData: header.ExtraData(),
|
||||
BaseFeePerGas: header.BaseFeePerGas(),
|
||||
BlockHash: block.Hash[:],
|
||||
Transactions: txs,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Handles errors received from the RPC server according to the specification.
|
||||
func handleRPCError(err error) error {
|
||||
if err == nil {
|
||||
@@ -421,7 +332,7 @@ func handleRPCError(err error) error {
|
||||
if !ok {
|
||||
return errors.Wrapf(err, "got an unexpected error in JSON-RPC response")
|
||||
}
|
||||
return errors.Wrapf(ErrServer, "%v", errWithData.Error())
|
||||
return errors.Wrapf(ErrServer, "%v", errWithData.ErrorData())
|
||||
default:
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -1,114 +0,0 @@
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package powchain_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/beacon"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
)
|
||||
|
||||
func FuzzForkChoiceResponse(f *testing.F) {
|
||||
valHash := common.Hash([32]byte{0xFF, 0x01})
|
||||
payloadID := beacon.PayloadID([8]byte{0x01, 0xFF, 0xAA, 0x00, 0xEE, 0xFE, 0x00, 0x00})
|
||||
valErr := "asjajshjahsaj"
|
||||
seed := &beacon.ForkChoiceResponse{
|
||||
PayloadStatus: beacon.PayloadStatusV1{
|
||||
Status: "INVALID_TERMINAL_BLOCK",
|
||||
LatestValidHash: &valHash,
|
||||
ValidationError: &valErr,
|
||||
},
|
||||
PayloadID: &payloadID,
|
||||
}
|
||||
output, err := json.Marshal(seed)
|
||||
assert.NoError(f, err)
|
||||
f.Add(output)
|
||||
f.Fuzz(func(t *testing.T, jsonBlob []byte) {
|
||||
gethResp := &beacon.ForkChoiceResponse{}
|
||||
prysmResp := &powchain.ForkchoiceUpdatedResponse{}
|
||||
gethErr := json.Unmarshal(jsonBlob, gethResp)
|
||||
prysmErr := json.Unmarshal(jsonBlob, prysmResp)
|
||||
assert.Equal(t, gethErr != nil, prysmErr != nil, fmt.Sprintf("geth and prysm unmarshaller return inconsistent errors. %v and %v", gethErr, prysmErr))
|
||||
// Nothing to marshal if we have an error.
|
||||
if gethErr != nil {
|
||||
return
|
||||
}
|
||||
gethBlob, gethErr := json.Marshal(gethResp)
|
||||
prysmBlob, prysmErr := json.Marshal(prysmResp)
|
||||
assert.Equal(t, gethErr != nil, prysmErr != nil, "geth and prysm unmarshaller return inconsistent errors")
|
||||
newGethResp := &beacon.ForkChoiceResponse{}
|
||||
newGethErr := json.Unmarshal(prysmBlob, newGethResp)
|
||||
assert.NoError(t, newGethErr)
|
||||
if newGethResp.PayloadStatus.Status == "UNKNOWN" {
|
||||
return
|
||||
}
|
||||
|
||||
newGethResp2 := &beacon.ForkChoiceResponse{}
|
||||
newGethErr = json.Unmarshal(gethBlob, newGethResp2)
|
||||
assert.NoError(t, newGethErr)
|
||||
|
||||
assert.DeepEqual(t, newGethResp.PayloadID, newGethResp2.PayloadID)
|
||||
assert.DeepEqual(t, newGethResp.PayloadStatus.Status, newGethResp2.PayloadStatus.Status)
|
||||
assert.DeepEqual(t, newGethResp.PayloadStatus.LatestValidHash, newGethResp2.PayloadStatus.LatestValidHash)
|
||||
isNilOrEmpty := newGethResp.PayloadStatus.ValidationError == nil || (*newGethResp.PayloadStatus.ValidationError == "")
|
||||
isNilOrEmpty2 := newGethResp2.PayloadStatus.ValidationError == nil || (*newGethResp2.PayloadStatus.ValidationError == "")
|
||||
assert.DeepEqual(t, isNilOrEmpty, isNilOrEmpty2)
|
||||
if !isNilOrEmpty {
|
||||
assert.DeepEqual(t, *newGethResp.PayloadStatus.ValidationError, *newGethResp2.PayloadStatus.ValidationError)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func FuzzExecutionPayload(f *testing.F) {
|
||||
logsBloom := [256]byte{'j', 'u', 'n', 'k'}
|
||||
execData := &beacon.ExecutableDataV1{
|
||||
ParentHash: common.Hash([32]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}),
|
||||
FeeRecipient: common.Address([20]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF}),
|
||||
StateRoot: common.Hash([32]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}),
|
||||
ReceiptsRoot: common.Hash([32]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}),
|
||||
LogsBloom: logsBloom[:],
|
||||
Random: common.Hash([32]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}),
|
||||
Number: math.MaxUint64,
|
||||
GasLimit: math.MaxUint64,
|
||||
GasUsed: math.MaxUint64,
|
||||
Timestamp: 100,
|
||||
ExtraData: nil,
|
||||
BaseFeePerGas: big.NewInt(math.MaxInt),
|
||||
BlockHash: common.Hash([32]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}),
|
||||
Transactions: [][]byte{{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}, {0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}, {0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}, {0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}},
|
||||
}
|
||||
output, err := json.Marshal(execData)
|
||||
assert.NoError(f, err)
|
||||
f.Add(output)
|
||||
f.Fuzz(func(t *testing.T, jsonBlob []byte) {
|
||||
gethResp := &beacon.ExecutableDataV1{}
|
||||
prysmResp := &pb.ExecutionPayload{}
|
||||
gethErr := json.Unmarshal(jsonBlob, gethResp)
|
||||
prysmErr := json.Unmarshal(jsonBlob, prysmResp)
|
||||
assert.Equal(t, gethErr != nil, prysmErr != nil, fmt.Sprintf("geth and prysm unmarshaller return inconsistent errors. %v and %v", gethErr, prysmErr))
|
||||
// Nothing to marshal if we have an error.
|
||||
if gethErr != nil {
|
||||
return
|
||||
}
|
||||
gethBlob, gethErr := json.Marshal(gethResp)
|
||||
prysmBlob, prysmErr := json.Marshal(prysmResp)
|
||||
assert.Equal(t, gethErr != nil, prysmErr != nil, "geth and prysm unmarshaller return inconsistent errors")
|
||||
newGethResp := &beacon.ExecutableDataV1{}
|
||||
newGethErr := json.Unmarshal(prysmBlob, newGethResp)
|
||||
assert.NoError(t, newGethErr)
|
||||
newGethResp2 := &beacon.ExecutableDataV1{}
|
||||
newGethErr = json.Unmarshal(gethBlob, newGethResp2)
|
||||
assert.NoError(t, newGethErr)
|
||||
|
||||
assert.DeepEqual(t, newGethResp.LogsBloom, newGethResp2.LogsBloom)
|
||||
})
|
||||
}
|
||||
@@ -12,24 +12,19 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
gethtypes "github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/pkg/errors"
|
||||
mocks "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var (
|
||||
_ = ExecutionPayloadReconstructor(&Service{})
|
||||
_ = EngineCaller(&Service{})
|
||||
_ = EngineCaller(&mocks.EngineClient{})
|
||||
)
|
||||
@@ -65,9 +60,7 @@ func TestClient_IPC(t *testing.T) {
|
||||
require.Equal(t, true, ok)
|
||||
req, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
|
||||
require.Equal(t, true, ok)
|
||||
wrappedPayload, err := wrapper.WrappedExecutionPayload(req)
|
||||
require.NoError(t, err)
|
||||
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload)
|
||||
latestValidHash, err := srv.NewPayload(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, bytesutil.ToBytes32(want.LatestValidHash), bytesutil.ToBytes32(latestValidHash))
|
||||
})
|
||||
@@ -232,9 +225,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
client := newPayloadSetup(t, want, execPayload)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
wrappedPayload, err := wrapper.WrappedExecutionPayload(execPayload)
|
||||
require.NoError(t, err)
|
||||
resp, err := client.NewPayload(ctx, wrappedPayload)
|
||||
resp, err := client.NewPayload(ctx, execPayload)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want.LatestValidHash, resp)
|
||||
})
|
||||
@@ -246,9 +237,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
client := newPayloadSetup(t, want, execPayload)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
wrappedPayload, err := wrapper.WrappedExecutionPayload(execPayload)
|
||||
require.NoError(t, err)
|
||||
resp, err := client.NewPayload(ctx, wrappedPayload)
|
||||
resp, err := client.NewPayload(ctx, execPayload)
|
||||
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
|
||||
require.DeepEqual(t, []uint8(nil), resp)
|
||||
})
|
||||
@@ -260,10 +249,8 @@ func TestClient_HTTP(t *testing.T) {
|
||||
client := newPayloadSetup(t, want, execPayload)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
wrappedPayload, err := wrapper.WrappedExecutionPayload(execPayload)
|
||||
require.NoError(t, err)
|
||||
resp, err := client.NewPayload(ctx, wrappedPayload)
|
||||
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
|
||||
resp, err := client.NewPayload(ctx, execPayload)
|
||||
require.ErrorContains(t, "could not validate block hash", err)
|
||||
require.DeepEqual(t, []uint8(nil), resp)
|
||||
})
|
||||
t.Run(NewPayloadMethod+" INVALID status", func(t *testing.T) {
|
||||
@@ -274,9 +261,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
client := newPayloadSetup(t, want, execPayload)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
wrappedPayload, err := wrapper.WrappedExecutionPayload(execPayload)
|
||||
require.NoError(t, err)
|
||||
resp, err := client.NewPayload(ctx, wrappedPayload)
|
||||
resp, err := client.NewPayload(ctx, execPayload)
|
||||
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
|
||||
require.DeepEqual(t, want.LatestValidHash, resp)
|
||||
})
|
||||
@@ -288,9 +273,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
client := newPayloadSetup(t, want, execPayload)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
wrappedPayload, err := wrapper.WrappedExecutionPayload(execPayload)
|
||||
require.NoError(t, err)
|
||||
resp, err := client.NewPayload(ctx, wrappedPayload)
|
||||
resp, err := client.NewPayload(ctx, execPayload)
|
||||
require.ErrorIs(t, ErrUnknownPayloadStatus, err)
|
||||
require.DeepEqual(t, []uint8(nil), resp)
|
||||
})
|
||||
@@ -402,101 +385,6 @@ func TestClient_HTTP(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestReconstructFullBellatrixBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
t.Run("nil block", func(t *testing.T) {
|
||||
service := &Service{}
|
||||
|
||||
_, err := service.ReconstructFullBellatrixBlock(ctx, nil)
|
||||
require.ErrorContains(t, "nil data", err)
|
||||
})
|
||||
t.Run("only blinded block", func(t *testing.T) {
|
||||
want := "can only reconstruct block from blinded block format"
|
||||
service := &Service{}
|
||||
bellatrixBlock := util.NewBeaconBlockBellatrix()
|
||||
wrapped, err := wrapper.WrappedSignedBeaconBlock(bellatrixBlock)
|
||||
require.NoError(t, err)
|
||||
_, err = service.ReconstructFullBellatrixBlock(ctx, wrapped)
|
||||
require.ErrorContains(t, want, err)
|
||||
})
|
||||
t.Run("properly reconstructs block with correct payload", func(t *testing.T) {
|
||||
fix := fixtures()
|
||||
payload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
jsonPayload := make(map[string]interface{})
|
||||
tx := gethtypes.NewTransaction(
|
||||
0,
|
||||
common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"),
|
||||
big.NewInt(0), 0, big.NewInt(0),
|
||||
nil,
|
||||
)
|
||||
txs := []*gethtypes.Transaction{tx}
|
||||
encodedBinaryTxs := make([][]byte, 1)
|
||||
var err error
|
||||
encodedBinaryTxs[0], err = txs[0].MarshalBinary()
|
||||
require.NoError(t, err)
|
||||
payload.Transactions = encodedBinaryTxs
|
||||
jsonPayload["transactions"] = txs
|
||||
num := big.NewInt(1)
|
||||
encodedNum := hexutil.EncodeBig(num)
|
||||
jsonPayload["hash"] = hexutil.Encode(payload.BlockHash)
|
||||
jsonPayload["parentHash"] = common.BytesToHash([]byte("parent"))
|
||||
jsonPayload["sha3Uncles"] = common.BytesToHash([]byte("uncles"))
|
||||
jsonPayload["miner"] = common.BytesToAddress([]byte("miner"))
|
||||
jsonPayload["stateRoot"] = common.BytesToHash([]byte("state"))
|
||||
jsonPayload["transactionsRoot"] = common.BytesToHash([]byte("txs"))
|
||||
jsonPayload["receiptsRoot"] = common.BytesToHash([]byte("receipts"))
|
||||
jsonPayload["logsBloom"] = gethtypes.BytesToBloom([]byte("bloom"))
|
||||
jsonPayload["gasLimit"] = hexutil.EncodeUint64(1)
|
||||
jsonPayload["gasUsed"] = hexutil.EncodeUint64(2)
|
||||
jsonPayload["timestamp"] = hexutil.EncodeUint64(3)
|
||||
jsonPayload["number"] = encodedNum
|
||||
jsonPayload["extraData"] = common.BytesToHash([]byte("extra"))
|
||||
jsonPayload["totalDifficulty"] = "0x123456"
|
||||
jsonPayload["difficulty"] = encodedNum
|
||||
jsonPayload["size"] = encodedNum
|
||||
jsonPayload["baseFeePerGas"] = encodedNum
|
||||
|
||||
wrappedPayload, err := wrapper.WrappedExecutionPayload(payload)
|
||||
require.NoError(t, err)
|
||||
header, err := wrapper.PayloadToHeader(wrappedPayload)
|
||||
require.NoError(t, err)
|
||||
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
respJSON := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": jsonPayload,
|
||||
}
|
||||
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
defer rpcClient.Close()
|
||||
|
||||
service := &Service{}
|
||||
service.rpcClient = rpcClient
|
||||
blindedBlock := util.NewBlindedBeaconBlockBellatrix()
|
||||
|
||||
blindedBlock.Block.Body.ExecutionPayloadHeader = header
|
||||
wrapped, err := wrapper.WrappedSignedBeaconBlock(blindedBlock)
|
||||
require.NoError(t, err)
|
||||
reconstructed, err := service.ReconstructFullBellatrixBlock(ctx, wrapped)
|
||||
require.NoError(t, err)
|
||||
|
||||
got, err := reconstructed.Block().Body().Execution()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, payload, got.Proto())
|
||||
})
|
||||
}
|
||||
|
||||
func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -528,7 +416,7 @@ func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
|
||||
name: "current execution block invalid TD",
|
||||
paramsTd: "1",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: common.BytesToHash([]byte("a")),
|
||||
Hash: []byte{'a'},
|
||||
TotalDifficulty: "1115792089237316195423570985008687907853269984665640564039457584007913129638912",
|
||||
},
|
||||
errString: "could not convert total difficulty to uint256",
|
||||
@@ -537,10 +425,8 @@ func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
|
||||
name: "current execution block has zero hash parent",
|
||||
paramsTd: "2",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: common.BytesToHash([]byte("a")),
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash(params.BeaconConfig().ZeroHash[:]),
|
||||
},
|
||||
Hash: []byte{'a'},
|
||||
ParentHash: params.BeaconConfig().ZeroHash[:],
|
||||
TotalDifficulty: "0x3",
|
||||
},
|
||||
},
|
||||
@@ -548,10 +434,8 @@ func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
|
||||
name: "could not get parent block",
|
||||
paramsTd: "2",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: common.BytesToHash([]byte("a")),
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash([]byte("b")),
|
||||
},
|
||||
Hash: []byte{'a'},
|
||||
ParentHash: []byte{'b'},
|
||||
TotalDifficulty: "0x3",
|
||||
},
|
||||
errString: "could not get parent execution block",
|
||||
@@ -560,17 +444,13 @@ func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
|
||||
name: "parent execution block invalid TD",
|
||||
paramsTd: "2",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: common.BytesToHash([]byte("a")),
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash([]byte("b")),
|
||||
},
|
||||
Hash: []byte{'a'},
|
||||
ParentHash: []byte{'b'},
|
||||
TotalDifficulty: "0x3",
|
||||
},
|
||||
parentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: common.BytesToHash([]byte("b")),
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash([]byte("c")),
|
||||
},
|
||||
Hash: []byte{'b'},
|
||||
ParentHash: []byte{'c'},
|
||||
TotalDifficulty: "1",
|
||||
},
|
||||
errString: "could not convert total difficulty to uint256",
|
||||
@@ -579,37 +459,29 @@ func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
|
||||
name: "happy case",
|
||||
paramsTd: "2",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: common.BytesToHash([]byte("a")),
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash([]byte("b")),
|
||||
},
|
||||
Hash: []byte{'a'},
|
||||
ParentHash: []byte{'b'},
|
||||
TotalDifficulty: "0x3",
|
||||
},
|
||||
parentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: common.BytesToHash([]byte("b")),
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash([]byte("c")),
|
||||
},
|
||||
Hash: []byte{'b'},
|
||||
ParentHash: []byte{'c'},
|
||||
TotalDifficulty: "0x1",
|
||||
},
|
||||
wantExists: true,
|
||||
wantTerminalBlockHash: common.BytesToHash([]byte("a")).Bytes(),
|
||||
wantTerminalBlockHash: []byte{'a'},
|
||||
},
|
||||
{
|
||||
name: "ttd not reached",
|
||||
paramsTd: "3",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: common.BytesToHash([]byte("a")),
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash([]byte("b")),
|
||||
},
|
||||
Hash: []byte{'a'},
|
||||
ParentHash: []byte{'b'},
|
||||
TotalDifficulty: "0x2",
|
||||
},
|
||||
parentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: common.BytesToHash([]byte("b")),
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash([]byte("c")),
|
||||
},
|
||||
Hash: []byte{'b'},
|
||||
ParentHash: []byte{'c'},
|
||||
TotalDifficulty: "0x1",
|
||||
},
|
||||
},
|
||||
@@ -622,7 +494,7 @@ func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
|
||||
var m map[[32]byte]*pb.ExecutionBlock
|
||||
if tt.parentPowBlock != nil {
|
||||
m = map[[32]byte]*pb.ExecutionBlock{
|
||||
tt.parentPowBlock.Hash: tt.parentPowBlock,
|
||||
bytesutil.ToBytes32(tt.parentPowBlock.Hash): tt.parentPowBlock,
|
||||
}
|
||||
}
|
||||
client := mocks.EngineClient{
|
||||
@@ -877,6 +749,8 @@ func fixtures() map[string]interface{} {
|
||||
BlockHash: foo[:],
|
||||
Transactions: [][]byte{foo[:]},
|
||||
}
|
||||
number := bytesutil.PadTo([]byte("100"), fieldparams.RootLength)
|
||||
hash := bytesutil.PadTo([]byte("hash"), fieldparams.RootLength)
|
||||
parent := bytesutil.PadTo([]byte("parentHash"), fieldparams.RootLength)
|
||||
sha3Uncles := bytesutil.PadTo([]byte("sha3Uncles"), fieldparams.RootLength)
|
||||
miner := bytesutil.PadTo([]byte("miner"), fieldparams.FeeRecipientLength)
|
||||
@@ -885,24 +759,25 @@ func fixtures() map[string]interface{} {
|
||||
receiptsRoot := bytesutil.PadTo([]byte("receiptsRoot"), fieldparams.RootLength)
|
||||
logsBloom := bytesutil.PadTo([]byte("logs"), fieldparams.LogsBloomLength)
|
||||
executionBlock := &pb.ExecutionBlock{
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash(parent),
|
||||
UncleHash: common.BytesToHash(sha3Uncles),
|
||||
Coinbase: common.BytesToAddress(miner),
|
||||
Root: common.BytesToHash(stateRoot),
|
||||
TxHash: common.BytesToHash(transactionsRoot),
|
||||
ReceiptHash: common.BytesToHash(receiptsRoot),
|
||||
Bloom: gethtypes.BytesToBloom(logsBloom),
|
||||
Difficulty: big.NewInt(1),
|
||||
Number: big.NewInt(2),
|
||||
GasLimit: 3,
|
||||
GasUsed: 4,
|
||||
Time: 5,
|
||||
Extra: []byte("extra"),
|
||||
MixDigest: common.BytesToHash([]byte("mix")),
|
||||
Nonce: gethtypes.EncodeNonce(6),
|
||||
BaseFee: big.NewInt(7),
|
||||
},
|
||||
Number: number,
|
||||
Hash: hash,
|
||||
ParentHash: parent,
|
||||
Sha3Uncles: sha3Uncles,
|
||||
Miner: miner,
|
||||
StateRoot: stateRoot,
|
||||
TransactionsRoot: transactionsRoot,
|
||||
ReceiptsRoot: receiptsRoot,
|
||||
LogsBloom: logsBloom,
|
||||
Difficulty: bytesutil.PadTo([]byte("1"), fieldparams.RootLength),
|
||||
TotalDifficulty: "2",
|
||||
GasLimit: 3,
|
||||
GasUsed: 4,
|
||||
Timestamp: 5,
|
||||
Size: bytesutil.PadTo([]byte("6"), fieldparams.RootLength),
|
||||
ExtraData: bytesutil.PadTo([]byte("extraData"), fieldparams.RootLength),
|
||||
BaseFeePerGas: bytesutil.PadTo([]byte("baseFeePerGas"), fieldparams.RootLength),
|
||||
Transactions: [][]byte{foo[:]},
|
||||
Uncles: [][]byte{foo[:]},
|
||||
}
|
||||
status := &pb.PayloadStatus{
|
||||
Status: pb.PayloadStatus_VALID,
|
||||
@@ -931,7 +806,7 @@ func fixtures() map[string]interface{} {
|
||||
forkChoiceInvalidResp := &ForkchoiceUpdatedResponse{
|
||||
Status: &pb.PayloadStatus{
|
||||
Status: pb.PayloadStatus_INVALID,
|
||||
LatestValidHash: bytesutil.PadTo([]byte("latestValidHash"), 32),
|
||||
LatestValidHash: []byte("latestValidHash"),
|
||||
},
|
||||
PayloadId: &id,
|
||||
}
|
||||
@@ -984,57 +859,6 @@ func fixtures() map[string]interface{} {
|
||||
}
|
||||
}
|
||||
|
||||
func Test_fullPayloadFromExecutionBlock(t *testing.T) {
|
||||
type args struct {
|
||||
header *pb.ExecutionPayloadHeader
|
||||
block *pb.ExecutionBlock
|
||||
}
|
||||
wantedHash := common.BytesToHash([]byte("foo"))
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want *pb.ExecutionPayload
|
||||
err string
|
||||
}{
|
||||
{
|
||||
name: "block hash field in header and block hash mismatch",
|
||||
args: args{
|
||||
header: &pb.ExecutionPayloadHeader{
|
||||
BlockHash: []byte("foo"),
|
||||
},
|
||||
block: &pb.ExecutionBlock{
|
||||
Hash: common.BytesToHash([]byte("bar")),
|
||||
},
|
||||
},
|
||||
err: "does not match execution block hash",
|
||||
},
|
||||
{
|
||||
name: "ok",
|
||||
args: args{
|
||||
header: &pb.ExecutionPayloadHeader{
|
||||
BlockHash: wantedHash[:],
|
||||
},
|
||||
block: &pb.ExecutionBlock{
|
||||
Hash: wantedHash,
|
||||
},
|
||||
},
|
||||
want: &pb.ExecutionPayload{
|
||||
BlockHash: wantedHash[:],
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
wrapped, err := wrapper.WrappedExecutionPayloadHeader(tt.args.header)
|
||||
got, err := fullPayloadFromExecutionBlock(wrapped, tt.args.block)
|
||||
if (err != nil) && !strings.Contains(err.Error(), tt.err) {
|
||||
t.Fatalf("Wanted err %s got %v", tt.err, err)
|
||||
}
|
||||
require.DeepEqual(t, tt.want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type testEngineService struct{}
|
||||
|
||||
func (*testEngineService) NoArgsRets() {}
|
||||
|
||||
@@ -30,8 +30,6 @@ var (
|
||||
ErrAcceptedSyncingPayloadStatus = errors.New("payload status is SYNCING or ACCEPTED")
|
||||
// ErrInvalidPayloadStatus when the status of the payload is invalid.
|
||||
ErrInvalidPayloadStatus = errors.New("payload status is INVALID")
|
||||
// ErrInvalidBlockHashPayloadStatus when the status of the payload fails to validate block hash.
|
||||
ErrInvalidBlockHashPayloadStatus = errors.New("payload status is INVALID_BLOCK_HASH")
|
||||
// ErrNilResponse when the response is nil.
|
||||
ErrNilResponse = errors.New("nil response")
|
||||
)
|
||||
|
||||
@@ -31,8 +31,4 @@ var (
|
||||
Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
|
||||
},
|
||||
)
|
||||
reconstructedExecutionPayloadCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "reconstructed_execution_payload_count",
|
||||
Help: "Count the number of execution payloads that are reconstructed using JSON-RPC from payload headers",
|
||||
})
|
||||
)
|
||||
|
||||
@@ -1,2 +0,0 @@
|
||||
go test fuzz v1
|
||||
[]byte("{\"parentHash\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"feeRecipient\":\"0xff01ff01ff01ff01ffff01ff01ff01ff01ff0000\",\"stateRoot\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"receiptsRoot\":\"0xff01ff01ff01ff01ff01ff01ff01ff01000000fffffff0000000000000000000\",\"logsBloom\":\"0x6a756e6b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",\"prevRandao\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"blockNumber\":\"0xffffffffffffffff\",\"gasLimit\":\"0xffffffffffffffff\",\"gasUsed\":\"0xffffffffffffffff\",\"timestamp\":\"0x64\",\"extraData\":\"0x\",\"baseFeePerGas\":\"0x7fffff0000000fff\",\"blockHash\":\"0xff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"transactions\":[\"0xff01ff01ff01ff01ff01ff01ff01ff01\",\"0xff01ff01ff01ff01ff01ff01ff01ff01\",\"0xff01ff01ff01ff01ff01ff01ff01ff01\",\"0xff01ff01ff01ff01ff01ff01ff01ff01\"]}")
|
||||
@@ -1,2 +0,0 @@
|
||||
go test fuzz v1
|
||||
[]byte("{\"BAseFeePerGAs\":\"0X0\"}")
|
||||
@@ -1,2 +0,0 @@
|
||||
go test fuzz v1
|
||||
[]byte("{\"parentHash\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"feeRecipient\":\"0xff01ff01ff01ff01ffff01ff01ff0000\",\"stateRoot\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"receiptsRoot\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"logsBloom\":\"0x6a756e6b000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",\"prevRandao\":\"0xff01ff01ff01ff01ff01ff000000ff0100000000000000000000000000000000\",\"blockNumber\":\"0xffffffffffffffff\",\"gasLimit\":\"0xffffffffffffffff\",\"gasUsed\":\"0xffffffffffffffff\",\"timestamp\":\"0x64\",\"extraData\":\"0x\",\"baseFeePerGas\":\"0x7f0fffffffffffff\",\"blockHash\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"transactions\":[\"0xff01ff01ff01ff01ff01ff01ff01ff01\",\"0xff01ff01ff01ff01ff01ff01ff01ff01\",\"0xff01ff01ff01ff01ff01ff01ff01ff01\",\"0xff01ff01ff01ff01ff01ff01ff01ff01\"]}")
|
||||
@@ -1,2 +0,0 @@
|
||||
go test fuzz v1
|
||||
[]byte("{\"parentHash\":\"0xff01Ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"feeRecipient\":\"0xff01ff01ff01ff01ffff01ff01ff01ff01ff0000\",\"stateRoot\":\"0xff01ff01ff0fff01ff01ff01ff01ff0100000000000000000000000000000000\",\"receiptsRoot\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"logsBloom\":\"0x6a756e6b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",\"prevRandao\":\"0xff01ff01ff01ff01ff011f01ff01ff0100000000000000000000000000000000\",\"blockNumber\":\"0xffffffffffffffff\",\"gasLimit\":\"0xffffffffffffffff\",\"gasUsed\":\"0xffffffffffffffff\",\"timestamp\":\"0x64\",\"extraData\":\"0x\",\"baseFeePerGas\":\"0x7fffffffffffffff\",\"blockHash\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"transactions\":[\"0xff01ff01ff01ff01ff01ff01ff01ff01\",\"0xff01ff01ff01ff01ff01ff01ff01ff01\",\"0xff01ff01ff01ff01ff01ff01ff01ff01\",\"0xff01ff01ff01ff01ff01ff01ff01ff01\"]}")
|
||||
@@ -1,2 +0,0 @@
|
||||
go test fuzz v1
|
||||
[]byte("{\"pArentHAsh\":\"\",\"feeReCipient\":\"\",\"stAteRoot\":\"\",\"reCeiptsRoot\":\"\",\"logsBloom\":\"\",\"prevRAndAo\":\"\",\"eXtrADAtA\":\"\",\"BAseFeePerGAs\":\"0X0\",\"BloCkHAsh\":\"\",\"trAnsACtions\":[]}")
|
||||
@@ -1,2 +0,0 @@
|
||||
go test fuzz v1
|
||||
[]byte("{\"parentHash\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"feeRecipient\":\"0xff01ff01ff01ff01ffff01ff01ff01ff01ff0000\",\"stateRoot\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"receiptsRoot\":\"0xff01ff01ff01ff01ff01ff01ff01ff01000000fffffff0000000000000000000\",\"logsBloom\":\"0x6a756e6b00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",\"prevRandao\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"blockNumber\":\"0xffffffffffffffff\",\"gasLimit\":\"0xffffffffffffffff\",\"gasUsed\":\"0xffffffffffffffff\",\"t\xc1\xc1\xc1\xc1\xc1\xc1\xc1\xc1\":\"0x64\",\"extraData\":\"0x\",\"baseFeePerGas\":\"0x7fffff0000000fff\",\"blockHash\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"transactions\":[\"0xff01ff01ff01ff01ff01ff01ff01ff01\",\"0xff01ff01ff01ff01ff01ff01ff01ff01\",\"0xff01ff01ff01ff01ff01ff01ff01ff01\",\"0xff01ff01ff01ff01ff01ff01ff01ff01\"]}")
|
||||
@@ -1,2 +0,0 @@
|
||||
go test fuzz v1
|
||||
[]byte("{\"parentHash\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"feeRecipient\":\"0xff01ff01ff01ff01ffff01ff01ff01ff01ff0000\",\"stateRoot\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"receiptsRoot\":\"0xff01ff01ff01ff01ff01ff01ff01ff01000000fffffff0000000000000000000\",\"logsBloom\":\"0x6a756e6b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",\"prevRandao\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"blockNumber\":\"0xffffffffffffffff\",\"gasLimit\":\"0xffffffffffffffff\",\"gasUsed\":\"0xffffffffffffffff\",\"timestamp\":\"0x64\",\"extraData\":\"0x\",\"baseFeePerGas\":\"0x7fffff0000000fff0000000\",\"blockHash\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"transactions\":[\"0xff01ff01ff01ff01ff01ff01ff01ff01\",\"0xff01ff01ff01ff01ff01ff01ff01ff01\",\"0xff01ff010ff1ff01ff01ff01ff01ff01\",\"0xff01ff01ff01ff01ff01ff01ff01ff01\"]}")
|
||||
@@ -1,2 +0,0 @@
|
||||
go test fuzz v1
|
||||
[]byte("{\"parentHash\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"feeRecipient\":\"0xff01ff01ff01ff01ffff01ff01ff01ff01ff0000\",\"stateRoot\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"receiptsRoot\":\"0xff01ff01ff01ff01ff01ff01ff01ff01000000fffffff0000000000000000000\",\"logsBloom\":\"0x6a756e6b00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",\"prevRandao\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"blockNu0000000mber\":\"0xffffffffffffffff\",\"gasLimit\":\"0xffffffffffffffff\",\"gasUsed\":\"0xffffffffffffffff\",\"timestamp\":\"0x64\",\"extraData\":\"0x\",\"baseFeePerGas\":\"0x7fffff0000000fff\",\"blockHash\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"transactions\":[\"0xff01ff01ff01ff01ff01ff01ff01ff01\",\"0xff01ff01ff01ff01ff01ff01ff01ff01\",\"0xff01ff01ff01ff01ff01ff01ff01ff01\",\"0xff01ff01ff01ff01ff01ff01ff01ff01\"]}")
|
||||
@@ -18,7 +18,6 @@ go_library(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
)
|
||||
@@ -33,7 +32,7 @@ type EngineClient struct {
|
||||
}
|
||||
|
||||
// NewPayload --
|
||||
func (e *EngineClient) NewPayload(_ context.Context, _ interfaces.ExecutionData) ([]byte, error) {
|
||||
func (e *EngineClient) NewPayload(_ context.Context, _ *pb.ExecutionPayload) ([]byte, error) {
|
||||
return e.NewPayloadResp, e.ErrNewPayload
|
||||
}
|
||||
|
||||
@@ -95,8 +94,8 @@ func (e *EngineClient) GetTerminalBlockHash(ctx context.Context) ([]byte, bool,
|
||||
currentTotalDifficulty, _ := uint256.FromBig(b)
|
||||
blockReachedTTD := currentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
|
||||
|
||||
parentHash := blk.ParentHash
|
||||
if parentHash == params.BeaconConfig().ZeroHash {
|
||||
parentHash := bytesutil.ToBytes32(blk.ParentHash)
|
||||
if len(blk.ParentHash) == 0 || parentHash == params.BeaconConfig().ZeroHash {
|
||||
return nil, false, nil
|
||||
}
|
||||
parentBlk, err := e.ExecutionBlockByHash(ctx, parentHash)
|
||||
@@ -111,7 +110,7 @@ func (e *EngineClient) GetTerminalBlockHash(ctx context.Context) ([]byte, bool,
|
||||
parentTotalDifficulty, _ := uint256.FromBig(b)
|
||||
parentReachedTTD := parentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
|
||||
if !parentReachedTTD {
|
||||
return blk.Hash[:], true, nil
|
||||
return blk.Hash, true, nil
|
||||
}
|
||||
} else {
|
||||
return nil, false, nil
|
||||
|
||||
@@ -52,8 +52,6 @@ func TestGetSpec(t *testing.T) {
|
||||
config.BellatrixForkEpoch = 101
|
||||
config.ShardingForkVersion = []byte("ShardingForkVersion")
|
||||
config.ShardingForkEpoch = 102
|
||||
config.CapellaForkVersion = []byte("CapellaForkVersion")
|
||||
config.CapellaForkEpoch = 103
|
||||
config.BLSWithdrawalPrefixByte = byte('b')
|
||||
config.GenesisDelay = 24
|
||||
config.SecondsPerSlot = 25
|
||||
@@ -135,7 +133,7 @@ func TestGetSpec(t *testing.T) {
|
||||
resp, err := server.GetSpec(context.Background(), &emptypb.Empty{})
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 101, len(resp.Data))
|
||||
assert.Equal(t, 99, len(resp.Data))
|
||||
for k, v := range resp.Data {
|
||||
switch k {
|
||||
case "CONFIG_NAME":
|
||||
@@ -204,10 +202,6 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "0x"+hex.EncodeToString([]byte("ShardingForkVersion")), v)
|
||||
case "SHARDING_FORK_EPOCH":
|
||||
assert.Equal(t, "102", v)
|
||||
case "CAPELLA_FORK_VERSION":
|
||||
assert.Equal(t, "0x"+hex.EncodeToString([]byte("CapellaForkVersion")), v)
|
||||
case "CAPELLA_FORK_EPOCH":
|
||||
assert.Equal(t, "103", v)
|
||||
case "MIN_ANCHOR_POW_BLOCK_DIFFICULTY":
|
||||
assert.Equal(t, "1000", v)
|
||||
case "BLS_WITHDRAWAL_PREFIX":
|
||||
|
||||
@@ -133,8 +133,8 @@ func (vs *Server) GetProposerDuties(ctx context.Context, req *ethpbv1.ProposerDu
|
||||
|
||||
cs := vs.TimeFetcher.CurrentSlot()
|
||||
currentEpoch := slots.ToEpoch(cs)
|
||||
if req.Epoch > currentEpoch+1 {
|
||||
return nil, status.Errorf(codes.InvalidArgument, "Request epoch %d can not be greater than next epoch %d", req.Epoch, currentEpoch+1)
|
||||
if req.Epoch > currentEpoch {
|
||||
return nil, status.Errorf(codes.InvalidArgument, "Request epoch %d can not be greater than current epoch %d", req.Epoch, currentEpoch)
|
||||
}
|
||||
|
||||
s, err := vs.HeadFetcher.HeadState(ctx)
|
||||
@@ -177,7 +177,7 @@ func (vs *Server) GetProposerDuties(ctx context.Context, req *ethpbv1.ProposerDu
|
||||
return duties[i].Slot < duties[j].Slot
|
||||
})
|
||||
|
||||
root, err := vs.proposalDependentRoot(ctx, s, req.Epoch)
|
||||
root, err := proposalDependentRoot(s, req.Epoch)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get dependent root: %v", err)
|
||||
}
|
||||
@@ -912,7 +912,7 @@ func attestationDependentRoot(s state.BeaconState, epoch types.Epoch) ([]byte, e
|
||||
|
||||
// proposalDependentRoot is get_block_root_at_slot(state, compute_start_slot_at_epoch(epoch) - 1)
|
||||
// or the genesis block root in the case of underflow.
|
||||
func (vs *Server) proposalDependentRoot(ctx context.Context, s state.BeaconState, epoch types.Epoch) ([]byte, error) {
|
||||
func proposalDependentRoot(s state.BeaconState, epoch types.Epoch) ([]byte, error) {
|
||||
var dependentRootSlot types.Slot
|
||||
if epoch == 0 {
|
||||
dependentRootSlot = 0
|
||||
@@ -923,21 +923,10 @@ func (vs *Server) proposalDependentRoot(ctx context.Context, s state.BeaconState
|
||||
}
|
||||
dependentRootSlot = epochStartSlot.Sub(1)
|
||||
}
|
||||
var root []byte
|
||||
var err error
|
||||
// Per spec, if the dependent root epoch is greater than current epoch, use the head root.
|
||||
if dependentRootSlot >= s.Slot() {
|
||||
root, err = vs.HeadFetcher.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
root, err = helpers.BlockRootAtSlot(s, dependentRootSlot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get block root")
|
||||
}
|
||||
root, err := helpers.BlockRootAtSlot(s, dependentRootSlot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get block root")
|
||||
}
|
||||
|
||||
return root, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -366,11 +366,11 @@ func TestGetProposerDuties(t *testing.T) {
|
||||
t.Run("Epoch out of bound", func(t *testing.T) {
|
||||
currentEpoch := slots.ToEpoch(bs.Slot())
|
||||
req := ðpbv1.ProposerDutiesRequest{
|
||||
Epoch: currentEpoch + 2,
|
||||
Epoch: currentEpoch + 1,
|
||||
}
|
||||
_, err := vs.GetProposerDuties(ctx, req)
|
||||
require.NotNil(t, err)
|
||||
assert.ErrorContains(t, fmt.Sprintf("Request epoch %d can not be greater than next epoch %d", currentEpoch+2, currentEpoch+1), err)
|
||||
assert.ErrorContains(t, fmt.Sprintf("Request epoch %d can not be greater than current epoch %d", currentEpoch+1, currentEpoch), err)
|
||||
})
|
||||
|
||||
t.Run("execution optimistic", func(t *testing.T) {
|
||||
|
||||
@@ -165,7 +165,6 @@ go_test(
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_d4l3k_messagediff//:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_golang_mock//gomock:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
|
||||
@@ -57,7 +57,12 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return vs.getBellatrixBeaconBlock(ctx, req)
|
||||
blk, err := vs.getBellatrixBeaconBlock(ctx, req)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not fetch Bellatrix beacon block: %v", err)
|
||||
}
|
||||
|
||||
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Bellatrix{Bellatrix: blk}}, nil
|
||||
}
|
||||
|
||||
// GetBlock is called by a proposer during its assigned slot to request a block to sign
|
||||
@@ -136,11 +141,6 @@ func (vs *Server) proposeGenericBeaconBlock(ctx context.Context, blk interfaces.
|
||||
return nil, fmt.Errorf("could not tree hash block: %v", err)
|
||||
}
|
||||
|
||||
blk, err = vs.unblindBuilderBlock(ctx, blk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Do not block proposal critical path with debug logging or block feed updates.
|
||||
defer func() {
|
||||
log.WithField("blockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debugf(
|
||||
|
||||
@@ -3,12 +3,10 @@ package validator
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition/interop"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
coreBlock "github.com/prysmaticlabs/prysm/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
|
||||
@@ -18,35 +16,14 @@ import (
|
||||
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// blockBuilderTimeout is the maximum amount of time allowed for a block builder to respond to a
|
||||
// block request. This value is known as `BUILDER_PROPOSAL_DELAY_TOLERANCE` in the builder spec.
|
||||
const blockBuilderTimeout = 1 * time.Second
|
||||
|
||||
func (vs *Server) getBellatrixBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.GenericBeaconBlock, error) {
|
||||
func (vs *Server) getBellatrixBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.BeaconBlockBellatrix, error) {
|
||||
altairBlk, err := vs.buildAltairBeaconBlock(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
registered, err := vs.validatorRegistered(ctx, altairBlk.ProposerIndex)
|
||||
if registered && err == nil {
|
||||
builderReady, b, err := vs.getAndBuildHeaderBlock(ctx, altairBlk)
|
||||
if err != nil {
|
||||
// In the event of an error, the node should fall back to the default execution engine for building the block.
|
||||
log.WithError(err).Error("Failed to build a block from external builder, falling " +
|
||||
"back to local execution client")
|
||||
} else if builderReady {
|
||||
return b, nil
|
||||
}
|
||||
} else if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": req.Slot,
|
||||
"validatorIndex": altairBlk.ProposerIndex,
|
||||
}).Errorf("Could not determine validator has registered. Default to local execution client: %v", err)
|
||||
}
|
||||
payload, err := vs.getExecutionPayload(ctx, req.Slot, altairBlk.ProposerIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -83,7 +60,7 @@ func (vs *Server) getBellatrixBeaconBlock(ctx context.Context, req *ethpb.BlockR
|
||||
return nil, fmt.Errorf("could not compute state root: %v", err)
|
||||
}
|
||||
blk.StateRoot = stateRoot
|
||||
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Bellatrix{Bellatrix: blk}}, nil
|
||||
return blk, nil
|
||||
}
|
||||
|
||||
// This function retrieves the payload header given the slot number and the validator index.
|
||||
@@ -99,7 +76,7 @@ func (vs *Server) getPayloadHeader(ctx context.Context, slot types.Slot, idx typ
|
||||
if blocks.IsPreBellatrixVersion(b.Version()) {
|
||||
return nil, nil
|
||||
}
|
||||
h, err := b.Block().Body().Execution()
|
||||
h, err := b.Block().Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -107,7 +84,7 @@ func (vs *Server) getPayloadHeader(ctx context.Context, slot types.Slot, idx typ
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bid, err := vs.BlockBuilder.GetHeader(ctx, slot, bytesutil.ToBytes32(h.BlockHash()), pk)
|
||||
bid, err := vs.BlockBuilder.GetHeader(ctx, slot, bytesutil.ToBytes32(h.BlockHash), pk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -158,7 +135,7 @@ func (vs *Server) buildHeaderBlock(ctx context.Context, b *ethpb.BeaconBlockAlta
|
||||
// This function retrieves the full payload block using the input blind block. This input must be versioned as
|
||||
// a Bellatrix blind block. The output block will contain the full payload. The original header block
|
||||
// will be returned if the block builder is not configured.
|
||||
func (vs *Server) unblindBuilderBlock(ctx context.Context, b interfaces.SignedBeaconBlock) (interfaces.SignedBeaconBlock, error) {
|
||||
func (vs *Server) getBuilderBlock(ctx context.Context, b interfaces.SignedBeaconBlock) (interfaces.SignedBeaconBlock, error) {
|
||||
if err := coreBlock.BeaconBlockIsNil(b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -178,14 +155,10 @@ func (vs *Server) unblindBuilderBlock(ctx context.Context, b interfaces.SignedBe
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
h, err := b.Block().Body().Execution()
|
||||
h, err := b.Block().Body().ExecutionPayloadHeader()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
header, ok := h.Proto().(*enginev1.ExecutionPayloadHeader)
|
||||
if !ok {
|
||||
return nil, errors.New("execution data must be execution payload header")
|
||||
}
|
||||
sb := &ethpb.SignedBlindedBeaconBlockBellatrix{
|
||||
Block: &ethpb.BlindedBeaconBlockBellatrix{
|
||||
Slot: b.Block().Slot(),
|
||||
@@ -202,7 +175,7 @@ func (vs *Server) unblindBuilderBlock(ctx context.Context, b interfaces.SignedBe
|
||||
Deposits: b.Block().Body().Deposits(),
|
||||
VoluntaryExits: b.Block().Body().VoluntaryExits(),
|
||||
SyncAggregate: agg,
|
||||
ExecutionPayloadHeader: header,
|
||||
ExecutionPayloadHeader: h,
|
||||
},
|
||||
},
|
||||
Signature: b.Signature(),
|
||||
@@ -237,82 +210,5 @@ func (vs *Server) unblindBuilderBlock(ctx context.Context, b interfaces.SignedBe
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockHash": fmt.Sprintf("%#x", h.BlockHash()),
|
||||
"feeRecipient": fmt.Sprintf("%#x", h.FeeRecipient()),
|
||||
"gasUsed": h.GasUsed,
|
||||
"slot": b.Block().Slot(),
|
||||
"txs": len(payload.Transactions),
|
||||
}).Info("Retrieved full payload from builder")
|
||||
|
||||
return wb, nil
|
||||
}
|
||||
|
||||
// readyForBuilder returns true if the builder is allowed to be used. The builder is only allowed to be used after the
|
||||
// first finalized checkpoint has been execution-enabled.
|
||||
func (vs *Server) readyForBuilder(ctx context.Context) (bool, error) {
|
||||
cp := vs.FinalizationFetcher.FinalizedCheckpt()
|
||||
// A checkpoint root of zero means we are still at the genesis epoch.
|
||||
if bytesutil.ToBytes32(cp.Root) == params.BeaconConfig().ZeroHash {
|
||||
return false, nil
|
||||
}
|
||||
b, err := vs.BeaconDB.Block(ctx, bytesutil.ToBytes32(cp.Root))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if err = coreBlock.BeaconBlockIsNil(b); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return blocks.IsExecutionBlock(b.Block().Body())
|
||||
}
|
||||
|
||||
// Get and build the header block. Returns a boolean status, the built block, and an error.
|
||||
// If the status is false, the builder header block is disallowed.
|
||||
// This routine is time limited by `blockBuilderTimeout`.
|
||||
func (vs *Server) getAndBuildHeaderBlock(ctx context.Context, b *ethpb.BeaconBlockAltair) (bool, *ethpb.GenericBeaconBlock, error) {
|
||||
// No-op. Builder is not defined: the user did not specify a builder URL, so we should use the local EE.
|
||||
if vs.BlockBuilder == nil || !vs.BlockBuilder.Configured() {
|
||||
return false, nil, nil
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(ctx, blockBuilderTimeout)
|
||||
defer cancel()
|
||||
// Check whether the protocol allows the builder at this moment. The builder is only allowed post-merge, after finalization.
|
||||
ready, err := vs.readyForBuilder(ctx)
|
||||
if err != nil {
|
||||
return false, nil, errors.Wrap(err, "could not determine if builder is ready")
|
||||
}
|
||||
if !ready {
|
||||
return false, nil, nil
|
||||
}
|
||||
h, err := vs.getPayloadHeader(ctx, b.Slot, b.ProposerIndex)
|
||||
if err != nil {
|
||||
return false, nil, errors.Wrap(err, "could not get payload header")
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockHash": fmt.Sprintf("%#x", h.BlockHash),
|
||||
"feeRecipient": fmt.Sprintf("%#x", h.FeeRecipient),
|
||||
"gasUsed": h.GasUsed,
|
||||
"slot": b.Slot,
|
||||
}).Info("Retrieved header from builder")
|
||||
gb, err := vs.buildHeaderBlock(ctx, b, h)
|
||||
if err != nil {
|
||||
return false, nil, errors.Wrap(err, "could not combine altair block with payload header")
|
||||
}
|
||||
return true, gb, nil
|
||||
}
|
||||
|
||||
// validatorRegistered returns true if validator with index `id` was previously registered in the database.
|
||||
func (vs *Server) validatorRegistered(ctx context.Context, id types.ValidatorIndex) (bool, error) {
|
||||
if vs.BeaconDB == nil {
|
||||
return false, errors.New("nil beacon db")
|
||||
}
|
||||
_, err := vs.BeaconDB.RegistrationByValidatorID(ctx, id)
|
||||
switch {
|
||||
case errors.Is(err, kv.ErrNotFoundFeeRecipient):
|
||||
return false, nil
|
||||
case err != nil:
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@@ -3,38 +3,22 @@ package validator
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
blockchainTest "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
builderTest "github.com/prysmaticlabs/prysm/beacon-chain/builder/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
prysmtime "github.com/prysmaticlabs/prysm/beacon-chain/core/time"
|
||||
dbTest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/synccommittee"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
|
||||
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func TestServer_buildHeaderBlock(t *testing.T) {
|
||||
@@ -266,7 +250,7 @@ func TestServer_getBuilderBlock(t *testing.T) {
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
vs := &Server{BlockBuilder: tc.mock}
|
||||
gotBlk, err := vs.unblindBuilderBlock(context.Background(), tc.blk)
|
||||
gotBlk, err := vs.getBuilderBlock(context.Background(), tc.blk)
|
||||
if err != nil {
|
||||
require.ErrorContains(t, tc.err, err)
|
||||
} else {
|
||||
@@ -275,379 +259,3 @@ func TestServer_getBuilderBlock(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestServer_readyForBuilder(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
vs := &Server{BeaconDB: dbTest.SetupDB(t)}
|
||||
cs := &blockchainTest.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{}} // Checkpoint root is zeros.
|
||||
vs.FinalizationFetcher = cs
|
||||
ready, err := vs.readyForBuilder(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, ready)
|
||||
|
||||
b := util.NewBeaconBlockBellatrix()
|
||||
wb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
wbr, err := wb.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
b1 := util.NewBeaconBlockBellatrix()
|
||||
b1.Block.Body.ExecutionPayload.BlockNumber = 1 // Execution enabled.
|
||||
wb1, err := wrapper.WrappedSignedBeaconBlock(b1)
|
||||
require.NoError(t, err)
|
||||
wbr1, err := wb1.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, vs.BeaconDB.SaveBlock(ctx, wb))
|
||||
require.NoError(t, vs.BeaconDB.SaveBlock(ctx, wb1))
|
||||
|
||||
// Ready is false because the finalized block does not contain an execution payload.
|
||||
cs = &blockchainTest.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: wbr[:]}}
|
||||
vs.FinalizationFetcher = cs
|
||||
ready, err = vs.readyForBuilder(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, ready)
|
||||
|
||||
// Ready is true because the finalized block contains an execution payload.
|
||||
cs = &blockchainTest.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: wbr1[:]}}
|
||||
vs.FinalizationFetcher = cs
|
||||
ready, err = vs.readyForBuilder(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, ready)
|
||||
}
|
||||
|
||||
func TestServer_getAndBuildHeaderBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
vs := &Server{}
|
||||
|
||||
// Nil builder
|
||||
ready, _, err := vs.getAndBuildHeaderBlock(ctx, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, ready)
|
||||
|
||||
// Not configured
|
||||
vs.BlockBuilder = &builderTest.MockBuilderService{}
|
||||
ready, _, err = vs.getAndBuildHeaderBlock(ctx, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, ready)
|
||||
|
||||
// Block is not ready
|
||||
vs.BlockBuilder = &builderTest.MockBuilderService{HasConfigured: true}
|
||||
vs.FinalizationFetcher = &blockchainTest.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{}}
|
||||
ready, _, err = vs.getAndBuildHeaderBlock(ctx, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, ready)
|
||||
|
||||
// Failed to get header
|
||||
b1 := util.NewBeaconBlockBellatrix()
|
||||
b1.Block.Body.ExecutionPayload.BlockNumber = 1 // Execution enabled.
|
||||
wb1, err := wrapper.WrappedSignedBeaconBlock(b1)
|
||||
require.NoError(t, err)
|
||||
wbr1, err := wb1.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
vs.BeaconDB = dbTest.SetupDB(t)
|
||||
require.NoError(t, vs.BeaconDB.SaveBlock(ctx, wb1))
|
||||
vs.FinalizationFetcher = &blockchainTest.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: wbr1[:]}}
|
||||
vs.HeadFetcher = &blockchainTest.ChainService{Block: wb1}
|
||||
vs.BlockBuilder = &builderTest.MockBuilderService{HasConfigured: true, ErrGetHeader: errors.New("could not get payload")}
|
||||
ready, _, err = vs.getAndBuildHeaderBlock(ctx, &ethpb.BeaconBlockAltair{})
|
||||
require.ErrorContains(t, "could not get payload", err)
|
||||
require.Equal(t, false, ready)
|
||||
|
||||
// Block built and validated!
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.OverrideBeaconConfig(params.MainnetConfig())
|
||||
beaconState, keys := util.DeterministicGenesisStateAltair(t, 16384)
|
||||
sCom, err := altair.NextSyncCommittee(context.Background(), beaconState)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconState.SetCurrentSyncCommittee(sCom))
|
||||
copiedState := beaconState.Copy()
|
||||
|
||||
b, err := util.GenerateFullBlockAltair(copiedState, keys, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
r := bytesutil.ToBytes32(b.Block.ParentRoot)
|
||||
util.SaveBlock(t, ctx, vs.BeaconDB, b)
|
||||
require.NoError(t, vs.BeaconDB.SaveState(ctx, beaconState, r))
|
||||
|
||||
altairBlk, err := util.GenerateFullBlockAltair(copiedState, keys, util.DefaultBlockGenConfig(), 2)
|
||||
require.NoError(t, err)
|
||||
|
||||
h := &v1.ExecutionPayloadHeader{
|
||||
BlockNumber: 123,
|
||||
GasLimit: 456,
|
||||
GasUsed: 789,
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
TransactionsRoot: make([]byte, fieldparams.RootLength),
|
||||
ExtraData: make([]byte, 0),
|
||||
}
|
||||
|
||||
vs.StateGen = stategen.New(vs.BeaconDB)
|
||||
vs.BlockBuilder = &builderTest.MockBuilderService{HasConfigured: true, Bid: &ethpb.SignedBuilderBid{Message: &ethpb.BuilderBid{Header: h}}}
|
||||
ready, builtBlk, err := vs.getAndBuildHeaderBlock(ctx, altairBlk.Block)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, ready)
|
||||
require.DeepEqual(t, h, builtBlk.GetBlindedBellatrix().Body.ExecutionPayloadHeader)
|
||||
}
|
||||
|
||||
func TestServer_GetBellatrixBeaconBlock_HappyCase(t *testing.T) {
|
||||
db := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
hook := logTest.NewGlobal()
|
||||
|
||||
terminalBlockHash := bytesutil.PadTo([]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
|
||||
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 32)
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.MainnetConfig().Copy()
|
||||
cfg.BellatrixForkEpoch = 2
|
||||
cfg.AltairForkEpoch = 1
|
||||
cfg.TerminalBlockHash = common.BytesToHash(terminalBlockHash)
|
||||
cfg.TerminalBlockHashActivationEpoch = 2
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
beaconState, privKeys := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
genesis := b.NewGenesisBlock(stateRoot[:])
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, wsb), "Could not save genesis block")
|
||||
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
bellatrixSlot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
|
||||
require.NoError(t, err)
|
||||
|
||||
emptyPayload := &v1.ExecutionPayload{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
}
|
||||
blk := &ethpb.SignedBeaconBlockBellatrix{
|
||||
Block: &ethpb.BeaconBlockBellatrix{
|
||||
Slot: bellatrixSlot + 1,
|
||||
ParentRoot: parentRoot[:],
|
||||
StateRoot: genesis.Block.StateRoot,
|
||||
Body: &ethpb.BeaconBlockBodyBellatrix{
|
||||
RandaoReveal: genesis.Block.Body.RandaoReveal,
|
||||
Graffiti: genesis.Block.Body.Graffiti,
|
||||
Eth1Data: genesis.Block.Body.Eth1Data,
|
||||
SyncAggregate: &ethpb.SyncAggregate{SyncCommitteeBits: bitfield.NewBitvector512(), SyncCommitteeSignature: make([]byte, 96)},
|
||||
ExecutionPayload: emptyPayload,
|
||||
},
|
||||
},
|
||||
Signature: genesis.Signature,
|
||||
}
|
||||
|
||||
blkRoot, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, blkRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, blkRoot), "Could not save genesis state")
|
||||
|
||||
proposerServer := &Server{
|
||||
HeadFetcher: &blockchainTest.ChainService{State: beaconState, Root: parentRoot[:], Optimistic: false},
|
||||
TimeFetcher: &blockchainTest.ChainService{Genesis: time.Now()},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: &blockchainTest.ChainService{},
|
||||
HeadUpdater: &blockchainTest.ChainService{},
|
||||
ChainStartFetcher: &mockPOW.POWChain{},
|
||||
Eth1InfoFetcher: &mockPOW.POWChain{},
|
||||
MockEth1Votes: true,
|
||||
AttPool: attestations.NewPool(),
|
||||
SlashingsPool: slashings.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateGen: stategen.New(db),
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
ExecutionEngineCaller: &mockPOW.EngineClient{
|
||||
PayloadIDBytes: &v1.PayloadIDBytes{1},
|
||||
ExecutionPayload: emptyPayload,
|
||||
},
|
||||
BeaconDB: db,
|
||||
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
|
||||
BlockBuilder: &builderTest.MockBuilderService{},
|
||||
}
|
||||
proposerServer.ProposerSlotIndexCache.SetProposerAndPayloadIDs(65, 40, [8]byte{'a'})
|
||||
|
||||
randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
|
||||
require.NoError(t, err)
|
||||
|
||||
block, err := proposerServer.getBellatrixBeaconBlock(ctx, &ethpb.BlockRequest{
|
||||
Slot: bellatrixSlot + 1,
|
||||
RandaoReveal: randaoReveal,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
bellatrixBlk, ok := block.GetBlock().(*ethpb.GenericBeaconBlock_Bellatrix)
|
||||
require.Equal(t, true, ok)
|
||||
require.LogsContain(t, hook, "Computed state root")
|
||||
require.DeepEqual(t, emptyPayload, bellatrixBlk.Bellatrix.Body.ExecutionPayload) // Payloads should be equal.
|
||||
}
|
||||
|
||||
func TestServer_GetBellatrixBeaconBlock_BuilderCase(t *testing.T) {
|
||||
db := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
hook := logTest.NewGlobal()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.MainnetConfig().Copy()
|
||||
cfg.BellatrixForkEpoch = 2
|
||||
cfg.AltairForkEpoch = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
beaconState, privKeys := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
genesis := b.NewGenesisBlock(stateRoot[:])
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, wsb), "Could not save genesis block")
|
||||
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
bellatrixSlot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
|
||||
require.NoError(t, err)
|
||||
|
||||
emptyPayload := &v1.ExecutionPayload{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
}
|
||||
blk := &ethpb.SignedBeaconBlockBellatrix{
|
||||
Block: &ethpb.BeaconBlockBellatrix{
|
||||
Slot: bellatrixSlot + 1,
|
||||
ParentRoot: parentRoot[:],
|
||||
StateRoot: genesis.Block.StateRoot,
|
||||
Body: &ethpb.BeaconBlockBodyBellatrix{
|
||||
RandaoReveal: genesis.Block.Body.RandaoReveal,
|
||||
Graffiti: genesis.Block.Body.Graffiti,
|
||||
Eth1Data: genesis.Block.Body.Eth1Data,
|
||||
SyncAggregate: &ethpb.SyncAggregate{SyncCommitteeBits: bitfield.NewBitvector512(), SyncCommitteeSignature: make([]byte, 96)},
|
||||
ExecutionPayload: emptyPayload,
|
||||
},
|
||||
},
|
||||
Signature: genesis.Signature,
|
||||
}
|
||||
|
||||
blkRoot, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, blkRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, blkRoot), "Could not save genesis state")
|
||||
|
||||
b1 := util.NewBeaconBlockBellatrix()
|
||||
b1.Block.Body.ExecutionPayload.BlockNumber = 1 // Execution enabled.
|
||||
wb1, err := wrapper.WrappedSignedBeaconBlock(b1)
|
||||
require.NoError(t, err)
|
||||
wbr1, err := wb1.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, wb1))
|
||||
|
||||
random, err := helpers.RandaoMix(beaconState, prysmtime.CurrentEpoch(beaconState))
|
||||
require.NoError(t, err)
|
||||
|
||||
tstamp, err := slots.ToTime(beaconState.GenesisTime(), bellatrixSlot+1)
|
||||
require.NoError(t, err)
|
||||
h := &v1.ExecutionPayloadHeader{
|
||||
BlockNumber: 123,
|
||||
GasLimit: 456,
|
||||
GasUsed: 789,
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: random,
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
TransactionsRoot: make([]byte, fieldparams.RootLength),
|
||||
ExtraData: make([]byte, 0),
|
||||
Timestamp: uint64(tstamp.Unix()),
|
||||
}
|
||||
|
||||
proposerServer := &Server{
|
||||
FinalizationFetcher: &blockchainTest.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: wbr1[:]}},
|
||||
HeadFetcher: &blockchainTest.ChainService{State: beaconState, Root: parentRoot[:], Optimistic: false, Block: wb1},
|
||||
TimeFetcher: &blockchainTest.ChainService{Genesis: time.Unix(int64(beaconState.GenesisTime()), 0)},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: &blockchainTest.ChainService{},
|
||||
HeadUpdater: &blockchainTest.ChainService{},
|
||||
ChainStartFetcher: &mockPOW.POWChain{},
|
||||
Eth1InfoFetcher: &mockPOW.POWChain{},
|
||||
MockEth1Votes: true,
|
||||
AttPool: attestations.NewPool(),
|
||||
SlashingsPool: slashings.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateGen: stategen.New(db),
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
ExecutionEngineCaller: &mockPOW.EngineClient{
|
||||
PayloadIDBytes: &v1.PayloadIDBytes{1},
|
||||
ExecutionPayload: emptyPayload,
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: true, Bid: &ethpb.SignedBuilderBid{Message: &ethpb.BuilderBid{Header: h}}},
|
||||
}
|
||||
|
||||
randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, proposerServer.BeaconDB.SaveRegistrationsByValidatorIDs(ctx, []types.ValidatorIndex{40},
|
||||
[]*ethpb.ValidatorRegistrationV1{{FeeRecipient: bytesutil.PadTo([]byte{}, fieldparams.FeeRecipientLength), Pubkey: bytesutil.PadTo([]byte{}, fieldparams.BLSPubkeyLength)}}))
|
||||
block, err := proposerServer.getBellatrixBeaconBlock(ctx, &ethpb.BlockRequest{
|
||||
Slot: bellatrixSlot + 1,
|
||||
RandaoReveal: randaoReveal,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
bellatrixBlk, ok := block.GetBlock().(*ethpb.GenericBeaconBlock_BlindedBellatrix)
|
||||
require.Equal(t, true, ok)
|
||||
require.LogsContain(t, hook, "Computed state root")
|
||||
require.DeepEqual(t, h, bellatrixBlk.BlindedBellatrix.Body.ExecutionPayloadHeader) // Payload headers should be equal.
|
||||
}
|
||||
|
||||
func TestServer_validatorRegistered(t *testing.T) {
|
||||
proposerServer := &Server{}
|
||||
ctx := context.Background()
|
||||
|
||||
reg, err := proposerServer.validatorRegistered(ctx, 0)
|
||||
require.ErrorContains(t, "nil beacon db", err)
|
||||
require.Equal(t, false, reg)
|
||||
|
||||
proposerServer.BeaconDB = dbTest.SetupDB(t)
|
||||
reg, err = proposerServer.validatorRegistered(ctx, 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, reg)
|
||||
|
||||
f := bytesutil.PadTo([]byte{}, fieldparams.FeeRecipientLength)
|
||||
p := bytesutil.PadTo([]byte{}, fieldparams.BLSPubkeyLength)
|
||||
require.NoError(t, proposerServer.BeaconDB.SaveRegistrationsByValidatorIDs(ctx, []types.ValidatorIndex{0, 1},
|
||||
[]*ethpb.ValidatorRegistrationV1{{FeeRecipient: f, Pubkey: p}, {FeeRecipient: f, Pubkey: p}}))
|
||||
|
||||
reg, err = proposerServer.validatorRegistered(ctx, 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, reg)
|
||||
reg, err = proposerServer.validatorRegistered(ctx, 1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, reg)
|
||||
}
|
||||
|
||||
@@ -105,11 +105,11 @@ func (vs *Server) getExecutionPayload(ctx context.Context, slot types.Slot, vIdx
|
||||
switch finalizedBlock.Version() {
|
||||
case version.Phase0, version.Altair: // Blocks before Bellatrix don't have execution payloads. Use zeros as the hash.
|
||||
default:
|
||||
finalizedPayload, err := finalizedBlock.Block().Body().Execution()
|
||||
finalizedPayload, err := finalizedBlock.Block().Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
finalizedBlockHash = finalizedPayload.BlockHash()
|
||||
finalizedBlockHash = finalizedPayload.BlockHash
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
gethtypes "github.com/ethereum/go-ethereum/core/types"
|
||||
chainMock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
dbTest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
@@ -14,7 +13,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
@@ -48,9 +46,7 @@ func TestServer_getExecutionPayload(t *testing.T) {
|
||||
}))
|
||||
|
||||
transitionSt, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(&pb.ExecutionPayloadHeader{BlockNumber: 1})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, transitionSt.SetLatestExecutionPayloadHeader(wrappedHeader))
|
||||
require.NoError(t, transitionSt.SetLatestExecutionPayloadHeader(&pb.ExecutionPayloadHeader{BlockNumber: 1}))
|
||||
b2pb := util.NewBeaconBlockBellatrix()
|
||||
b2r, err := b2pb.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
@@ -147,9 +143,7 @@ func TestServer_getExecutionPayload_UnexpectedFeeRecipient(t *testing.T) {
|
||||
}))
|
||||
|
||||
transitionSt, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(&pb.ExecutionPayloadHeader{BlockNumber: 1})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, transitionSt.SetLatestExecutionPayloadHeader(wrappedHeader))
|
||||
require.NoError(t, transitionSt.SetLatestExecutionPayloadHeader(&pb.ExecutionPayloadHeader{BlockNumber: 1}))
|
||||
b2pb := util.NewBeaconBlockBellatrix()
|
||||
b2r, err := b2pb.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
@@ -209,7 +203,7 @@ func TestServer_getTerminalBlockHashIfExists(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
name: "use terminal block hash, doesn't exist",
|
||||
paramsTerminalHash: common.BytesToHash([]byte("a")).Bytes(),
|
||||
paramsTerminalHash: []byte{'a'},
|
||||
errString: "could not fetch height for hash",
|
||||
},
|
||||
{
|
||||
@@ -224,21 +218,17 @@ func TestServer_getTerminalBlockHashIfExists(t *testing.T) {
|
||||
name: "use terminal total difficulty",
|
||||
paramsTd: "2",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: common.BytesToHash([]byte("a")),
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash([]byte("b")),
|
||||
},
|
||||
Hash: []byte{'a'},
|
||||
ParentHash: []byte{'b'},
|
||||
TotalDifficulty: "0x3",
|
||||
},
|
||||
parentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: common.BytesToHash([]byte("b")),
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash([]byte("c")),
|
||||
},
|
||||
Hash: []byte{'b'},
|
||||
ParentHash: []byte{'c'},
|
||||
TotalDifficulty: "0x1",
|
||||
},
|
||||
wantExists: true,
|
||||
wantTerminalBlockHash: common.BytesToHash([]byte("a")).Bytes(),
|
||||
wantTerminalBlockHash: []byte{'a'},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
@@ -250,7 +240,7 @@ func TestServer_getTerminalBlockHashIfExists(t *testing.T) {
|
||||
var m map[[32]byte]*pb.ExecutionBlock
|
||||
if tt.parentPowBlock != nil {
|
||||
m = map[[32]byte]*pb.ExecutionBlock{
|
||||
tt.parentPowBlock.Hash: tt.parentPowBlock,
|
||||
bytesutil.ToBytes32(tt.parentPowBlock.Hash): tt.parentPowBlock,
|
||||
}
|
||||
}
|
||||
c := powtesting.NewPOWChain()
|
||||
|
||||
@@ -11,7 +11,6 @@ go_library(
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
@@ -84,7 +83,7 @@ type WriteOnlyBeaconState interface {
|
||||
SetSlashings(val []uint64) error
|
||||
UpdateSlashingsAtIndex(idx, val uint64) error
|
||||
AppendHistoricalRoots(root [32]byte) error
|
||||
SetLatestExecutionPayloadHeader(payload interfaces.ExecutionData) error
|
||||
SetLatestExecutionPayloadHeader(payload *enginev1.ExecutionPayloadHeader) error
|
||||
}
|
||||
|
||||
// ReadOnlyValidator defines a struct which only has read access to validator methods.
|
||||
|
||||
@@ -64,7 +64,6 @@ go_library(
|
||||
"//beacon-chain/state/types:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
@@ -114,7 +113,6 @@ go_test(
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
|
||||
@@ -46,7 +46,6 @@ func (b *BeaconState) UnrealizedCheckpointBalances() (uint64, uint64, uint64, er
|
||||
return 0, 0, 0, errNotSupported("UnrealizedCheckpointBalances", b.version)
|
||||
}
|
||||
|
||||
currentEpoch := time.CurrentEpoch(b)
|
||||
b.lock.RLock()
|
||||
defer b.lock.RUnlock()
|
||||
|
||||
@@ -56,6 +55,7 @@ func (b *BeaconState) UnrealizedCheckpointBalances() (uint64, uint64, uint64, er
|
||||
return 0, 0, 0, ErrNilParticipation
|
||||
}
|
||||
|
||||
currentEpoch := time.CurrentEpoch(b)
|
||||
return stateutil.UnrealizedCheckpointBalances(cp, pp, b.validators, currentEpoch)
|
||||
|
||||
}
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
|
||||
v3 "github.com/prysmaticlabs/prysm/beacon-chain/state/v3"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
@@ -181,9 +180,7 @@ func TestComputeFieldRootsWithHasher_Bellatrix(t *testing.T) {
|
||||
require.NoError(t, beaconState.SetInactivityScores([]uint64{1, 2, 3}))
|
||||
require.NoError(t, beaconState.SetCurrentSyncCommittee(syncCommittee("current")))
|
||||
require.NoError(t, beaconState.SetNextSyncCommittee(syncCommittee("next")))
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(executionPayloadHeader())
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconState.SetLatestExecutionPayloadHeader(wrappedHeader))
|
||||
require.NoError(t, beaconState.SetLatestExecutionPayloadHeader(executionPayloadHeader()))
|
||||
|
||||
v1State, ok := beaconState.(*v3.BeaconState)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
@@ -1,27 +1,22 @@
|
||||
package state_native
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
nativetypes "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native/types"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
_ "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
)
|
||||
|
||||
// SetLatestExecutionPayloadHeader for the beacon state.
|
||||
func (b *BeaconState) SetLatestExecutionPayloadHeader(val interfaces.ExecutionData) error {
|
||||
func (b *BeaconState) SetLatestExecutionPayloadHeader(val *enginev1.ExecutionPayloadHeader) error {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
if b.version == version.Phase0 || b.version == version.Altair {
|
||||
return errNotSupported("SetLatestExecutionPayloadHeader", b.version)
|
||||
}
|
||||
header, ok := val.Proto().(*enginev1.ExecutionPayloadHeader)
|
||||
if !ok {
|
||||
return errors.New("value must be an execution payload header")
|
||||
}
|
||||
b.latestExecutionPayloadHeader = header
|
||||
|
||||
b.latestExecutionPayloadHeader = val
|
||||
b.markFieldAsDirty(nativetypes.LatestExecutionPayloadHeader)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -8,11 +8,6 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// UnrealizedCheckpointBalances returns the total current active balance, the
|
||||
// total previous epoch correctly attested for target balance, and the total
|
||||
// current epoch correctly attested for target balance. It takes the current and
|
||||
// previous epoch participation bits as parameters so implicitly only works for
|
||||
// beacon states post-Altair.
|
||||
func UnrealizedCheckpointBalances(cp, pp []byte, validators []*ethpb.Validator, currentEpoch types.Epoch) (uint64, uint64, uint64, error) {
|
||||
targetIdx := params.BeaconConfig().TimelyTargetFlagIndex
|
||||
activeBalance := uint64(0)
|
||||
|
||||
@@ -40,7 +40,6 @@ go_library(
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
|
||||
@@ -2,7 +2,7 @@ package v1
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -57,6 +57,6 @@ func (*BeaconState) SetInactivityScores(_ []uint64) error {
|
||||
}
|
||||
|
||||
// SetLatestExecutionPayloadHeader is not supported for phase 0 beacon state.
|
||||
func (*BeaconState) SetLatestExecutionPayloadHeader(_ interfaces.ExecutionData) error {
|
||||
func (*BeaconState) SetLatestExecutionPayloadHeader(_ *enginev1.ExecutionPayloadHeader) error {
|
||||
return errors.New("SetLatestExecutionPayloadHeader is not supported for phase 0 beacon state")
|
||||
}
|
||||
|
||||
@@ -43,7 +43,6 @@ go_library(
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
|
||||
@@ -2,7 +2,7 @@ package v2
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -22,6 +22,6 @@ func (*BeaconState) RotateAttestations() error {
|
||||
}
|
||||
|
||||
// SetLatestExecutionPayloadHeader is not supported for hard fork 1 beacon state.
|
||||
func (*BeaconState) SetLatestExecutionPayloadHeader(_ interfaces.ExecutionData) error {
|
||||
func (*BeaconState) SetLatestExecutionPayloadHeader(_ *enginev1.ExecutionPayloadHeader) error {
|
||||
return errors.New("SetLatestExecutionPayloadHeader is not supported for hard fork 1 beacon state")
|
||||
}