Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)

Compare commits: skipSlashi...old-clock- (83 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 5a4adba3ae |  |
|  | eb2303f02f |  |
|  | cd7fd7ebb6 |  |
|  | c10cbd9291 |  |
|  | e5ab259ee1 |  |
|  | 9e74c3d641 |  |
|  | 59dcea81c2 |  |
|  | 9149dc2aae |  |
|  | a7c9c76b18 |  |
|  | 5a4edf897f |  |
|  | 2d6b157eea |  |
|  | 32745b5484 |  |
|  | bfdaf2ec5a |  |
|  | 39c343bcab |  |
|  | de1ecf2d60 |  |
|  | 7aee67af90 |  |
|  | 9830ce43d6 |  |
|  | 63a8690140 |  |
|  | 7978a0269b |  |
|  | 021df67fdc |  |
|  | f20e6351f5 |  |
|  | a2e834a683 |  |
|  | da25624b1d |  |
|  | 1a5dd879c5 |  |
|  | 65a9ede2d3 |  |
|  | 4b083c2ca9 |  |
|  | 82ae4caca8 |  |
|  | e770f0fe1f |  |
|  | 30b8fba2ac |  |
|  | 5d94fd48ca |  |
|  | 176d763091 |  |
|  | 97dc9ebbc8 |  |
|  | 6a74dcf35b |  |
|  | a775798d89 |  |
|  | e2442bb0a6 |  |
|  | 2669006694 |  |
|  | 5722a5793c |  |
|  | 96aba590b5 |  |
|  | 2162ffb05f |  |
|  | d41805a39c |  |
|  | 819632dfc5 |  |
|  | 10fffa6e7c |  |
|  | 5cda86bb93 |  |
|  | f2dcc9a570 |  |
|  | 63354b5bb7 |  |
|  | e48f0aef41 |  |
|  | ab1defc5de |  |
|  | b0e15f3c8f |  |
|  | e01a898264 |  |
|  | 7c30533870 |  |
|  | 7654ffdcfc |  |
|  | d6031ac386 |  |
|  | f7d3b5c510 |  |
|  | c0f3946f58 |  |
|  | c33acde64e |  |
|  | 8310d22a05 |  |
|  | 3060096233 |  |
|  | 5d06c14cec |  |
|  | 57abf02e34 |  |
|  | 44218a9c5b |  |
|  | d53f2c7661 |  |
|  | 5e53d6976e |  |
|  | 7fd2c08be3 |  |
|  | 6695e7c756 |  |
|  | aeede2165d |  |
|  | 67a15183ca |  |
|  | 208dc80702 |  |
|  | e80806d455 |  |
|  | 80d0a82f9b |  |
|  | f9b3cde005 |  |
|  | d8f9750387 |  |
|  | 5e8b9dde5b |  |
|  | c2caff4230 |  |
|  | b67c885995 |  |
|  | 60ed488428 |  |
|  | 2f0e2886b4 |  |
|  | 2d53ae79df |  |
|  | ce277cb388 |  |
|  | c9a366c36a |  |
|  | 3a957c567f |  |
|  | 77a63f871d |  |
|  | 8dd8ccc147 |  |
|  | 3c48bce3a3 |  |
@@ -5,6 +5,7 @@
[](https://github.com/ethereum/consensus-specs/tree/v1.2.0-rc.1)
[](https://github.com/ethereum/execution-apis/tree/v1.0.0-alpha.9/src/engine)
[](https://discord.gg/CTYGPUJ)
[](https://www.gitpoap.io/gh/prysmaticlabs/prysm)

This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum Consensus](https://ethereum.org/en/eth2/) specification, developed by [Prysmatic Labs](https://prysmaticlabs.com). See the [Changelog](https://github.com/prysmaticlabs/prysm/releases) for details of the latest releases and upcoming breaking changes.
10 WORKSPACE

@@ -309,9 +309,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "4e8a18b21d056c4032605621b1a6632198eabab57cb90c61e273f344c287f1b2",
strip_prefix = "eth2-networks-791a5369c5981e829698b17fbcdcdacbdaba97c8",
url = "https://github.com/eth-clients/eth2-networks/archive/791a5369c5981e829698b17fbcdcdacbdaba97c8.tar.gz",
sha256 = "126b615e3853e29b61f082f6c89c8bc1c38cd92fb84b0004396fc49e7acc8d9f",
strip_prefix = "eth2-networks-f3ccbe0cf5798d5cd23e4e6e7119aefa043c0935",
url = "https://github.com/eth-clients/eth2-networks/archive/f3ccbe0cf5798d5cd23e4e6e7119aefa043c0935.tar.gz",
)

http_archive(

@@ -342,9 +342,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "4797a7e594a5b1f4c1c8080701613f3ee451b01ec0861499ea7d9b60877a6b23",
sha256 = "98013b40922e54a64996da49b939e0a88fe2456f68eedc5aee4ceba0f8623f71",
urls = [
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.3/prysm-web-ui.tar.gz",
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v2.0.0/prysm-web-ui.tar.gz",
],
)
@@ -12,11 +12,13 @@ go_library(
deps = [
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//monitoring/tracing:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)

@@ -36,6 +38,7 @@ go_test(
"//proto/prysm/v1alpha1:go_default_library",
"//testing/require:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_golang_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
],
)
@@ -12,12 +12,13 @@ import (
"text/template"
"time"

v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"

"github.com/pkg/errors"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/monitoring/tracing"
v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)

const (

@@ -128,37 +129,51 @@ func (c *Client) NodeURL() string {

type reqOption func(*http.Request)

// do is a generic, opinionated GET function to reduce boilerplate amongst the getters in this packageapi/client/builder/types.go.
func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, opts ...reqOption) ([]byte, error) {
// do is a generic, opinionated request function to reduce boilerplate amongst the methods in this package api/client/builder/types.go.
func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, opts ...reqOption) (res []byte, err error) {
ctx, span := trace.StartSpan(ctx, "builder.client.do")
defer func() {
tracing.AnnotateError(span, err)
span.End()
}()

u := c.baseURL.ResolveReference(&url.URL{Path: path})
log.Printf("requesting %s", u.String())

span.AddAttributes(trace.StringAttribute("url", u.String()),
trace.StringAttribute("method", method))

req, err := http.NewRequestWithContext(ctx, method, u.String(), body)
if err != nil {
return nil, err
return
}
for _, o := range opts {
o(req)
}
for _, o := range c.obvs {
if err := o.observe(req); err != nil {
return nil, err
if err = o.observe(req); err != nil {
return
}
}
r, err := c.hc.Do(req)
if err != nil {
return nil, err
return
}
defer func() {
err = r.Body.Close()
closeErr := r.Body.Close()
if closeErr != nil {
log.WithError(closeErr).Error("Failed to close response body")
}
}()
if r.StatusCode != http.StatusOK {
return nil, non200Err(r)
err = non200Err(r)
return
}
b, err := io.ReadAll(r.Body)
res, err = io.ReadAll(r.Body)
if err != nil {
return nil, errors.Wrap(err, "error reading http response body from GetBlock")
err = errors.Wrap(err, "error reading http response body from builder server")
return
}
return b, nil
return
}

var execHeaderTemplate = template.Must(template.New("").Parse(getExecHeaderPath))
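The rewrite of do above switches to named return values so the deferred closure can annotate the trace span with whatever error the function finally returns, including errors assigned just before a bare return. A minimal, self-contained sketch of that pattern, with no Prysm or OpenCensus imports; recordError is a hypothetical stand-in for tracing.AnnotateError:

package main

import (
    "errors"
    "fmt"
)

// recordError is a stand-in for a tracing hook such as tracing.AnnotateError.
func recordError(err error) {
    if err != nil {
        fmt.Println("span annotated with error:", err)
    }
}

// fetch uses named returns so the deferred hook observes the final values of res and err,
// including errors assigned right before a bare `return`.
func fetch(fail bool) (res []byte, err error) {
    defer func() {
        recordError(err) // runs after err has its final value
    }()

    if fail {
        err = errors.New("upstream returned non-200")
        return // returns (nil, err) and the deferred hook sees err
    }
    res = []byte("ok")
    return
}

func main() {
    if _, err := fetch(true); err != nil {
        fmt.Println("caller saw:", err)
    }
}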
@@ -201,8 +216,14 @@ func (c *Client) GetHeader(ctx context.Context, slot types.Slot, parentHash [32]
// RegisterValidator encodes the SignedValidatorRegistrationV1 message to json (including hex-encoding the byte
// fields with 0x prefixes) and posts to the builder validator registration endpoint.
func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error {
ctx, span := trace.StartSpan(ctx, "builder.client.RegisterValidator")
defer span.End()
span.AddAttributes(trace.Int64Attribute("num_reqs", int64(len(svr))))

if len(svr) == 0 {
return errors.Wrap(errMalformedRequest, "empty validator registration list")
err := errors.Wrap(errMalformedRequest, "empty validator registration list")
tracing.AnnotateError(span, err)
return err
}
vs := make([]*SignedValidatorRegistration, len(svr))
for i := 0; i < len(svr); i++ {

@@ -210,8 +231,11 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
}
body, err := json.Marshal(vs)
if err != nil {
return errors.Wrap(err, "error encoding the SignedValidatorRegistration value body in RegisterValidator")
err := errors.Wrap(err, "error encoding the SignedValidatorRegistration value body in RegisterValidator")
tracing.AnnotateError(span, err)
return err
}

_, err = c.do(ctx, http.MethodPost, postRegisterValidatorPath, bytes.NewBuffer(body))
return err
}
@@ -31,6 +31,22 @@ func (r *SignedValidatorRegistration) MarshalJSON() ([]byte, error) {
})
}

func (r *SignedValidatorRegistration) UnmarshalJSON(b []byte) error {
if r.SignedValidatorRegistrationV1 == nil {
r.SignedValidatorRegistrationV1 = &eth.SignedValidatorRegistrationV1{}
}
o := struct {
Message *ValidatorRegistration `json:"message,omitempty"`
Signature hexutil.Bytes `json:"signature,omitempty"`
}{}
if err := json.Unmarshal(b, &o); err != nil {
return err
}
r.Message = o.Message.ValidatorRegistrationV1
r.Signature = o.Signature
return nil
}

func (r *ValidatorRegistration) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`

@@ -45,6 +61,33 @@ func (r *ValidatorRegistration) MarshalJSON() ([]byte, error) {
})
}

func (r *ValidatorRegistration) UnmarshalJSON(b []byte) error {
if r.ValidatorRegistrationV1 == nil {
r.ValidatorRegistrationV1 = &eth.ValidatorRegistrationV1{}
}
o := struct {
FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`
GasLimit string `json:"gas_limit,omitempty"`
Timestamp string `json:"timestamp,omitempty"`
Pubkey hexutil.Bytes `json:"pubkey,omitempty"`
}{}
if err := json.Unmarshal(b, &o); err != nil {
return err
}

r.FeeRecipient = o.FeeRecipient
r.Pubkey = o.Pubkey
var err error
if r.GasLimit, err = strconv.ParseUint(o.GasLimit, 10, 64); err != nil {
return errors.Wrap(err, "failed to parse gas limit")
}
if r.Timestamp, err = strconv.ParseUint(o.Timestamp, 10, 64); err != nil {
return errors.Wrap(err, "failed to parse timestamp")
}

return nil
}

type Uint256 struct {
*big.Int
}
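The struct tags above imply the wire shape these marshalers accept: hex fields carry 0x prefixes, while gas_limit and timestamp travel as decimal strings and are parsed with strconv.ParseUint. A minimal, self-contained sketch of decoding that shape; registrationWire and the field values are illustrative only, not Prysm types:

package main

import (
    "encoding/json"
    "fmt"
    "strconv"
)

// registrationWire mirrors the intermediate struct used by ValidatorRegistration.UnmarshalJSON above.
type registrationWire struct {
    FeeRecipient string `json:"fee_recipient"`
    GasLimit     string `json:"gas_limit"`
    Timestamp    string `json:"timestamp"`
    Pubkey       string `json:"pubkey"`
}

func main() {
    blob := []byte(`{
        "fee_recipient": "0x0000000000000000000000000000000000000000",
        "gas_limit": "30000000",
        "timestamp": "1652000000",
        "pubkey": "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
    }`)

    var w registrationWire
    if err := json.Unmarshal(blob, &w); err != nil {
        panic(err)
    }
    // Same ParseUint step as the UnmarshalJSON above: numeric fields arrive as decimal strings.
    gasLimit, err := strconv.ParseUint(w.GasLimit, 10, 64)
    if err != nil {
        panic(err)
    }
    fmt.Println(w.FeeRecipient, gasLimit)
}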
@@ -9,6 +9,7 @@ import (
"testing"

"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/golang/protobuf/proto"
"github.com/prysmaticlabs/go-bitfield"
v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"

@@ -31,7 +32,8 @@ func TestSignedValidatorRegistration_MarshalJSON(t *testing.T) {
},
Signature: make([]byte, 96),
}
je, err := json.Marshal(&SignedValidatorRegistration{SignedValidatorRegistrationV1: svr})
a := &SignedValidatorRegistration{SignedValidatorRegistrationV1: svr}
je, err := json.Marshal(a)
require.NoError(t, err)
// decode with a struct w/ plain strings so we can check the string encoding of the hex fields
un := struct {

@@ -45,6 +47,14 @@ func TestSignedValidatorRegistration_MarshalJSON(t *testing.T) {
require.Equal(t, "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", un.Signature)
require.Equal(t, "0x0000000000000000000000000000000000000000", un.Message.FeeRecipient)
require.Equal(t, "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", un.Message.Pubkey)

t.Run("roundtrip", func(t *testing.T) {
b := &SignedValidatorRegistration{}
if err := json.Unmarshal(je, b); err != nil {
require.NoError(t, err)
}
require.Equal(t, proto.Equal(a.SignedValidatorRegistrationV1, b.SignedValidatorRegistrationV1), true)
})
}

var testExampleHeaderResponse = `{
@@ -3,6 +3,7 @@ package async_test
import (
"context"
"sync"
"sync/atomic"
"testing"
"time"

@@ -16,7 +17,7 @@ func TestDebounce_NoEvents(t *testing.T) {
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(context.Background())
interval := time.Second
timesHandled := 0
timesHandled := int32(0)
wg := &sync.WaitGroup{}
wg.Add(1)
go func() {

@@ -26,21 +27,21 @@ func TestDebounce_NoEvents(t *testing.T) {
}()
go func() {
async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
timesHandled++
atomic.AddInt32(&timesHandled, 1)
})
wg.Done()
}()
if util.WaitTimeout(wg, interval*2) {
t.Fatalf("Test should have exited by now, timed out")
}
assert.Equal(t, 0, timesHandled, "Wrong number of handled calls")
assert.Equal(t, int32(0), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")
}

func TestDebounce_CtxClosing(t *testing.T) {
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(context.Background())
interval := time.Second
timesHandled := 0
timesHandled := int32(0)
wg := &sync.WaitGroup{}
wg.Add(1)
go func() {

@@ -62,23 +63,23 @@ func TestDebounce_CtxClosing(t *testing.T) {
}()
go func() {
async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
timesHandled++
atomic.AddInt32(&timesHandled, 1)
})
wg.Done()
}()
if util.WaitTimeout(wg, interval*2) {
t.Fatalf("Test should have exited by now, timed out")
}
assert.Equal(t, 0, timesHandled, "Wrong number of handled calls")
assert.Equal(t, int32(0), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")
}

func TestDebounce_SingleHandlerInvocation(t *testing.T) {
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(context.Background())
interval := time.Second
timesHandled := 0
timesHandled := int32(0)
go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
timesHandled++
atomic.AddInt32(&timesHandled, 1)
})
for i := 0; i < 100; i++ {
eventsChan <- struct{}{}

@@ -86,7 +87,7 @@ func TestDebounce_SingleHandlerInvocation(t *testing.T) {
// We should expect 100 rapid fire changes to only have caused
// 1 handler to trigger after the debouncing period.
time.Sleep(interval * 2)
assert.Equal(t, 1, timesHandled, "Wrong number of handled calls")
assert.Equal(t, int32(1), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")
cancel()
}

@@ -94,23 +95,23 @@ func TestDebounce_MultipleHandlerInvocation(t *testing.T) {
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(context.Background())
interval := time.Second
timesHandled := 0
timesHandled := int32(0)
go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
timesHandled++
atomic.AddInt32(&timesHandled, 1)
})
for i := 0; i < 100; i++ {
eventsChan <- struct{}{}
}
require.Equal(t, 0, timesHandled, "Events must prevent from handler execution")
require.Equal(t, int32(0), atomic.LoadInt32(&timesHandled), "Events must prevent from handler execution")

// By this time the first event should be triggered.
time.Sleep(2 * time.Second)
assert.Equal(t, 1, timesHandled, "Wrong number of handled calls")
assert.Equal(t, int32(1), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")

// Second event.
eventsChan <- struct{}{}
time.Sleep(2 * time.Second)
assert.Equal(t, 2, timesHandled, "Wrong number of handled calls")
assert.Equal(t, int32(2), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")

cancel()
}
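These tests mutate the counter from the Debounce goroutine while the test goroutine reads it, so switching to atomic.AddInt32/LoadInt32 removes a data race that the race detector would flag. A minimal, self-contained sketch of the same idea, with a hypothetical coalescing loop standing in for async.Debounce:

package main

import (
    "fmt"
    "sync"
    "sync/atomic"
    "time"
)

func main() {
    var handled int32 // shared between goroutines, so all access must be atomic
    events := make(chan struct{}, 100)
    var wg sync.WaitGroup

    wg.Add(1)
    go func() {
        defer wg.Done()
        // Hypothetical stand-in for async.Debounce: coalesce a burst of events into
        // a single handler call once the channel has been quiet for the interval.
        timer := time.NewTimer(50 * time.Millisecond)
        defer timer.Stop()
        for {
            select {
            case <-events:
                timer.Reset(50 * time.Millisecond) // restart the quiet period
            case <-timer.C:
                atomic.AddInt32(&handled, 1) // write from the worker goroutine
                return
            }
        }
    }()

    for i := 0; i < 100; i++ {
        events <- struct{}{} // rapid-fire burst, like the tests above
    }
    wg.Wait()
    // Read with atomic.LoadInt32 so `go run -race` / `go test -race` stays quiet.
    fmt.Println("handled:", atomic.LoadInt32(&handled))
}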
@@ -2,6 +2,7 @@ package async_test

import (
"context"
"sync/atomic"
"testing"
"time"

@@ -11,15 +12,15 @@
func TestEveryRuns(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())

i := 0
i := int32(0)
async.RunEvery(ctx, 100*time.Millisecond, func() {
i++
atomic.AddInt32(&i, 1)
})

// Sleep for a bit and ensure the value has increased.
time.Sleep(200 * time.Millisecond)

if i == 0 {
if atomic.LoadInt32(&i) == 0 {
t.Error("Counter failed to increment with ticker")
}

@@ -28,12 +29,12 @@ func TestEveryRuns(t *testing.T) {
// Sleep for a bit to let the cancel take place.
time.Sleep(100 * time.Millisecond)

last := i
last := atomic.LoadInt32(&i)

// Sleep for a bit and ensure the value has not increased.
time.Sleep(200 * time.Millisecond)

if i != last {
if atomic.LoadInt32(&i) != last {
t.Error("Counter incremented after stop")
}
}
@@ -4,6 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"chain_info.go",
"clock.go",
"error.go",
"execution_engine.go",
"head.go",

@@ -52,6 +53,7 @@ go_library(
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/geninit:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",

@@ -59,11 +61,9 @@ go_library(
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/forks/bellatrix:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",
@@ -27,8 +27,8 @@ type ChainInfoFetcher interface {
GenesisFetcher
CanonicalFetcher
ForkFetcher
TimeFetcher
HeadDomainFetcher
ClockProvider
}

// HeadUpdater defines a common interface for methods in blockchain service

@@ -37,9 +37,7 @@ type HeadUpdater interface {
UpdateHead(context.Context) error
}

// TimeFetcher retrieves the Ethereum consensus data that's related to time.
type TimeFetcher interface {
GenesisTime() time.Time
type CurrentSlotter interface {
CurrentSlot() types.Slot
}

@@ -220,11 +218,6 @@ func (s *Service) HeadETH1Data() *ethpb.Eth1Data {
return s.head.state.Eth1Data()
}

// GenesisTime returns the genesis time of beacon chain.
func (s *Service) GenesisTime() time.Time {
return s.genesisTime
}

// GenesisValidatorsRoot returns the genesis validators
// root of the chain.
func (s *Service) GenesisValidatorsRoot() [32]byte {

@@ -343,8 +336,14 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
return true, nil
}

// Historical non-canonical blocks here are returned as optimistic for safety.
isCanonical, err := s.IsCanonical(ctx, root)
if err != nil {
return false, err
}

if slots.ToEpoch(ss.Slot)+1 < validatedCheckpoint.Epoch {
return false, nil
return !isCanonical, nil
}

// Checkpoint root could be zeros before the first finalized epoch. Use genesis root if the case.

@@ -359,22 +358,49 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
if ss.Slot > lastValidated.Slot {
return true, nil
}

isCanonical, err := s.IsCanonical(ctx, root)
if err != nil {
return false, err
}

// Historical non-canonical blocks here are returned as optimistic for safety.
return !isCanonical, nil
}

// SetGenesisTime sets the genesis time of beacon chain.
func (s *Service) SetGenesisTime(t time.Time) {
s.genesisTime = t
s.clock = NewClock(t)
close(s.clockReady)
}

func (s *Service) setClock(c Clock) {
s.clock = c
close(s.clockReady)
}

func (s *Service) genesisTime() time.Time {
c, err := s.WaitForClock(context.TODO())
if err != nil {
panic(err)
}
return c.GenesisTime()
}

// ForkChoiceStore returns the fork choice store in the service.
func (s *Service) ForkChoiceStore() forkchoice.ForkChoicer {
return s.cfg.ForkChoiceStore
}

// ClockProvider implements WaitForClock, yielding a clock type that can be used to get
// the genesis time, slot values derived from genesis, or a possibly synthetic time.Now value
type ClockProvider interface {
WaitForClock(context.Context) (Clock, error)
}

// WaitForClock will block until the Clock is ready. This provides a syncronization mechanism for services that
// need the clock to be available before they can start running.
func (s *Service) WaitForClock(ctx context.Context) (Clock, error) {
if s.clockReady != nil {
select {
case <-s.clockReady:
return s.clock, nil
case <-ctx.Done():
return nil, ctx.Err()
}
}
return s.clock, nil
}
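WaitForClock gives dependent services a way to block until genesis time is known instead of reading an unset field. A minimal sketch of a consumer under that contract, using local stand-in interfaces that mirror only the ClockProvider/Clock methods shown above; startWhenClockReady and fakeProvider are hypothetical names:

package main

import (
    "context"
    "fmt"
    "time"
)

// Minimal stand-ins for the Clock and ClockProvider interfaces introduced in this diff.
type Clock interface {
    GenesisTime() time.Time
    Now() time.Time
}

type ClockProvider interface {
    WaitForClock(context.Context) (Clock, error)
}

// startWhenClockReady is a hypothetical dependent-service entry point: it blocks on
// WaitForClock, bounded by a timeout so startup cannot hang forever.
func startWhenClockReady(ctx context.Context, cp ClockProvider) error {
    ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
    defer cancel()

    c, err := cp.WaitForClock(ctx)
    if err != nil {
        return fmt.Errorf("clock never became ready: %w", err)
    }
    fmt.Printf("genesis at %s, %s elapsed since genesis\n", c.GenesisTime(), c.Now().Sub(c.GenesisTime()))
    return nil
}

// fakeProvider resolves immediately with a fixed-genesis clock, just to exercise the flow.
type fakeProvider struct{ genesis time.Time }

type fixedClock struct{ genesis time.Time }

func (f fixedClock) GenesisTime() time.Time { return f.genesis }
func (f fixedClock) Now() time.Time         { return time.Now() }

func (p fakeProvider) WaitForClock(ctx context.Context) (Clock, error) {
    return fixedClock{genesis: p.genesis}, nil
}

func main() {
    _ = startWhenClockReady(context.Background(), fakeProvider{genesis: time.Unix(0, 0)})
}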
@@ -28,7 +28,6 @@ import (

// Ensure Service implements chain info interface.
var _ ChainInfoFetcher = (*Service)(nil)
var _ TimeFetcher = (*Service)(nil)
var _ ForkFetcher = (*Service)(nil)

// prepareForkchoiceState prepares a beacon state with the given data to mock

@@ -202,9 +201,9 @@ func TestHeadState_CanRetrieve(t *testing.T) {
}

func TestGenesisTime_CanRetrieve(t *testing.T) {
c := &Service{genesisTime: time.Unix(999, 0)}
c := &Service{clock: NewClock(time.Unix(999, 0))}
wanted := time.Unix(999, 0)
assert.Equal(t, wanted, c.GenesisTime(), "Did not get wanted genesis time")
assert.Equal(t, wanted, c.genesisTime(), "Did not get wanted genesis time")
}

func TestCurrentFork_CanRetrieve(t *testing.T) {

@@ -304,6 +303,7 @@ func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
root = c.HeadGenesisValidatorsRoot()
require.DeepEqual(t, root[:], s.GenesisValidatorsRoot())
}

func TestService_ChainHeads_ProtoArray(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New()}}

@@ -474,7 +474,7 @@ func TestService_IsOptimistic_DoublyLinkedTree(t *testing.T) {

func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {
ctx := context.Background()
c := &Service{genesisTime: time.Now()}
c := &Service{clock: NewClock(time.Now())}
opt, err := c.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, false, opt)
69 beacon-chain/blockchain/clock.go Normal file

@@ -0,0 +1,69 @@
package blockchain

import (
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/time/slots"
"time"
)

// Clock abstracts important time-related concerns in the beacon chain:
// - genesis time
// - provides a time.Now() construct that can be overriden in tests
// - syncronization point for code that needs to know the genesis time
// - CurrentSlot: convenience conversion for current time -> slot
// - support backwards compatibility with the TimeFetcher interface
type Clock interface {
GenesisTime() time.Time
CurrentSlot() types.Slot
Now() time.Time
}

// clock is a type that fulfills the TimeFetcher interface. This can be used in a number of places where
// blockchain.ChainInfoFetcher has historically been used.
type clock struct {
time.Time
now Now
}
var _ Clock = &clock{}

// clock provides an accessor to the embedded time, also fulfilling the blockchain.TimeFetcher interface.
func (gt clock) GenesisTime() time.Time {
return gt.Time
}

// CurrentSlot returns the current slot relative to the time.Time value clock embeds.
func (gt clock) CurrentSlot() types.Slot {
return slots.Duration(gt.Time, gt.now())
}

// Now provides a value for time.Now() that can be overriden in tests.
func (gt clock) Now() time.Time {
return gt.now()
}

// ClockOpt is a functional option to change the behavior of a clock value made by NewClock.
// It is primarily intended as a way to inject an alternate time.Now() callback (WithNow) for testing.
type ClockOpt func(*clock)

// WithNow allows tests in particular to inject an alternate implementation of time.Now (vs using system time)
func WithNow(n Now) ClockOpt {
return func(gt *clock) {
gt.now = n
}
}

// NewClock constructs a clock value using the given time value. Optional ClockOpt can be provided.
// If an implementation of the Now function type is not provided (via WithNow), time.Now (system time) will be used by default.
func NewClock(t time.Time, opts ...ClockOpt) clock {
gt := clock{Time: t}
for _, o := range opts {
o(&gt)
}
if gt.now == nil {
gt.now = time.Now
}
return gt
}

// Now is a function that can return the current time. This will be time.Now by default, but can be overridden for tests.
type Now func() time.Time
31 beacon-chain/blockchain/clock_test.go Normal file

@@ -0,0 +1,31 @@
package blockchain

import (
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/testing/require"
"testing"
"time"
)

func TestClock_GenesisTime(t *testing.T) {
n := time.Now()
gt := NewClock(n)
gtt := gt.GenesisTime()
require.Equal(t, gt.Time, gtt)
require.Equal(t, n, gtt)
}

func TestWithNow(t *testing.T) {
genUnix := time.Unix(0, 0)
var expectedSlots uint64 = 7200 // a day worth of slots
now := genUnix.Add(time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot * expectedSlots))
fn := func() time.Time {
return now
}

gt := NewClock(genUnix, WithNow(fn))
// in this scenario, "genesis" is exactly 24 hours before "now"
// so "now" should be 7200 slots after "genesis"
require.Equal(t, types.Slot(expectedSlots), gt.CurrentSlot())
}
@@ -4,7 +4,9 @@ import "github.com/pkg/errors"

var (
// ErrInvalidPayload is returned when the payload is invalid
ErrInvalidPayload = errors.New("recevied an INVALID payload from execution engine")
ErrInvalidPayload = invalidBlock{error: errors.New("received an INVALID payload from execution engine")}
// ErrInvalidBlockHashPayloadStatus is returned when the payload has invalid block hash.
ErrInvalidBlockHashPayloadStatus = invalidBlock{error: errors.New("received an INVALID_BLOCK_HASH payload from execution engine")}
// ErrUndefinedExecutionEngineError is returned when the execution engine returns an error that is not defined
ErrUndefinedExecutionEngineError = errors.New("received an undefined ee error")
// errNilFinalizedInStore is returned when a nil finalized checkpt is returned from store.

@@ -28,7 +30,7 @@ var (
// errWSBlockNotFoundInEpoch is returned when a block is not found in the WS cache or DB within epoch.
errWSBlockNotFoundInEpoch = errors.New("weak subjectivity root not found in db within epoch")
// errNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
errNotDescendantOfFinalized = invalidBlock{errors.New("not descendant of finalized checkpoint")}
errNotDescendantOfFinalized = invalidBlock{error: errors.New("not descendant of finalized checkpoint")}
)

// An invalid block is the block that fails state transition based on the core protocol rules.

@@ -38,17 +40,25 @@ var (
// The block is deemed invalid according to execution layer client.
// The block violates certain fork choice rules (before finalized slot, not finalized ancestor)
type invalidBlock struct {
invalidAncestorRoots [][32]byte
error
root [32]byte
}

type invalidBlockError interface {
Error() string
InvalidBlock() bool
InvalidAncestorRoots() [][32]byte
BlockRoot() [32]byte
}

// InvalidBlock returns true for `invalidBlock`.
func (e invalidBlock) InvalidBlock() bool {
return true
// BlockRoot returns the invalid block root.
func (e invalidBlock) BlockRoot() [32]byte {
return e.root
}

// InvalidAncestorRoots returns an optional list of invalid roots of the invalid block which leads up last valid root.
func (e invalidBlock) InvalidAncestorRoots() [][32]byte {
return e.invalidAncestorRoots
}

// IsInvalidBlock returns true if the error has `invalidBlock`.

@@ -56,9 +66,34 @@ func IsInvalidBlock(e error) bool {
if e == nil {
return false
}
d, ok := e.(invalidBlockError)
_, ok := e.(invalidBlockError)
if !ok {
return IsInvalidBlock(errors.Unwrap(e))
}
return d.InvalidBlock()
return true
}

// InvalidBlockRoot returns the invalid block root. If the error
// doesn't have an invalid blockroot. [32]byte{} is returned.
func InvalidBlockRoot(e error) [32]byte {
if e == nil {
return [32]byte{}
}
d, ok := e.(invalidBlockError)
if !ok {
return [32]byte{}
}
return d.BlockRoot()
}

// InvalidAncestorRoots returns a list of invalid roots up to last valid root.
func InvalidAncestorRoots(e error) [][32]byte {
if e == nil {
return [][32]byte{}
}
d, ok := e.(invalidBlockError)
if !ok {
return [][32]byte{}
}
return d.InvalidAncestorRoots()
}
@@ -8,10 +8,29 @@ import (
)

func TestIsInvalidBlock(t *testing.T) {
require.Equal(t, false, IsInvalidBlock(ErrInvalidPayload))
err := invalidBlock{ErrInvalidPayload}
require.Equal(t, true, IsInvalidBlock(ErrInvalidPayload)) // Already wrapped.
err := invalidBlock{error: ErrInvalidPayload}
require.Equal(t, true, IsInvalidBlock(err))

newErr := errors.Wrap(err, "wrap me")
require.Equal(t, true, IsInvalidBlock(newErr))
require.DeepEqual(t, [][32]byte(nil), InvalidAncestorRoots(err))
}

func TestInvalidBlockRoot(t *testing.T) {
require.Equal(t, [32]byte{}, InvalidBlockRoot(ErrUndefinedExecutionEngineError))
require.Equal(t, [32]byte{}, InvalidBlockRoot(ErrInvalidPayload))

err := invalidBlock{error: ErrInvalidPayload, root: [32]byte{'a'}}
require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(err))
require.DeepEqual(t, [][32]byte(nil), InvalidAncestorRoots(err))
}

func TestInvalidRoots(t *testing.T) {
roots := [][32]byte{{'d'}, {'b'}, {'c'}}
err := invalidBlock{error: ErrInvalidPayload, root: [32]byte{'a'}, invalidAncestorRoots: roots}

require.Equal(t, true, IsInvalidBlock(err))
require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(err))
require.DeepEqual(t, roots, InvalidAncestorRoots(err))
}
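The helpers above let callers classify a failed block import without exporting the concrete error type, and the classification survives wrapping because IsInvalidBlock walks the unwrap chain. A minimal, self-contained sketch of the same pattern using only the standard library; the local types mirror the unexported invalidBlock/invalidBlockError pair and are not Prysm's:

package main

import (
    "errors"
    "fmt"
)

// Local mirror of the unexported invalidBlock / invalidBlockError pattern above.
type invalidBlock struct {
    error
    root [32]byte
}

func (e invalidBlock) InvalidBlock() bool  { return true }
func (e invalidBlock) BlockRoot() [32]byte { return e.root }

type invalidBlockError interface {
    Error() string
    InvalidBlock() bool
    BlockRoot() [32]byte
}

// isInvalidBlock walks the wrap chain, matching the recursive Unwrap logic above.
func isInvalidBlock(e error) bool {
    if e == nil {
        return false
    }
    if _, ok := e.(invalidBlockError); ok {
        return true
    }
    return isInvalidBlock(errors.Unwrap(e))
}

func main() {
    base := invalidBlock{error: errors.New("received an INVALID payload"), root: [32]byte{'a'}}
    wrapped := fmt.Errorf("importing block: %w", base) // wrapping does not hide the classification

    fmt.Println(isInvalidBlock(wrapped))              // true
    fmt.Println(isInvalidBlock(errors.New("io err"))) // false
}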
@@ -39,24 +39,27 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho

headBlk := arg.headBlock
if headBlk == nil || headBlk.IsNil() || headBlk.Body().IsNil() {
return nil, errors.New("nil head block")
log.Error("Head block is nil")
return nil, nil
}
// Must not call fork choice updated until the transition conditions are met on the Pow network.
isExecutionBlk, err := blocks.IsExecutionBlock(headBlk.Body())
if err != nil {
return nil, errors.Wrap(err, "could not determine if block is execution block")
log.WithError(err).Error("Could not determine if head block is execution block")
return nil, nil
}
if !isExecutionBlk {
return nil, nil
}
headPayload, err := headBlk.Body().ExecutionPayload()
headPayload, err := headBlk.Body().Execution()
if err != nil {
return nil, errors.Wrap(err, "could not get execution payload")
log.WithError(err).Error("Could not get execution payload for head block")
return nil, nil
}
finalizedHash := s.ForkChoicer().FinalizedPayloadBlockHash()
justifiedHash := s.ForkChoicer().JustifiedPayloadBlockHash()
fcs := &enginev1.ForkchoiceState{
HeadBlockHash: headPayload.BlockHash,
HeadBlockHash: headPayload.BlockHash(),
SafeBlockHash: justifiedHash[:],
FinalizedBlockHash: finalizedHash[:],
}

@@ -64,7 +67,8 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
hasAttr, attr, proposerId, err := s.getPayloadAttribute(ctx, arg.headState, nextSlot)
if err != nil {
return nil, errors.Wrap(err, "could not get payload attribute")
log.WithError(err).Error("Could not get head payload attribute")
return nil, nil
}

payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, attr)

@@ -74,32 +78,41 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
forkchoiceUpdatedOptimisticNodeCount.Inc()
log.WithFields(logrus.Fields{
"headSlot": headBlk.Slot(),
"headPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
"headPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash())),
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash[:])),
}).Info("Called fork choice updated with optimistic block")
return payloadID, s.optimisticCandidateBlock(ctx, headBlk)
err := s.optimisticCandidateBlock(ctx, headBlk)
if err != nil {
log.WithError(err).Error("Optimistic block failed to be candidate")
}
return payloadID, nil
case powchain.ErrInvalidPayloadStatus:
newPayloadInvalidNodeCount.Inc()
headRoot := arg.headRoot
invalidRoots, err := s.ForkChoicer().SetOptimisticToInvalid(ctx, headRoot, bytesutil.ToBytes32(headBlk.ParentRoot()), bytesutil.ToBytes32(lastValidHash))
if err != nil {
return nil, err
log.WithError(err).Error("Could not set head root to invalid")
return nil, nil
}
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
return nil, err
log.WithError(err).Error("Could not remove invalid block and state")
return nil, nil
}

r, err := s.cfg.ForkChoiceStore.Head(ctx, s.justifiedBalances.balances)
if err != nil {
return nil, err
log.WithError(err).Error("Could not get head root")
return nil, nil
}
b, err := s.getBlock(ctx, r)
if err != nil {
return nil, err
log.WithError(err).Error("Could not get head block")
return nil, nil
}
st, err := s.cfg.StateGen.StateByRoot(ctx, r)
if err != nil {
return nil, err
log.WithError(err).Error("Could not get head state")
return nil, nil
}
pid, err := s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: st,

@@ -107,7 +120,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
headBlock: b.Block(),
})
if err != nil {
return nil, err
return nil, err // Returning err because it's recursive here.
}

if err := s.saveHead(ctx, r, b, st); err != nil {

@@ -120,20 +133,27 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
"invalidCount": len(invalidRoots),
"newHeadRoot": fmt.Sprintf("%#x", bytesutil.Trunc(r[:])),
}).Warn("Pruned invalid blocks")
return pid, ErrInvalidPayload
return pid, invalidBlock{error: ErrInvalidPayload, root: arg.headRoot, invalidAncestorRoots: invalidRoots}

default:
return nil, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
log.WithError(err).Error(ErrUndefinedExecutionEngineError)
return nil, nil
}
}
forkchoiceUpdatedValidNodeCount.Inc()
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, arg.headRoot); err != nil {
return nil, errors.Wrap(err, "could not set block to valid")
log.WithError(err).Error("Could not set head root to valid")
return nil, nil
}
if hasAttr { // If the forkchoice update call has an attribute, update the proposer payload ID cache.
if hasAttr && payloadID != nil { // If the forkchoice update call has an attribute, update the proposer payload ID cache.
var pId [8]byte
copy(pId[:], payloadID[:])
s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nextSlot, proposerId, pId)
} else if hasAttr && payloadID == nil {
log.WithFields(logrus.Fields{
"blockHash": fmt.Sprintf("%#x", headPayload.BlockHash()),
"slot": headBlk.Slot(),
}).Error("Received nil payload ID on VALID engine response")
}
return payloadID, nil
}

@@ -148,11 +168,11 @@ func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, er
if blocks.IsPreBellatrixVersion(blk.Block().Version()) {
return params.BeaconConfig().ZeroHash, nil
}
payload, err := blk.Block().Body().ExecutionPayload()
payload, err := blk.Block().Body().Execution()
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not get execution payload")
}
return bytesutil.ToBytes32(payload.BlockHash), nil
return bytesutil.ToBytes32(payload.BlockHash()), nil
}

// notifyForkchoiceUpdate signals execution engine on a new payload.

@@ -173,14 +193,14 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
body := blk.Block().Body()
enabled, err := blocks.IsExecutionEnabledUsingHeader(postStateHeader, body)
if err != nil {
return false, errors.Wrap(invalidBlock{err}, "could not determine if execution is enabled")
return false, errors.Wrap(invalidBlock{error: err}, "could not determine if execution is enabled")
}
if !enabled {
return true, nil
}
payload, err := body.ExecutionPayload()
payload, err := body.Execution()
if err != nil {
return false, errors.Wrap(invalidBlock{err}, "could not get execution payload")
return false, errors.Wrap(invalidBlock{error: err}, "could not get execution payload")
}
lastValidHash, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
switch err {

@@ -191,7 +211,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
newPayloadOptimisticNodeCount.Inc()
log.WithFields(logrus.Fields{
"slot": blk.Block().Slot(),
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
}).Info("Called new payload with optimistic block")
return false, s.optimisticCandidateBlock(ctx, blk.Block())
case powchain.ErrInvalidPayloadStatus:

@@ -212,7 +232,13 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
"blockRoot": fmt.Sprintf("%#x", root),
"invalidCount": len(invalidRoots),
}).Warn("Pruned invalid blocks")
return false, invalidBlock{ErrInvalidPayload}
return false, invalidBlock{
invalidAncestorRoots: invalidRoots,
error: ErrInvalidPayload,
}
case powchain.ErrInvalidBlockHashPayloadStatus:
newPayloadInvalidNodeCount.Inc()
return false, ErrInvalidBlockHashPayloadStatus
default:
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}

@@ -289,7 +315,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
}

// Get timestamp.
t, err := slots.ToTime(uint64(s.genesisTime.Unix()), slot)
t, err := slots.ToTime(uint64(s.genesisTime().Unix()), slot)
if err != nil {
return false, nil, 0, err
}
@@ -6,6 +6,7 @@ import (
"time"

"github.com/ethereum/go-ethereum/common"
gethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"

@@ -74,10 +75,6 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
newForkchoiceErr error
errString string
}{
{
name: "nil block",
errString: "nil head block",
},
{
name: "phase0 block",
blk: func() interfaces.BeaconBlock {

@@ -191,6 +188,10 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
_, err = service.notifyForkchoiceUpdate(ctx, arg)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
if tt.errString == ErrInvalidPayload.Error() {
require.Equal(t, true, IsInvalidBlock(err))
require.Equal(t, tt.headRoot, InvalidBlockRoot(err)) // Head root should be invalid. Not block root!
}
} else {
require.NoError(t, err)
}

@@ -329,7 +330,9 @@ func Test_NotifyForkchoiceUpdateRecursive_Protoarray(t *testing.T) {
headRoot: brg,
}
_, err = service.notifyForkchoiceUpdate(ctx, a)
require.ErrorIs(t, ErrInvalidPayload, err)
require.Equal(t, true, IsInvalidBlock(err))
require.Equal(t, brf, InvalidBlockRoot(err))

// Ensure Head is D
headRoot, err = fcs.Head(ctx, service.justifiedBalances.balances)
require.NoError(t, err)

@@ -472,7 +475,9 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
headRoot: brg,
}
_, err = service.notifyForkchoiceUpdate(ctx, a)
require.ErrorIs(t, ErrInvalidPayload, err)
require.Equal(t, true, IsInvalidBlock(err))
require.Equal(t, brf, InvalidBlockRoot(err))

// Ensure Head is D
headRoot, err = fcs.Head(ctx, service.justifiedBalances.balances)
require.NoError(t, err)

@@ -530,7 +535,7 @@ func Test_NotifyNewPayload(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
service.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
service.SetGenesisTime(time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))
r, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}

@@ -673,16 +678,40 @@ func Test_NotifyNewPayload(t *testing.T) {
newPayloadErr: ErrUndefinedExecutionEngineError,
errString: ErrUndefinedExecutionEngineError.Error(),
},
{
name: "invalid block hash error from ee",
postState: bellatrixState,
blk: func() interfaces.SignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
b, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
return b
}(),
newPayloadErr: ErrInvalidBlockHashPayloadStatus,
errString: ErrInvalidBlockHashPayloadStatus.Error(),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
e := &mockPOW.EngineClient{ErrNewPayload: tt.newPayloadErr, BlockByHashMap: map[[32]byte]*v1.ExecutionBlock{}}
e.BlockByHashMap[[32]byte{'a'}] = &v1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
Header: gethtypes.Header{
ParentHash: common.BytesToHash([]byte("b")),
},
TotalDifficulty: "0x2",
}
e.BlockByHashMap[[32]byte{'b'}] = &v1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
Header: gethtypes.Header{
ParentHash: common.BytesToHash([]byte("3")),
},
TotalDifficulty: "0x1",
}
service.cfg.ExecutionEngineCaller = e

@@ -735,11 +764,15 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
require.NoError(t, err)
e := &mockPOW.EngineClient{BlockByHashMap: map[[32]byte]*v1.ExecutionBlock{}}
e.BlockByHashMap[[32]byte{'a'}] = &v1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
Header: gethtypes.Header{
ParentHash: common.BytesToHash([]byte("b")),
},
TotalDifficulty: "0x2",
}
e.BlockByHashMap[[32]byte{'b'}] = &v1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
Header: gethtypes.Header{
ParentHash: common.BytesToHash([]byte("3")),
},
TotalDifficulty: "0x1",
}
service.cfg.ExecutionEngineCaller = e

@@ -767,7 +800,7 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
require.NoError(t, err)

params.BeaconConfig().SafeSlotsToImportOptimistically = 128
service.genesisTime = time.Now().Add(-time.Second * 12 * 2 * 128)
service.SetGenesisTime(time.Now().Add(-time.Second * 12 * 2 * 128))

parentBlk := util.NewBeaconBlockBellatrix()
wrappedParentBlock, err := wrapper.WrappedSignedBeaconBlock(parentBlk)

@@ -870,7 +903,7 @@ func Test_IsOptimisticShallowExecutionParent(t *testing.T) {
require.NoError(t, err)

params.BeaconConfig().SafeSlotsToImportOptimistically = 128
service.genesisTime = time.Now().Add(-time.Second * 12 * 2 * 128)
service.SetGenesisTime(time.Now().Add(-time.Second * 12 * 2 * 128))
payload := &v1.ExecutionPayload{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
@@ -220,7 +220,7 @@ func (s *Service) headBlock() interfaces.SignedBeaconBlock {
// It does a full copy on head state for immutability.
// This is a lock free version.
func (s *Service) headState(ctx context.Context) state.BeaconState {
_, span := trace.StartSpan(ctx, "blockChain.headState")
ctx, span := trace.StartSpan(ctx, "blockChain.headState")
defer span.End()

return s.head.state.Copy()
@@ -219,7 +219,7 @@ func TestSaveOrphanedAtts_NoCommonAncestor(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
service.SetGenesisTime(time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))

// Chain setup
// 0 -- 1 -- 2 -- 3

@@ -273,7 +273,7 @@ func TestSaveOrphanedAtts(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
service.SetGenesisTime(time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))

// Chain setup
// 0 -- 1 -- 2 -- 3

@@ -339,7 +339,7 @@ func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+2)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
service.SetGenesisTime(time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+2)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))

// Chain setup
// 0 -- 1 -- 2

@@ -393,7 +393,7 @@ func TestSaveOrphanedAtts_NoCommonAncestor_DoublyLinkedTrie(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
service.SetGenesisTime(time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))

// Chain setup
// 0 -- 1 -- 2 -- 3

@@ -452,7 +452,7 @@ func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
service.SetGenesisTime(time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))

// Chain setup
// 0 -- 1 -- 2 -- 3

@@ -522,7 +522,7 @@ func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+2)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
service.SetGenesisTime(time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+2)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second))

// Chain setup
// 0 -- 1 -- 2
@@ -29,7 +29,7 @@ func TestService_getBlock(t *testing.T) {
// block in cache
b, err := wrapper.WrappedSignedBeaconBlock(b1)
require.NoError(t, err)
s.saveInitSyncBlock(ctx, r1, b)
require.NoError(t, s.saveInitSyncBlock(ctx, r1, b))
got, err := s.getBlock(ctx, r1)
require.NoError(t, err)
require.DeepEqual(t, b, got)

@@ -59,7 +59,7 @@ func TestService_hasBlockInInitSyncOrDB(t *testing.T) {
// block in cache
b, err := wrapper.WrappedSignedBeaconBlock(b1)
require.NoError(t, err)
s.saveInitSyncBlock(ctx, r1, b)
require.NoError(t, s.saveInitSyncBlock(ctx, r1, b))
require.Equal(t, true, s.hasBlockInInitSyncOrDB(ctx, r1))

// block in db
@@ -45,12 +45,16 @@ func logStateTransitionData(b interfaces.BeaconBlock) error {
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
}
if b.Version() == version.Bellatrix {
p, err := b.Body().ExecutionPayload()
p, err := b.Body().Execution()
if err != nil {
return err
}
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash)))
log = log.WithField("txCount", len(p.Transactions))
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
txs, err := p.Transactions()
if err != nil {
return err
}
log = log.WithField("txCount", len(txs))
}
log.Info("Finished applying state transition")
return nil

@@ -98,18 +102,18 @@ func logPayload(block interfaces.BeaconBlock) error {
if !isExecutionBlk {
return nil
}
payload, err := block.Body().ExecutionPayload()
payload, err := block.Body().Execution()
if err != nil {
return err
}
if payload.GasLimit == 0 {
if payload.GasLimit() == 0 {
return errors.New("gas limit should not be 0")
}
gasUtilized := float64(payload.GasUsed) / float64(payload.GasLimit)
gasUtilized := float64(payload.GasUsed()) / float64(payload.GasLimit())

log.WithFields(logrus.Fields{
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.ParentHash)),
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.ParentHash())),
"blockNumber": payload.BlockNumber,
"gasUtilized": fmt.Sprintf("%.2f", gasUtilized),
}).Debug("Synced new payload")
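The recurring edit across these files replaces direct reads of ExecutionPayload struct fields (payload.BlockHash, payload.GasLimit) with accessor methods obtained through Execution(), so callers stop depending on one concrete payload version. A minimal sketch of that shape with hypothetical local types, not Prysm's interfaces.ExecutionData:

package main

import "fmt"

// executionData is a hypothetical stand-in for the accessor-style interface used above:
// callers read payload fields through methods instead of touching a concrete struct.
type executionData interface {
    BlockHash() []byte
    GasLimit() uint64
    GasUsed() uint64
}

// bellatrixPayload is one concrete payload version hidden behind the interface.
type bellatrixPayload struct {
    blockHash []byte
    gasLimit  uint64
    gasUsed   uint64
}

func (p bellatrixPayload) BlockHash() []byte { return p.blockHash }
func (p bellatrixPayload) GasLimit() uint64  { return p.gasLimit }
func (p bellatrixPayload) GasUsed() uint64   { return p.gasUsed }

// logPayload mirrors the logging code above: it only needs the interface,
// so a future payload version can be passed in without changing this function.
func logPayload(p executionData) error {
    if p.GasLimit() == 0 {
        return fmt.Errorf("gas limit should not be 0")
    }
    gasUtilized := float64(p.GasUsed()) / float64(p.GasLimit())
    fmt.Printf("blockHash=%#x gasUtilized=%.2f\n", p.BlockHash(), gasUtilized)
    return nil
}

func main() {
    p := bellatrixPayload{blockHash: []byte{0xab, 0xcd}, gasLimit: 30_000_000, gasUsed: 12_345_678}
    _ = logPayload(p)
}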
@@ -35,7 +35,7 @@ func TestReportEpochMetrics_SlashedValidatorOutOfBound(t *testing.T) {
require.NoError(t, err)
v.Slashed = true
require.NoError(t, h.UpdateValidatorAtIndex(0, v))
require.NoError(t, h.AppendCurrentEpochAttestations(&eth.PendingAttestation{InclusionDelay: 1, Data: util.NewAttestationUtil().HydrateAttestationData(&eth.AttestationData{})}))
require.NoError(t, h.AppendCurrentEpochAttestations(&eth.PendingAttestation{InclusionDelay: 1, Data: util.HydrateAttestationData(&eth.AttestationData{})}))
err = reportEpochMetrics(context.Background(), h, h)
require.ErrorContains(t, "slot 0 out of bounds", err)
}
@@ -15,7 +15,6 @@ import (
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/sirupsen/logrus"
)

@@ -41,17 +40,17 @@ func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.SignedBea
if err := wrapper.BeaconBlockIsNil(b); err != nil {
return err
}
payload, err := b.Block().Body().ExecutionPayload()
payload, err := b.Block().Body().Execution()
if err != nil {
return err
}
if payload == nil {
if payload.IsNil() {
return errors.New("nil execution payload")
}
if err := validateTerminalBlockHash(b.Block().Slot(), payload); err != nil {
return errors.Wrap(err, "could not validate terminal block hash")
}
mergeBlockParentHash, mergeBlockTD, err := s.getBlkParentHashAndTD(ctx, payload.ParentHash)
mergeBlockParentHash, mergeBlockTD, err := s.getBlkParentHashAndTD(ctx, payload.ParentHash())
if err != nil {
return errors.Wrap(err, "could not get merge block parent hash and total difficulty")
}

@@ -66,12 +65,12 @@ func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.SignedBea
if !valid {
err := fmt.Errorf("invalid TTD, configTTD: %s, currentTTD: %s, parentTTD: %s",
params.BeaconConfig().TerminalTotalDifficulty, mergeBlockTD, mergeBlockParentTD)
return invalidBlock{err}
return invalidBlock{error: err}
}

log.WithFields(logrus.Fields{
"slot": b.Block().Slot(),
"mergeBlockHash": common.BytesToHash(payload.ParentHash).String(),
"mergeBlockHash": common.BytesToHash(payload.ParentHash()).String(),
"mergeBlockParentHash": common.BytesToHash(mergeBlockParentHash).String(),
"terminalTotalDifficulty": params.BeaconConfig().TerminalTotalDifficulty,
"mergeBlockTotalDifficulty": mergeBlockTD,

@@ -85,7 +84,7 @@ func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.SignedBea

// getBlkParentHashAndTD retrieves the parent hash and total difficulty of the given block.
func (s *Service) getBlkParentHashAndTD(ctx context.Context, blkHash []byte) ([]byte, *uint256.Int, error) {
blk, err := s.cfg.ExecutionEngineCaller.ExecutionBlockByHash(ctx, common.BytesToHash(blkHash))
blk, err := s.cfg.ExecutionEngineCaller.ExecutionBlockByHash(ctx, common.BytesToHash(blkHash), false /* no txs */)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get pow block")
}

@@ -100,7 +99,7 @@ func (s *Service) getBlkParentHashAndTD(ctx context.Context, blkHash []byte) ([]
if overflows {
return nil, nil, errors.New("total difficulty overflows")
}
return blk.ParentHash, blkTDUint256, nil
return blk.ParentHash[:], blkTDUint256, nil
}

// validateTerminalBlockHash validates if the merge block is a valid terminal PoW block.

@@ -110,14 +109,14 @@ func (s *Service) getBlkParentHashAndTD(ctx context.Context, blkHash []byte) ([]
// assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
// assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH
// return
func validateTerminalBlockHash(blkSlot types.Slot, payload *enginev1.ExecutionPayload) error {
func validateTerminalBlockHash(blkSlot types.Slot, payload interfaces.ExecutionData) error {
if bytesutil.ToBytes32(params.BeaconConfig().TerminalBlockHash.Bytes()) == [32]byte{} {
return nil
}
if params.BeaconConfig().TerminalBlockHashActivationEpoch > slots.ToEpoch(blkSlot) {
|
||||
return errors.New("terminal block hash activation epoch not reached")
|
||||
}
|
||||
if !bytes.Equal(payload.ParentHash, params.BeaconConfig().TerminalBlockHash.Bytes()) {
|
||||
if !bytes.Equal(payload.ParentHash(), params.BeaconConfig().TerminalBlockHash.Bytes()) {
|
||||
return errors.New("parent hash does not match terminal block hash")
|
||||
}
|
||||
return nil
|
||||
|
||||
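The spec pseudocode quoted in the comments above reduces to three checks: skip when no terminal block hash override is configured, require the activation epoch to have been reached, and require the payload's parent hash to equal the configured terminal block hash. A standalone, simplified sketch of that rule (epoch and hash types are simplified; this is not the Prysm function):

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
)

// checkTerminalBlockHash is a hypothetical standalone version of the rule in the
// spec comments above, with plain uint64 epochs and byte-slice hashes.
func checkTerminalBlockHash(blockEpoch, activationEpoch uint64, terminalBlockHash, payloadParentHash []byte) error {
	if bytes.Equal(terminalBlockHash, make([]byte, 32)) {
		return nil // terminal block hash override not configured
	}
	if activationEpoch > blockEpoch {
		return errors.New("terminal block hash activation epoch not reached")
	}
	if !bytes.Equal(payloadParentHash, terminalBlockHash) {
		return errors.New("parent hash does not match terminal block hash")
	}
	return nil
}

func main() {
	tbh := append([]byte{0x01}, make([]byte, 31)...)
	fmt.Println(checkTerminalBlockHash(1, 0, tbh, tbh)) // <nil>
	fmt.Println(checkTerminalBlockHash(1, 5, tbh, tbh)) // activation epoch not reached
}
```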
@@ -6,12 +6,12 @@ import (
"math/big"
"testing"

gethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/holiman/uint256"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
mocks "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
@@ -120,12 +120,19 @@ func Test_validateMergeBlock(t *testing.T) {

engine := &mocks.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}}
service.cfg.ExecutionEngineCaller = engine
engine.BlockByHashMap[[32]byte{'a'}] = &enginev1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
a := [32]byte{'a'}
b := [32]byte{'b'}
mergeBlockParentHash := [32]byte{'3'}
engine.BlockByHashMap[a] = &enginev1.ExecutionBlock{
Header: gethtypes.Header{
ParentHash: b,
},
TotalDifficulty: "0x2",
}
engine.BlockByHashMap[[32]byte{'b'}] = &enginev1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
engine.BlockByHashMap[b] = &enginev1.ExecutionBlock{
Header: gethtypes.Header{
ParentHash: mergeBlockParentHash,
},
TotalDifficulty: "0x1",
}
blk := &ethpb.SignedBeaconBlockBellatrix{
@@ -133,18 +140,18 @@ func Test_validateMergeBlock(t *testing.T) {
Slot: 1,
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &enginev1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
ParentHash: a[:],
},
},
},
}
b, err := wrapper.WrappedSignedBeaconBlock(blk)
bk, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, service.validateMergeBlock(ctx, b))
require.NoError(t, service.validateMergeBlock(ctx, bk))

cfg.TerminalTotalDifficulty = "1"
params.OverrideBeaconConfig(cfg)
err = service.validateMergeBlock(ctx, b)
err = service.validateMergeBlock(ctx, bk)
require.ErrorContains(t, "invalid TTD, configTTD: 1, currentTTD: 2, parentTTD: 1", err)
require.Equal(t, true, IsInvalidBlock(err))
}
@@ -167,7 +174,9 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
p := [32]byte{'b'}
td := "0x1"
engine.BlockByHashMap[h] = &enginev1.ExecutionBlock{
ParentHash: p[:],
Header: gethtypes.Header{
ParentHash: p,
},
TotalDifficulty: td,
}
parentHash, totalDifficulty, err := service.getBlkParentHashAndTD(ctx, h[:])
@@ -183,14 +192,18 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
require.ErrorContains(t, "pow block is nil", err)

engine.BlockByHashMap[h] = &enginev1.ExecutionBlock{
ParentHash: p[:],
Header: gethtypes.Header{
ParentHash: p,
},
TotalDifficulty: "1",
}
_, _, err = service.getBlkParentHashAndTD(ctx, h[:])
require.ErrorContains(t, "could not decode merge block total difficulty: hex string without 0x prefix", err)

engine.BlockByHashMap[h] = &enginev1.ExecutionBlock{
ParentHash: p[:],
Header: gethtypes.Header{
ParentHash: p,
},
TotalDifficulty: "0XFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
}
_, _, err = service.getBlkParentHashAndTD(ctx, h[:])
@@ -198,16 +211,22 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
}

func Test_validateTerminalBlockHash(t *testing.T) {
require.NoError(t, validateTerminalBlockHash(1, &enginev1.ExecutionPayload{}))
wrapped, err := wrapper.WrappedExecutionPayload(&enginev1.ExecutionPayload{})
require.NoError(t, err)
require.NoError(t, validateTerminalBlockHash(1, wrapped))

cfg := params.BeaconConfig()
cfg.TerminalBlockHash = [32]byte{0x01}
params.OverrideBeaconConfig(cfg)
require.ErrorContains(t, "terminal block hash activation epoch not reached", validateTerminalBlockHash(1, &enginev1.ExecutionPayload{}))
require.ErrorContains(t, "terminal block hash activation epoch not reached", validateTerminalBlockHash(1, wrapped))

cfg.TerminalBlockHashActivationEpoch = 0
params.OverrideBeaconConfig(cfg)
require.ErrorContains(t, "parent hash does not match terminal block hash", validateTerminalBlockHash(1, &enginev1.ExecutionPayload{}))
require.ErrorContains(t, "parent hash does not match terminal block hash", validateTerminalBlockHash(1, wrapped))

require.NoError(t, validateTerminalBlockHash(1, &enginev1.ExecutionPayload{ParentHash: cfg.TerminalBlockHash.Bytes()}))
wrapped, err = wrapper.WrappedExecutionPayload(&enginev1.ExecutionPayload{
ParentHash: cfg.TerminalBlockHash.Bytes(),
})
require.NoError(t, err)
require.NoError(t, validateTerminalBlockHash(1, wrapped))
}
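The tests above exercise how the engine block's `TotalDifficulty` hex string is decoded and bounds-checked ("hex string without 0x prefix", "total difficulty overflows"). A small sketch of that kind of parsing using `hexutil` and `holiman/uint256`; the real helper in Prysm may decode differently, so treat this as an illustration only:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/holiman/uint256"
)

// decodeTotalDifficulty decodes a "0x"-prefixed hex total difficulty into a
// uint256 with an explicit overflow check.
func decodeTotalDifficulty(td string) (*uint256.Int, error) {
	bigTD, err := hexutil.DecodeBig(td)
	if err != nil {
		return nil, fmt.Errorf("could not decode merge block total difficulty: %v", err)
	}
	tdUint256, overflows := uint256.FromBig(bigTD)
	if overflows {
		return nil, fmt.Errorf("total difficulty overflows")
	}
	return tdUint256, nil
}

func main() {
	td, err := decodeTotalDifficulty("0x2")
	fmt.Println(td.Uint64(), err) // 2 <nil>
	_, err = decodeTotalDifficulty("1")
	fmt.Println(err) // hexutil rejects the missing 0x prefix, as in the test above
}
```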
@@ -59,7 +59,7 @@ func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation) error
return err
}

genesisTime := uint64(s.genesisTime.Unix())
genesisTime := uint64(s.genesisTime().Unix())

// Verify attestation target is from current epoch or previous epoch.
if err := verifyAttTargetEpoch(ctx, genesisTime, uint64(time.Now().Unix()), tgt); err != nil {

@@ -70,7 +70,6 @@ func TestStore_OnAttestation_ErrorConditions_ProtoArray(t *testing.T) {
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, blkWithValidStateRoot))

au := util.AttestationUtil{}
tests := []struct {
name string
a *ethpb.Attestation
@@ -78,17 +77,17 @@ func TestStore_OnAttestation_ErrorConditions_ProtoArray(t *testing.T) {
}{
{
name: "attestation's data slot not aligned with target vote",
a: au.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}),
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}),
wantedErr: "slot 32 does not match target epoch 0",
},
{
name: "no pre state for attestations's target block",
a: au.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}}),
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}}),
wantedErr: "could not get pre state for epoch 0",
},
{
name: "process attestation doesn't match current epoch",
a: au.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Epoch: 100,
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Epoch: 100,
Root: BlkWithStateBadAttRoot[:]}}}),
wantedErr: "target epoch 100 does not match current epoch",
},
@@ -177,7 +176,6 @@ func TestStore_OnAttestation_ErrorConditions_DoublyLinkedTree(t *testing.T) {
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, blkWithValidStateRoot))

au := util.AttestationUtil{}
tests := []struct {
name string
a *ethpb.Attestation
@@ -185,17 +183,17 @@ func TestStore_OnAttestation_ErrorConditions_DoublyLinkedTree(t *testing.T) {
}{
{
name: "attestation's data slot not aligned with target vote",
a: au.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}),
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}),
wantedErr: "slot 32 does not match target epoch 0",
},
{
name: "no pre state for attestations's target block",
a: au.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}}),
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}}),
wantedErr: "could not get pre state for epoch 0",
},
{
name: "process attestation doesn't match current epoch",
a: au.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Epoch: 100,
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Epoch: 100,
Root: BlkWithStateBadAttRoot[:]}}}),
wantedErr: "target epoch 100 does not match current epoch",
},
@@ -251,7 +249,7 @@ func TestStore_OnAttestation_Ok_ProtoArray(t *testing.T) {
genesisState, pks := util.DeterministicGenesisState(t, 64)
service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
att, err := util.NewAttestationUtil().GenerateAttestations(genesisState, pks, 1, 0, false)
att, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
require.NoError(t, err)
tRoot := bytesutil.ToBytes32(att[0].Data.Target.Root)
copied := genesisState.Copy()
@@ -281,7 +279,7 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
genesisState, pks := util.DeterministicGenesisState(t, 64)
service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
att, err := util.NewAttestationUtil().GenerateAttestations(genesisState, pks, 1, 0, false)
att, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
require.NoError(t, err)
tRoot := bytesutil.ToBytes32(att[0].Data.Target.Root)
copied := genesisState.Copy()
@@ -424,7 +422,8 @@ func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
d := util.NewAttestationUtil().HydrateAttestationData(&ethpb.AttestationData{})

d := util.HydrateAttestationData(&ethpb.AttestationData{})
require.Equal(t, errBlockNotFoundInCacheOrDB, service.verifyBeaconBlock(ctx, d))
}
@@ -17,7 +17,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/consensus-types/forks/bellatrix"
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
@@ -97,7 +96,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
ctx, span := trace.StartSpan(ctx, "blockChain.onBlock")
defer span.End()
if err := wrapper.BeaconBlockIsNil(signed); err != nil {
return invalidBlock{err}
return invalidBlock{error: err}
}
b := signed.Block()

@@ -118,7 +117,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
}
postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
if err != nil {
return invalidBlock{err}
return invalidBlock{error: err}
}
postStateVersion, postStateHeader, err := getStateVersionAndPayload(postState)
if err != nil {
@@ -126,15 +125,19 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
}
isValidPayload, err := s.notifyNewPayload(ctx, postStateVersion, postStateHeader, signed)
if err != nil {
return fmt.Errorf("could not verify new payload: %v", err)
return errors.Wrap(err, "could not validate new payload")
}
if isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, preStateVersion, preStateHeader, signed); err != nil {
return err
}
}
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
return err

if err := s.cfg.BeaconDB.SaveBlock(ctx, signed); err != nil {
return errors.Wrapf(err, "could not save block from slot %d", signed.Block().Slot())
}
if err := s.cfg.StateGen.SaveState(ctx, blockRoot, postState); err != nil {
return errors.Wrap(err, "could not save state")
}

if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
@@ -184,7 +187,9 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
if err != nil {
log.WithError(err).Warn("Could not update head")
}
s.notifyEngineIfChangedHead(ctx, headRoot)
if err := s.notifyEngineIfChangedHead(ctx, headRoot); err != nil {
return err
}

if err := s.pruneCanonicalAttsFromPool(ctx, blockRoot, signed); err != nil {
return err
@@ -293,7 +298,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
}

if err := wrapper.BeaconBlockIsNil(blks[0]); err != nil {
return invalidBlock{err}
return invalidBlock{error: err}
}
b := blks[0].Block()

@@ -341,7 +346,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac

set, preState, err = transition.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, b)
if err != nil {
return invalidBlock{err}
return invalidBlock{error: err}
}
// Save potential boundary states.
if slots.IsEpochStart(preState.Slot()) {
@@ -362,7 +367,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
}
verify, err := sigSet.Verify()
if err != nil {
return invalidBlock{err}
return invalidBlock{error: err}
}
if !verify {
return errors.New("batch block signature verification failed")
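Several hunks above switch the positional literal `invalidBlock{err}` to the named form `invalidBlock{error: err}`, and the tests check the result with `IsInvalidBlock`. A minimal sketch of what such a wrapper could look like, inferred only from those call sites (the real Prysm type may carry more fields):

```go
package main

import (
	"errors"
	"fmt"
)

// invalidBlock marks an error as caused by an invalid block by embedding the
// error interface, which is what makes the `invalidBlock{error: err}` literal
// in the diff possible.
type invalidBlock struct {
	error
}

// IsInvalidBlock reports whether err (or anything it wraps) is an invalidBlock.
func IsInvalidBlock(err error) bool {
	if err == nil {
		return false
	}
	var ib invalidBlock
	return errors.As(err, &ib)
}

func main() {
	err := invalidBlock{error: errors.New("bad state root")}
	fmt.Println(IsInvalidBlock(err))                 // true
	fmt.Println(IsInvalidBlock(errors.New("other"))) // false
}
```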
@@ -537,20 +542,6 @@ func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashing
}
}

// This saves post state info to DB or cache. This also saves post state info to fork choice store.
// Post state info consists of processed block and state. Do not call this method unless the block and state are verified.
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interfaces.SignedBeaconBlock, st state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.savePostStateInfo")
defer span.End()
if err := s.cfg.BeaconDB.SaveBlock(ctx, b); err != nil {
return errors.Wrapf(err, "could not save block from slot %d", b.Block().Slot())
}
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
return errors.Wrap(err, "could not save state")
}
return nil
}

// This removes the attestations from the mem pool. It will only remove the attestations if input root `r` is canonical,
// meaning the block `b` is part of the canonical chain.
func (s *Service) pruneCanonicalAttsFromPool(ctx context.Context, r [32]byte, b interfaces.SignedBeaconBlock) error {
@@ -585,11 +576,15 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
}

// Skip validation if block has an empty payload.
payload, err := blk.Block().Body().ExecutionPayload()
payload, err := blk.Block().Body().Execution()
if err != nil {
return invalidBlock{err}
return invalidBlock{error: err}
}
if bellatrix.IsEmptyPayload(payload) {
isEmpty, err := wrapper.IsEmptyExecutionData(payload)
if err != nil {
return err
}
if isEmpty {
return nil
}

@@ -600,11 +595,16 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
}

// Skip validation if the block is not a merge transition block.
atTransition, err := blocks.IsMergeTransitionBlockUsingPreStatePayloadHeader(stateHeader, blk.Block().Body())
// To reach here, the payload must be non-empty. If the state header is empty, then this is the merge transition block.
wh, err := wrapper.WrappedExecutionPayloadHeader(stateHeader)
if err != nil {
return errors.Wrap(err, "could not check if merge block is terminal")
return err
}
if !atTransition {
empty, err := wrapper.IsEmptyExecutionData(wh)
if err != nil {
return err
}
if !empty {
return nil
}
return s.validateMergeBlock(ctx, blk)
@@ -24,7 +24,11 @@ import (

// CurrentSlot returns the current slot based on time.
func (s *Service) CurrentSlot() types.Slot {
return slots.CurrentSlot(uint64(s.genesisTime.Unix()))
c, err := s.WaitForClock(context.TODO())
if err != nil {
panic(err)
}
return c.CurrentSlot()
}

// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
@@ -48,7 +52,7 @@ func (s *Service) getBlockPreState(ctx context.Context, b interfaces.BeaconBlock
}

// Verify block slot time is not from the future.
if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), b.Slot(), params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
if err := slots.VerifyTime(uint64(s.genesisTime().Unix()), b.Slot(), params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
return nil, err
}

@@ -114,7 +118,7 @@ func (s *Service) VerifyFinalizedBlkDescendant(ctx context.Context, root [32]byt
bytesutil.Trunc(root[:]), fSlot, bytesutil.Trunc(bFinalizedRoot),
bytesutil.Trunc(fRoot[:]))
tracing.AnnotateError(span, err)
return invalidBlock{err}
return invalidBlock{error: err}
}
return nil
}
@@ -129,7 +133,7 @@ func (s *Service) verifyBlkFinalizedSlot(b interfaces.BeaconBlock) error {
}
if finalizedSlot >= b.Slot() {
err = fmt.Errorf("block is equal or earlier than finalized block, slot %d < slot %d", b.Slot(), finalizedSlot)
return invalidBlock{err}
return invalidBlock{error: err}
}
return nil
}
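The `slots.VerifyTime` call kept above rejects blocks whose slot start lies further in the future than the allowed gossip clock disparity. A standalone sketch of that check; the 12s slot duration and 500ms disparity below are mainnet-style assumptions, not values read from this diff:

```go
package main

import (
	"fmt"
	"time"
)

// verifySlotTime rejects a slot whose wall-clock start time is beyond now plus
// the permitted clock disparity.
func verifySlotTime(genesis time.Time, slot, secondsPerSlot uint64, disparity time.Duration, now time.Time) error {
	slotStart := genesis.Add(time.Duration(slot*secondsPerSlot) * time.Second)
	if slotStart.After(now.Add(disparity)) {
		return fmt.Errorf("slot %d is in the future: starts at %v", slot, slotStart)
	}
	return nil
}

func main() {
	genesis := time.Now().Add(-60 * time.Second)
	fmt.Println(verifySlotTime(genesis, 4, 12, 500*time.Millisecond, time.Now()))  // <nil>: slot 4 began at t=48s
	fmt.Println(verifySlotTime(genesis, 10, 12, 500*time.Millisecond, time.Now())) // error: slot 10 begins at t=120s
}
```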
@@ -9,6 +9,8 @@ import (
"testing"
"time"

"github.com/ethereum/go-ethereum/common"
gethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/pkg/errors"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
@@ -777,7 +779,7 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling_DoublyLinkedTree(t *testin

err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.ErrorIs(t, errNotDescendantOfFinalized, err)
require.Equal(t, errNotDescendantOfFinalized.Error(), err.Error())
}

// blockTree1 constructs the following tree:
@@ -871,7 +873,7 @@ func blockTree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][]byt
}

func TestCurrentSlot_HandlesOverflow(t *testing.T) {
svc := Service{genesisTime: prysmTime.Now().Add(1 * time.Hour)}
svc := Service{clock: NewClock(prysmTime.Now().Add(1 * time.Hour))}

slot := svc.CurrentSlot()
require.Equal(t, types.Slot(0), slot, "Unexpected slot")
@@ -1487,9 +1489,11 @@ func Test_getStateVersionAndPayload(t *testing.T) {
name: "bellatrix state",
st: func() state.BeaconState {
s, _ := util.DeterministicGenesisStateBellatrix(t, 1)
require.NoError(t, s.SetLatestExecutionPayloadHeader(&enginev1.ExecutionPayloadHeader{
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(&enginev1.ExecutionPayloadHeader{
BlockNumber: 1,
}))
})
require.NoError(t, err)
require.NoError(t, s.SetLatestExecutionPayloadHeader(wrappedHeader))
return s
}(),
version: version.Bellatrix,
@@ -1527,6 +1531,9 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)

aHash := common.BytesToHash([]byte("a"))
bHash := common.BytesToHash([]byte("b"))

tests := []struct {
name string
stateVersion int
@@ -1538,6 +1545,7 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
name: "state older than Bellatrix, nil payload",
stateVersion: 1,
payload: nil,
errString: "attempted to wrap nil",
},
{
name: "state older than Bellatrix, empty payload",
@@ -1557,13 +1565,14 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
name: "state older than Bellatrix, non empty payload",
stateVersion: 1,
payload: &enginev1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
ParentHash: aHash[:],
},
},
{
name: "state is Bellatrix, nil payload",
stateVersion: 2,
payload: nil,
errString: "attempted to wrap nil",
},
{
name: "state is Bellatrix, empty payload",
@@ -1583,7 +1592,7 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
name: "state is Bellatrix, non empty payload, empty header",
stateVersion: 2,
payload: &enginev1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
ParentHash: aHash[:],
},
header: &enginev1.ExecutionPayloadHeader{
ParentHash: make([]byte, fieldparams.RootLength),
@@ -1601,7 +1610,7 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
name: "state is Bellatrix, non empty payload, non empty header",
stateVersion: 2,
payload: &enginev1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
ParentHash: aHash[:],
},
header: &enginev1.ExecutionPayloadHeader{
BlockNumber: 1,
@@ -1611,20 +1620,24 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
name: "state is Bellatrix, non empty payload, nil header",
stateVersion: 2,
payload: &enginev1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
ParentHash: aHash[:],
},
errString: "nil header or block body",
errString: "attempted to wrap nil object",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
e := &mockPOW.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}}
e.BlockByHashMap[[32]byte{'a'}] = &enginev1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
e.BlockByHashMap[aHash] = &enginev1.ExecutionBlock{
Header: gethtypes.Header{
ParentHash: bHash,
},
TotalDifficulty: "0x2",
}
e.BlockByHashMap[[32]byte{'b'}] = &enginev1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
e.BlockByHashMap[bHash] = &enginev1.ExecutionBlock{
Header: gethtypes.Header{
ParentHash: common.BytesToHash([]byte("3")),
},
TotalDifficulty: "0x1",
}
service.cfg.ExecutionEngineCaller = e
@@ -1655,9 +1668,8 @@ func TestService_insertSlashingsToForkChoiceStore(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)

au := util.AttestationUtil{}
beaconState, privKeys := util.DeterministicGenesisState(t, 100)
att1 := au.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
att1 := util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1},
},
@@ -1672,7 +1684,7 @@ func TestService_insertSlashingsToForkChoiceStore(t *testing.T) {
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
att1.Signature = aggregateSig.Marshal()

att2 := au.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
att2 := util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
AttestingIndices: []uint64{0, 1},
})
signingRoot, err = signing.ComputeSigningRoot(att2.Data, domain)
@@ -4,8 +4,6 @@ import (
"bytes"
"context"
"fmt"
"time"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
@@ -38,7 +36,7 @@ func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Chec
if err != nil {
return nil, err
}
if err := slots.ValidateClock(ss, uint64(s.genesisTime.Unix())); err != nil {
if err := slots.ValidateClock(ss, uint64(s.genesisTime().Unix())); err != nil {
return nil, err
}
return s.getAttPreState(ctx, target)
@@ -102,19 +100,14 @@ func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
break
}

if s.genesisTime.IsZero() {
log.Warn("ProcessAttestations routine waiting for genesis time")
for s.genesisTime.IsZero() {
if err := s.ctx.Err(); err != nil {
log.WithError(err).Error("Giving up waiting for genesis time")
return
}
time.Sleep(1 * time.Second)
}
log.Warn("Genesis time received, now available to process attestations")
log.Warn("ProcessAttestations routine waiting for genesis time")
c, err := s.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("timeout waiting for genesis time in spawnProcessAttestationsRoutine")
}
log.Warn("Genesis time received, now available to process attestations")

st := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
st := slots.NewSlotTicker(c.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
for {
select {
case <-s.ctx.Done():
@@ -166,33 +159,35 @@ func (s *Service) UpdateHead(ctx context.Context) error {
}).Debug("Head changed due to attestations")
}
s.headLock.RUnlock()
s.notifyEngineIfChangedHead(ctx, newHeadRoot)
if err := s.notifyEngineIfChangedHead(ctx, newHeadRoot); err != nil {
return err
}
return nil
}

// This calls notify Forkchoice Update in the event that the head has changed
func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32]byte) {
func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32]byte) error {
s.headLock.RLock()
if newHeadRoot == [32]byte{} || s.headRoot() == newHeadRoot {
s.headLock.RUnlock()
return
return nil
}
s.headLock.RUnlock()

if !s.hasBlockInInitSyncOrDB(ctx, newHeadRoot) {
log.Debug("New head does not exist in DB. Do nothing")
return // We don't have the block, don't notify the engine and update head.
return nil // We don't have the block, don't notify the engine and update head.
}

newHeadBlock, err := s.getBlock(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get new head block")
return
return nil
}
headState, err := s.cfg.StateGen.StateByRoot(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get state from db")
return
return nil
}
arg := &notifyForkchoiceUpdateArg{
headState: headState,
@@ -201,11 +196,12 @@ func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32
}
_, err = s.notifyForkchoiceUpdate(s.ctx, arg)
if err != nil {
log.WithError(err).Error("could not notify forkchoice update")
return err
}
if err := s.saveHead(ctx, newHeadRoot, newHeadBlock, headState); err != nil {
log.WithError(err).Error("could not save head")
}
return nil
}
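The reworked routine above swaps a poll-and-sleep loop on `genesisTime` for a blocking `WaitForClock` call, then drives per-slot work from a slot ticker. A sketch of that pattern with stand-in types; the `Clock` interface, the waiter, and the plain `time.Ticker` below are assumptions standing in for the geninit types and `slots.NewSlotTicker`, which this diff does not show:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// Clock is a stand-in for the genesis clock referenced in this branch.
type Clock interface {
	GenesisTime() time.Time
	CurrentSlot() uint64
}

type fixedClock struct{ genesis time.Time }

func (c fixedClock) GenesisTime() time.Time { return c.genesis }
func (c fixedClock) CurrentSlot() uint64 {
	if time.Now().Before(c.genesis) {
		return 0
	}
	return uint64(time.Since(c.genesis).Seconds()) / 12
}

// runPerSlot blocks until the clock is known, then does per-slot work.
func runPerSlot(ctx context.Context, waitForClock func(context.Context) (Clock, error), work func(slot uint64)) error {
	c, err := waitForClock(ctx)
	if err != nil {
		return fmt.Errorf("waiting for genesis: %w", err)
	}
	ticker := time.NewTicker(12 * time.Second) // stand-in for a genesis-aligned slot ticker
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return nil
		case <-ticker.C:
			work(c.CurrentSlot())
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	wait := func(ctx context.Context) (Clock, error) {
		select {
		case <-ctx.Done():
			return nil, errors.New("timed out")
		case <-time.After(10 * time.Millisecond): // pretend genesis becomes known
			return fixedClock{genesis: time.Now()}, nil
		}
	}
	_ = runPerSlot(ctx, wait, func(slot uint64) { fmt.Println("slot", slot) })
}
```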
// This processes fork choice attestations from the pool to account for validator votes and fork choice.
@@ -216,7 +212,7 @@ func (s *Service) processAttestations(ctx context.Context) {
// This delays consideration in the fork choice until their slot is in the past.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation
nextSlot := a.Data.Slot + 1
if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), nextSlot, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
if err := slots.VerifyTime(uint64(s.genesisTime().Unix()), nextSlot, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
continue
}

@@ -230,7 +226,7 @@ func (s *Service) processAttestations(ctx context.Context) {
log.WithError(err).Error("Could not delete fork choice attestation in pool")
}

if !helpers.VerifyCheckpointEpoch(a.Data.Target, s.genesisTime) {
if !helpers.VerifyCheckpointEpoch(a.Data.Target, s.genesisTime()) {
continue
}

@@ -33,7 +33,7 @@ func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) {
beaconDB := testDB.SetupDB(t)

chainService := setupBeaconChain(t, beaconDB)
chainService.genesisTime = time.Now()
chainService.SetGenesisTime(time.Now())

e := types.Epoch(slots.MaxSlotBuffer/uint64(params.BeaconConfig().SlotsPerEpoch) + 1)
_, err := chainService.AttestationTargetState(context.Background(), &ethpb.Checkpoint{Epoch: e})
@@ -60,7 +60,7 @@ func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
require.NoError(t, err)

wanted := "FFG and LMD votes are not consistent"
a := util.NewAttestationUtil().NewAttestation()
a := util.NewAttestation()
a.Data.Target.Epoch = 1
a.Data.Target.Root = []byte{'a'}
a.Data.BeaconBlockRoot = r33[:]
@@ -85,7 +85,8 @@ func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b33)
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
a := util.NewAttestationUtil().NewAttestation()

a := util.NewAttestation()
a.Data.Target.Epoch = 1
a.Data.Target.Root = r32[:]
a.Data.BeaconBlockRoot = r33[:]
@@ -101,11 +102,11 @@ func TestProcessAttestations_Ok(t *testing.T) {

service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
service.SetGenesisTime(prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second))
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
atts, err := util.NewAttestationUtil().GenerateAttestations(genesisState, pks, 1, 0, false)
atts, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
require.NoError(t, err)
tRoot := bytesutil.ToBytes32(atts[0].Data.Target.Root)
copied := genesisState.Copy()
@@ -131,7 +132,7 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
service.notifyEngineIfChangedHead(ctx, service.headRoot())
require.NoError(t, service.notifyEngineIfChangedHead(ctx, service.headRoot()))
hookErr := "could not notify forkchoice update"
invalidStateErr := "Could not get state from db"
require.LogsDoNotContain(t, hook, invalidStateErr)
@@ -139,7 +140,7 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
gb, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
require.NoError(t, service.saveInitSyncBlock(ctx, [32]byte{'a'}, gb))
service.notifyEngineIfChangedHead(ctx, [32]byte{'a'})
require.NoError(t, service.notifyEngineIfChangedHead(ctx, [32]byte{'a'}))
require.LogsContain(t, hook, invalidStateErr)

hook.Reset()
@@ -164,7 +165,7 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1})
service.notifyEngineIfChangedHead(ctx, r1)
require.NoError(t, service.notifyEngineIfChangedHead(ctx, r1))
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)

@@ -182,7 +183,7 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
state: st,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1})
service.notifyEngineIfChangedHead(ctx, r1)
require.NoError(t, service.notifyEngineIfChangedHead(ctx, r1))
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
vId, payloadID, has := service.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(2)
@@ -192,7 +193,7 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {

// Test zero headRoot returns immediately.
headRoot := service.headRoot()
service.notifyEngineIfChangedHead(ctx, [32]byte{})
require.NoError(t, service.notifyEngineIfChangedHead(ctx, [32]byte{}))
require.Equal(t, service.headRoot(), headRoot)
}

@@ -208,7 +209,7 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {

service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.genesisTime = prysmTime.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
service.SetGenesisTime(prysmTime.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second))
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, genesisState))
copied := genesisState.Copy()
@@ -226,7 +227,7 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
// Generate attestations for this block in Slot 1
atts, err := util.NewAttestationUtil().GenerateAttestations(copied, pks, 1, 1, false)
atts, err := util.GenerateAttestations(copied, pks, 1, 1, false)
require.NoError(t, err)
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
// Verify the target is in forkchoice
@@ -7,17 +7,12 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/time"
"github.com/prysmaticlabs/prysm/time/slots"
"go.opencensus.io/trace"
)

// This defines how many epochs since finality the run time will begin to save hot state on to the DB.
var epochsSinceFinalitySaveHotStateDB = types.Epoch(100)

// BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
type BlockReceiver interface {
ReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, blockRoot [32]byte) error
@@ -53,18 +48,13 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.SignedBeaco
return err
}

// Have we been finalizing? Should we start saving hot states to db?
if err := s.checkSaveHotStateDB(ctx); err != nil {
return err
}

// Reports on block and fork choice metrics.
finalized := s.FinalizedCheckpt()
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)

// Log block sync status.
justified := s.CurrentJustifiedCheckpt()
if err := logBlockSyncStatus(blockCopy.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix())); err != nil {
if err := logBlockSyncStatus(blockCopy.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime().Unix())); err != nil {
log.WithError(err).Error("Unable to log block sync status")
}
// Log payload data
@@ -160,25 +150,3 @@ func (s *Service) handlePostBlockOperations(b interfaces.BeaconBlock) error {
}
return nil
}

// This checks whether it's time to start saving hot state to DB.
// It's time when there's `epochsSinceFinalitySaveHotStateDB` epochs of non-finality.
func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
currentEpoch := slots.ToEpoch(s.CurrentSlot())
// Prevent `sinceFinality` going underflow.
var sinceFinality types.Epoch
finalized := s.FinalizedCheckpt()
if finalized == nil {
return errNilFinalizedInStore
}
if currentEpoch > finalized.Epoch {
sinceFinality = currentEpoch - finalized.Epoch
}

if sinceFinality >= epochsSinceFinalitySaveHotStateDB {
s.cfg.StateGen.EnableSaveHotStateToDB(ctx)
return nil
}

return s.cfg.StateGen.DisableSaveHotStateToDB(ctx)
}
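The helper removed above gates hot-state persistence on how long finality has stalled: compute epochs since the finalized epoch without underflowing, then compare against the 100-epoch threshold. A standalone sketch of just that arithmetic, kept here since the deletion makes the original logic harder to see at a glance:

```go
package main

import "fmt"

// shouldSaveHotStates returns true once finality has lagged the current epoch
// by at least `threshold` epochs, guarding the subtraction against underflow
// the same way the deleted checkSaveHotStateDB did.
func shouldSaveHotStates(currentEpoch, finalizedEpoch, threshold uint64) bool {
	var sinceFinality uint64
	if currentEpoch > finalizedEpoch {
		sinceFinality = currentEpoch - finalizedEpoch
	}
	return sinceFinality >= threshold
}

func main() {
	fmt.Println(shouldSaveHotStates(250, 100, 100)) // true: 150 epochs without finality
	fmt.Println(shouldSaveHotStates(50, 100, 100))  // false: guard prevents underflow
}
```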
@@ -2,10 +2,6 @@ package blockchain

import (
"context"
"sync"
"testing"
"time"

blockchainTesting "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
@@ -21,7 +17,8 @@ import (
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
logTest "github.com/sirupsen/logrus/hooks/test"
"sync"
"testing"
)

func TestService_ReceiveBlock(t *testing.T) {
@@ -284,40 +281,3 @@ func TestService_HasBlock(t *testing.T) {
require.NoError(t, err)
require.Equal(t, true, s.HasBlock(context.Background(), r))
}

func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
opts := testServiceOptsWithDB(t)
hook := logTest.NewGlobal()
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)

require.NoError(t, s.checkSaveHotStateDB(context.Background()))
assert.LogsContain(t, hook, "Entering mode to save hot states in DB")
}

func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
hook := logTest.NewGlobal()
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
s.genesisTime = time.Now()

require.NoError(t, s.checkSaveHotStateDB(context.Background()))
assert.LogsContain(t, hook, "Exiting mode to save hot states in DB")
}

func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
hook := logTest.NewGlobal()
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
s.genesisTime = time.Now()

require.NoError(t, s.checkSaveHotStateDB(context.Background()))
assert.LogsDoNotContain(t, hook, "Entering mode to save hot states in DB")
}
@@ -6,6 +6,7 @@ import (
"bytes"
"context"
"fmt"
"github.com/prysmaticlabs/prysm/beacon-chain/geninit"
"runtime"
"sync"
"time"
@@ -17,7 +18,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
f "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
@@ -30,7 +30,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
@@ -53,7 +52,6 @@ type Service struct {
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
@@ -65,6 +63,9 @@ type Service struct {
justifiedBalances *stateBalanceCache
wsVerifier *WeakSubjectivityVerifier
processAttestationsLock sync.Mutex
clock Clock
clockReady chan struct{}
clockWaiter geninit.ClockWaiter
}

// config options for the service.
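The struct hunk above replaces the raw `genesisTime time.Time` field with `clock`, `clockReady`, and `clockWaiter` fields. A guess at the minimal shape of such a clock value, based only on the `NewClock`, `GenesisTime`, and `CurrentSlot` call sites visible in this diff; the real type on this branch may well differ:

```go
package main

import (
	"fmt"
	"time"
)

// Clock is a hypothetical value type wrapping the genesis time.
type Clock struct {
	genesis time.Time
}

func NewClock(genesis time.Time) Clock { return Clock{genesis: genesis} }

func (c Clock) GenesisTime() time.Time { return c.genesis }

// CurrentSlot clamps to 0 before genesis, matching the behavior
// TestCurrentSlot_HandlesOverflow expects; 12s is mainnet's SecondsPerSlot.
func (c Clock) CurrentSlot() uint64 {
	now := time.Now()
	if now.Before(c.genesis) {
		return 0
	}
	return uint64(now.Sub(c.genesis).Seconds()) / 12
}

func main() {
	c := NewClock(time.Now().Add(-24 * time.Second))
	fmt.Println(c.CurrentSlot()) // 2
}
```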
@@ -138,6 +139,70 @@ func (s *Service) Start() {
s.fillMissingPayloadIDRoutine(s.ctx, s.cfg.StateNotifier.StateFeed())
}

func (s *Service) CombinedStart(genesis state.BeaconState) error {
// TODO: we only do this on the genesis code path, not from disk - why?
/*
// Update committee shuffled indices for genesis epoch.
if err := helpers.UpdateCommitteeCache(ctx, genesisState, 0); err != nil {
return nil, err
}
if err := helpers.UpdateProposerIndicesInCache(ctx, genesisState); err != nil {
return nil, err
}
*/
gt := time.Unix(int64(genesis.GenesisTime()), 0)
s.SetGenesisTime(gt)
s.cfg.AttService.SetGenesisTime(genesis.GenesisTime())

if features.Get().EnableForkChoiceDoublyLinkedTree {
s.cfg.ForkChoiceStore = doublylinkedtree.New()
} else {
s.cfg.ForkChoiceStore = protoarray.New()
}
// TODO: move head to use fork choice - we currently do not call initializeHeadFromDB!!

gb, err := s.cfg.BeaconDB.GenesisBlock(s.ctx)
if err != nil {
return errors.Wrap(err, "could not get genesis block from db")
}
if err := wrapper.BeaconBlockIsNil(gb); err != nil {
return errors.Wrap(err, "nil value from database block query")
}
gbr, err := gb.Block().HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not compute hash_tree_root of genesis block")
}
if err := s.cfg.ForkChoiceStore.InsertNode(s.ctx, genesis, gbr); err != nil {
log.Fatalf("Could not process genesis block for fork choice: %v", err)
}
s.cfg.ForkChoiceStore.SetGenesisTime(genesis.GenesisTime())

obr, err := s.cfg.BeaconDB.OriginCheckpointBlockRoot(s.ctx)
if err == nil {
// this means checkpoint sync was used, use the database origin root value
s.originBlockRoot = obr
} else {
if !errors.Is(err, db.ErrNotFound) {
return errors.Wrap(err, "could not retrieve checkpoint sync chain origin data from db")
}
// this means we got ErrNotFound, meaning checkpoint sync wasn't used, so the node should
// be synced from genesis. In this case, use the genesis block root as origin.
s.originBlockRoot = gbr
}
// TODO: should we set origin root when its the genesis block root, or is this method only for checkpoint sync?
s.cfg.ForkChoiceStore.SetOriginRoot(s.originBlockRoot)

spawnCountdownIfPreGenesis(s.ctx, gt, genesis)
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: gt,
GenesisValidatorsRoot: genesis.GenesisValidatorsRoot(),
},
})
return nil
}

// Stop the blockchain service's main event loop and associated goroutines.
func (s *Service) Stop() error {
defer s.cancel()
@@ -173,7 +238,7 @@ func (s *Service) Status() error {
// StartFromSavedState initializes the blockchain using a previously saved finalized checkpoint.
func (s *Service) StartFromSavedState(saved state.BeaconState) error {
log.Info("Blockchain data already exists in DB, initializing...")
s.genesisTime = time.Unix(int64(saved.GenesisTime()), 0) // lint:ignore uintcast -- Genesis time will not exceed int64 in your lifetime.
s.SetGenesisTime(time.Unix(int64(saved.GenesisTime()), 0)) // lint:ignore uintcast -- Genesis time will not exceed int64 in your lifetime.
s.cfg.AttService.SetGenesisTime(saved.GenesisTime())

originRoot, err := s.originRootFromSavedState(s.ctx)
@@ -185,7 +250,12 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
if err := s.initializeHeadFromDB(s.ctx); err != nil {
return errors.Wrap(err, "could not set up chain info")
}
spawnCountdownIfPreGenesis(s.ctx, s.genesisTime, s.cfg.BeaconDB)
gs, err := s.cfg.BeaconDB.GenesisState(s.ctx)
if err != nil {
return err
}
gt := time.Unix(int64(gs.GenesisTime()), 0)
spawnCountdownIfPreGenesis(s.ctx, gt, gs)

justified, err := s.cfg.BeaconDB.JustifiedCheckpoint(s.ctx)
if err != nil {
@@ -218,7 +288,7 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
}
forkChoicer.SetGenesisTime(uint64(s.genesisTime.Unix()))
forkChoicer.SetGenesisTime(uint64(s.genesisTime().Unix()))

st, err := s.cfg.StateGen.StateByRoot(s.ctx, fRoot)
if err != nil {
@@ -247,7 +317,7 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: s.genesisTime,
StartTime: s.genesisTime(),
GenesisValidatorsRoot: saved.GenesisValidatorsRoot(),
},
})
@@ -295,48 +365,11 @@ func (s *Service) initializeHeadFromDB(ctx context.Context) error {
return errors.New("no finalized epoch in the database")
}
finalizedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
var finalizedState state.BeaconState

finalizedState, err = s.cfg.StateGen.Resume(ctx, s.cfg.FinalizedStateAtStartUp)
finalizedState, err := s.cfg.StateGen.Resume(ctx, s.cfg.FinalizedStateAtStartUp)
if err != nil {
return errors.Wrap(err, "could not get finalized state from db")
}

if flags.Get().HeadSync {
headBlock, err := s.cfg.BeaconDB.HeadBlock(ctx)
if err != nil {
return errors.Wrap(err, "could not retrieve head block")
}
headEpoch := slots.ToEpoch(headBlock.Block().Slot())
var epochsSinceFinality types.Epoch
if headEpoch > finalized.Epoch {
epochsSinceFinality = headEpoch - finalized.Epoch
}
// Head sync when node is far enough beyond known finalized epoch,
// this becomes really useful during long period of non-finality.
if epochsSinceFinality >= headSyncMinEpochsAfterCheckpoint {
headRoot, err := headBlock.Block().HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not hash head block")
}
finalizedState, err := s.cfg.StateGen.Resume(ctx, s.cfg.FinalizedStateAtStartUp)
if err != nil {
return errors.Wrap(err, "could not get finalized state from db")
}
log.Infof("Regenerating state from the last checkpoint at slot %d to current head slot of %d."+
"This process may take a while, please wait.", finalizedState.Slot(), headBlock.Block().Slot())
headState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
if err != nil {
return errors.Wrap(err, "could not retrieve head state")
}
s.setHead(headRoot, headBlock, headState)
return nil
} else {
log.Warnf("Finalized checkpoint at slot %d is too close to the current head slot, "+
"resetting head from the checkpoint ('--%s' flag is ignored).",
finalizedState.Slot(), flags.HeadSync.Name)
}
}
if finalizedState == nil || finalizedState.IsNil() {
return errors.New("finalized state can't be nil")
}
@@ -352,34 +385,15 @@ func (s *Service) initializeHeadFromDB(ctx context.Context) error {

func (s *Service) startFromPOWChain() error {
log.Info("Waiting to reach the validator deposit threshold to start the beacon chain...")
if s.cfg.ChainStartFetcher == nil {
return errors.New("not configured web3Service for POW chain")
}
go func() {
stateChannel := make(chan *feed.Event, 1)
stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
for {
select {
case e := <-stateChannel:
if e.Type == statefeed.ChainStarted {
data, ok := e.Data.(*statefeed.ChainStartedData)
if !ok {
log.Error("event data is not type *statefeed.ChainStartedData")
return
}
log.WithField("starttime", data.StartTime).Debug("Received chain start event")
s.onPowchainStart(s.ctx, data.StartTime)
return
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
return
case err := <-stateSub.Err():
log.WithError(err).Error("Subscription to state notifier failed")
return
}
c, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("timeout while waiting for genesis during blockchain service startup")
return
}
log.WithField("starttime", c.GenesisTime()).Debug("Received chain start event")
s.onPowchainStart(s.ctx, c.GenesisTime())
s.setClock(c)
}()

return nil
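The hunk above drops the state-feed subscription in favor of blocking on a genesis clock. A minimal sketch of that consumer pattern follows; the `Clock` and `ClockWaiter` shapes are assumptions inferred from the methods exercised elsewhere in this compare, not Prysm's actual definitions.

package clockwait

import (
	"context"
	"log"
	"time"
)

// Clock exposes genesis parameters once they are known.
// Assumed shape, inferred from the mock ChainService changes in this compare.
type Clock interface {
	GenesisTime() time.Time
}

// ClockWaiter blocks until genesis is known or the context is cancelled.
type ClockWaiter interface {
	WaitForClock(ctx context.Context) (Clock, error)
}

// startWhenGenesisKnown mirrors the new startFromPOWChain flow: wait for the
// clock in a goroutine, then hand the genesis time to the start callback.
func startWhenGenesisKnown(ctx context.Context, w ClockWaiter, onStart func(time.Time)) {
	go func() {
		c, err := w.WaitForClock(ctx)
		if err != nil {
			log.Printf("timed out waiting for genesis: %v", err)
			return
		}
		onStart(c.GenesisTime())
	}()
}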
@@ -388,8 +402,7 @@ func (s *Service) startFromPOWChain() error {
// onPowchainStart initializes a series of deposits from the ChainStart deposits in the eth1
// deposit contract, initializes the beacon chain's state, and kicks off the beacon chain.
func (s *Service) onPowchainStart(ctx context.Context, genesisTime time.Time) {
preGenesisState := s.cfg.ChainStartFetcher.PreGenesisState()
initializedState, err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.cfg.ChainStartFetcher.ChainStartEth1Data())
initializedState, err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState)
if err != nil {
log.Fatalf("Could not initialize beacon chain: %v", err)
}
@@ -414,29 +427,10 @@ func (s *Service) onPowchainStart(ctx context.Context, genesisTime time.Time) {
// initializes the state and genesis block of the beacon chain to persistent storage
// based on a genesis timestamp value obtained from the ChainStart event emitted
// by the ETH1.0 Deposit Contract and the POWChain service of the node.
func (s *Service) initializeBeaconChain(
ctx context.Context,
genesisTime time.Time,
preGenesisState state.BeaconState,
eth1data *ethpb.Eth1Data) (state.BeaconState, error) {
func (s *Service) initializeBeaconChain(ctx context.Context, genesisTime time.Time) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "beacon-chain.Service.initializeBeaconChain")
defer span.End()
s.genesisTime = genesisTime
unixTime := uint64(genesisTime.Unix())

genesisState, err := transition.OptimizedGenesisBeaconState(unixTime, preGenesisState, eth1data)
if err != nil {
return nil, errors.Wrap(err, "could not initialize genesis state")
}

if err := s.saveGenesisData(ctx, genesisState); err != nil {
return nil, errors.Wrap(err, "could not save genesis data")
}

log.Info("Initialized beacon chain genesis state")

// Clear out all pre-genesis data now that the state is initialized.
s.cfg.ChainStartFetcher.ClearPreGenesisData()
s.SetGenesisTime(genesisTime)

// Update committee shuffled indices for genesis epoch.
if err := helpers.UpdateCommitteeCache(ctx, genesisState, 0); err != nil {
@@ -447,15 +441,11 @@ func (s *Service) initializeBeaconChain(
}

s.cfg.AttService.SetGenesisTime(genesisState.GenesisTime())

return genesisState, nil
}

// This gets called when beacon chain is first initialized to save genesis data (state, block, and more) in db.
func (s *Service) saveGenesisData(ctx context.Context, genesisState state.BeaconState) error {
if err := s.cfg.BeaconDB.SaveGenesisData(ctx, genesisState); err != nil {
return errors.Wrap(err, "could not save genesis data")
}
genesisBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
if err != nil || genesisBlk == nil || genesisBlk.IsNil() {
return fmt.Errorf("could not load genesis block: %v", err)
@@ -476,7 +466,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, genesisBlkRoot); err != nil {
return errors.Wrap(err, "Could not set optimistic status of genesis block to false")
}
s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix()))
s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime().Unix()))

s.setHead(genesisBlkRoot, genesisBlk, genesisState)
return nil
@@ -494,19 +484,14 @@ func (s *Service) hasBlock(ctx context.Context, root [32]byte) bool {
return s.cfg.BeaconDB.HasBlock(ctx, root)
}

func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
currentTime := prysmTime.Now()
if currentTime.After(genesisTime) {
func spawnCountdownIfPreGenesis(ctx context.Context, gt time.Time, gs state.BeaconState) {
// only proceed if this function runs before genesis time
if prysmTime.Now().After(gt) {
return
}

gState, err := db.GenesisState(ctx)
gr, err := gs.HashTreeRoot(ctx)
if err != nil {
log.Fatalf("Could not retrieve genesis state: %v", err)
log.WithError(err).Fatal("Could not compute hash_tree_root of genesis state")
}
gRoot, err := gState.HashTreeRoot(ctx)
if err != nil {
log.Fatalf("Could not hash tree root genesis state: %v", err)
}
go slots.CountdownToGenesis(ctx, genesisTime, uint64(gState.NumValidators()), gRoot)
go slots.CountdownToGenesis(ctx, gt, uint64(gs.NumValidators()), gr)
}

@@ -136,7 +136,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {

chainService, err := NewService(ctx, opts...)
require.NoError(t, err, "Unable to setup chain service")
chainService.genesisTime = time.Unix(1, 0) // non-zero time
chainService.SetGenesisTime(time.Unix(1, 0)) // non-zero time

return chainService
}
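Several of the hunks above swap direct access to the genesisTime field for SetGenesisTime / genesisTime() accessors. A minimal sketch of that accessor pattern follows; it is illustrative only and does not attempt the real wiring to the injected clock.

package service

import (
	"sync"
	"time"
)

// genesisTimeHolder illustrates the accessor pattern: callers use
// SetGenesisTime and genesisTime instead of reading the field directly,
// so the backing source can change without touching call sites.
type genesisTimeHolder struct {
	mu sync.RWMutex
	gt time.Time
}

func (h *genesisTimeHolder) SetGenesisTime(t time.Time) {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.gt = t
}

func (h *genesisTimeHolder) genesisTime() time.Time {
	h.mu.RLock()
	defer h.mu.RUnlock()
	return h.gt
}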
@@ -11,6 +11,7 @@ go_library(
|
||||
],
|
||||
deps = [
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/block:go_default_library",
|
||||
|
||||
@@ -5,6 +5,7 @@ package testing
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -64,6 +65,7 @@ type ChainService struct {
|
||||
ReceiveBlockMockErr error
|
||||
OptimisticCheckRootReceived [32]byte
|
||||
FinalizedRoots map[[32]byte]bool
|
||||
Clock blockchain.Clock
|
||||
}
|
||||
|
||||
// ForkChoicer mocks the same method in the chain service
|
||||
@@ -326,6 +328,13 @@ func (s *ChainService) GenesisTime() time.Time {
|
||||
return s.Genesis
|
||||
}
|
||||
|
||||
func (s *ChainService) WaitForClock(ctx context.Context) (blockchain.Clock, error) {
|
||||
if s.Clock == nil {
|
||||
return blockchain.NewClock(time.Now()), nil
|
||||
}
|
||||
return s.Clock, nil
|
||||
}
|
||||
|
||||
// GenesisValidatorsRoot mocks the same method in the chain service.
|
||||
func (s *ChainService) GenesisValidatorsRoot() [32]byte {
|
||||
return s.ValidatorsRoot
|
||||
@@ -336,7 +345,7 @@ func (s *ChainService) CurrentSlot() types.Slot {
|
||||
if s.Slot != nil {
|
||||
return *s.Slot
|
||||
}
|
||||
return types.Slot(uint64(time.Now().Unix()-s.Genesis.Unix()) / params.BeaconConfig().SecondsPerSlot)
|
||||
return s.Clock.CurrentSlot()
|
||||
}
|
||||
|
||||
// Participation mocks the same method in the chain service.
|
||||
@@ -464,3 +473,11 @@ func (s *ChainService) ReceiveAttesterSlashing(context.Context, *ethpb.AttesterS
|
||||
func (s *ChainService) IsFinalized(_ context.Context, blockRoot [32]byte) bool {
|
||||
return s.FinalizedRoots[blockRoot]
|
||||
}
|
||||
|
||||
func NewMockClock(now time.Time, slotsAfterGenesis types.Slot) blockchain.Clock {
|
||||
offset := uint64(slotsAfterGenesis) * params.BeaconConfig().SecondsPerSlot
|
||||
genesis := now.Add(-1 * time.Second * time.Duration(offset))
|
||||
return blockchain.NewClock(genesis, blockchain.WithNow(func() time.Time {
|
||||
return genesis
|
||||
}))
|
||||
}
|
||||
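For context, a test alongside the mock above might exercise the new clock-backed path roughly like this. It is a sketch that relies only on ChainService, NewMockClock, and CurrentSlot as they appear in this compare, and assumes it lives in a _test.go file in the same package with the usual imports.

// Illustrative use of the mock: inject an explicit clock so CurrentSlot no
// longer depends on wall-clock time relative to s.Genesis.
func TestCurrentSlotDelegatesToClock(t *testing.T) {
	svc := &ChainService{
		Clock: NewMockClock(time.Now(), 10 /* slots after genesis */),
	}
	_ = svc.CurrentSlot() // resolved by the injected clock, not time.Now() math
}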
@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"error.go",
|
||||
"metric.go",
|
||||
"option.go",
|
||||
"service.go",
|
||||
|
||||
@@ -1,7 +0,0 @@
|
||||
package builder
|
||||
|
||||
import "github.com/pkg/errors"
|
||||
|
||||
var (
|
||||
ErrNotRunning = errors.New("builder is not running")
|
||||
)
|
||||
@@ -2,6 +2,7 @@ package builder
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
@@ -21,7 +22,6 @@ import (
|
||||
type BlockBuilder interface {
|
||||
SubmitBlindedBlock(ctx context.Context, block *ethpb.SignedBlindedBeaconBlockBellatrix) (*v1.ExecutionPayload, error)
|
||||
GetHeader(ctx context.Context, slot types.Slot, parentHash [32]byte, pubKey [48]byte) (*ethpb.SignedBuilderBid, error)
|
||||
Status() error
|
||||
RegisterValidator(ctx context.Context, reg []*ethpb.SignedValidatorRegistrationV1) error
|
||||
Configured() bool
|
||||
}
|
||||
@@ -60,6 +60,12 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
return nil, err
|
||||
}
|
||||
s.c = c
|
||||
|
||||
// Is the builder up?
|
||||
if err := s.c.Status(ctx); err != nil {
|
||||
return nil, fmt.Errorf("could not connect to builder: %v", err)
|
||||
}
|
||||
|
||||
log.WithField("endpoint", c.NodeURL()).Info("Builder has been configured")
|
||||
}
|
||||
return s, nil
|
||||
|
||||
@@ -15,7 +15,6 @@ type MockBuilderService struct {
|
||||
ErrSubmitBlindedBlock error
|
||||
Bid *ethpb.SignedBuilderBid
|
||||
ErrGetHeader error
|
||||
ErrStatus error
|
||||
ErrRegisterValidator error
|
||||
}
|
||||
|
||||
@@ -34,11 +33,6 @@ func (s *MockBuilderService) GetHeader(context.Context, types.Slot, [32]byte, [4
|
||||
return s.Bid, s.ErrGetHeader
|
||||
}
|
||||
|
||||
// Status for mocking.
|
||||
func (s *MockBuilderService) Status() error {
|
||||
return s.ErrStatus
|
||||
}
|
||||
|
||||
// RegisterValidator for mocking.
|
||||
func (s *MockBuilderService) RegisterValidator(context.Context, []*ethpb.SignedValidatorRegistrationV1) error {
|
||||
return s.ErrRegisterValidator
|
||||
|
||||
@@ -77,7 +77,7 @@ func New() (*DepositCache, error) {
|
||||
// InsertDeposit into the database. If deposit or block number are nil
|
||||
// then this method does nothing.
|
||||
func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) error {
|
||||
_, span := trace.StartSpan(ctx, "DepositsCache.InsertDeposit")
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertDeposit")
|
||||
defer span.End()
|
||||
if d == nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
@@ -111,7 +111,7 @@ func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blo
|
||||
|
||||
// InsertDepositContainers inserts a set of deposit containers into our deposit cache.
|
||||
func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*ethpb.DepositContainer) {
|
||||
_, span := trace.StartSpan(ctx, "DepositsCache.InsertDepositContainers")
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertDepositContainers")
|
||||
defer span.End()
|
||||
dc.depositsLock.Lock()
|
||||
defer dc.depositsLock.Unlock()
|
||||
@@ -130,7 +130,7 @@ func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*eth
|
||||
|
||||
// InsertFinalizedDeposits inserts deposits up to eth1DepositIndex (inclusive) into the finalized deposits cache.
|
||||
func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1DepositIndex int64) {
|
||||
_, span := trace.StartSpan(ctx, "DepositsCache.InsertFinalizedDeposits")
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertFinalizedDeposits")
|
||||
defer span.End()
|
||||
dc.depositsLock.Lock()
|
||||
defer dc.depositsLock.Unlock()
|
||||
@@ -180,7 +180,7 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit
|
||||
|
||||
// AllDepositContainers returns all historical deposit containers.
|
||||
func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*ethpb.DepositContainer {
|
||||
_, span := trace.StartSpan(ctx, "DepositsCache.AllDepositContainers")
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.AllDepositContainers")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
@@ -191,7 +191,7 @@ func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*ethpb.Depos
|
||||
// AllDeposits returns a list of historical deposits until the given block number
|
||||
// (inclusive). If no block is specified then this method returns all historical deposits.
|
||||
func (dc *DepositCache) AllDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
|
||||
_, span := trace.StartSpan(ctx, "DepositsCache.AllDeposits")
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.AllDeposits")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
@@ -212,7 +212,7 @@ func (dc *DepositCache) allDeposits(untilBlk *big.Int) []*ethpb.Deposit {
|
||||
// DepositsNumberAndRootAtHeight returns number of deposits made up to blockheight and the
|
||||
// root that corresponds to the latest deposit at that blockheight.
|
||||
func (dc *DepositCache) DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte) {
|
||||
_, span := trace.StartSpan(ctx, "DepositsCache.DepositsNumberAndRootAtHeight")
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.DepositsNumberAndRootAtHeight")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
@@ -228,7 +228,7 @@ func (dc *DepositCache) DepositsNumberAndRootAtHeight(ctx context.Context, block
|
||||
// DepositByPubkey looks through historical deposits and finds one which contains
|
||||
// a certain public key within its deposit data.
|
||||
func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int) {
|
||||
_, span := trace.StartSpan(ctx, "DepositsCache.DepositByPubkey")
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.DepositByPubkey")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
@@ -249,7 +249,7 @@ func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*et
|
||||
|
||||
// FinalizedDeposits returns the finalized deposits trie.
|
||||
func (dc *DepositCache) FinalizedDeposits(ctx context.Context) *FinalizedDeposits {
|
||||
_, span := trace.StartSpan(ctx, "DepositsCache.FinalizedDeposits")
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.FinalizedDeposits")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
|
||||
@@ -29,7 +29,7 @@ type PendingDepositsFetcher interface {
|
||||
// InsertPendingDeposit into the database. If deposit or block number are nil
|
||||
// then this method does nothing.
|
||||
func (dc *DepositCache) InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) {
|
||||
_, span := trace.StartSpan(ctx, "DepositsCache.InsertPendingDeposit")
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertPendingDeposit")
|
||||
defer span.End()
|
||||
if d == nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
@@ -66,7 +66,7 @@ func (dc *DepositCache) PendingDeposits(ctx context.Context, untilBlk *big.Int)
|
||||
// PendingContainers returns a list of deposit containers until the given block number
|
||||
// (inclusive).
|
||||
func (dc *DepositCache) PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer {
|
||||
_, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
@@ -90,7 +90,7 @@ func (dc *DepositCache) PendingContainers(ctx context.Context, untilBlk *big.Int
|
||||
// RemovePendingDeposit from the database. The deposit is indexed by the
|
||||
// Index. This method does nothing if deposit ptr is nil.
|
||||
func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Deposit) {
|
||||
_, span := trace.StartSpan(ctx, "DepositsCache.RemovePendingDeposit")
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.RemovePendingDeposit")
|
||||
defer span.End()
|
||||
|
||||
if d == nil {
|
||||
@@ -128,7 +128,7 @@ func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Depos
|
||||
|
||||
// PrunePendingDeposits removes any deposit which is older than the given deposit merkle tree index.
|
||||
func (dc *DepositCache) PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64) {
|
||||
_, span := trace.StartSpan(ctx, "DepositsCache.PrunePendingDeposits")
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.PrunePendingDeposits")
|
||||
defer span.End()
|
||||
|
||||
if merkleTreeIndex == 0 {
|
||||
|
||||
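All of the deposit-cache hunks above make the same fix: `_, span :=` becomes `ctx, span :=`, so the context returned by trace.StartSpan is kept rather than discarded. That returned context is what carries the span to callees. A minimal sketch of the difference, assuming an opencensus-style trace package like the one these files use:

package depositsketch

import (
	"context"

	"go.opencensus.io/trace" // assumed import path; Prysm uses an opencensus-style API
)

func parentWork(ctx context.Context) {
	// Keeping the returned ctx attaches the span to it...
	ctx, span := trace.StartSpan(ctx, "parentWork")
	defer span.End()

	// ...so childWork's span is recorded as a child of "parentWork".
	// With `_, span := trace.StartSpan(...)` that parent/child link is lost.
	childWork(ctx)
}

func childWork(ctx context.Context) {
	_, span := trace.StartSpan(ctx, "childWork")
	defer span.End()
	// actual work would go here
}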
@@ -27,7 +27,7 @@ import (
|
||||
|
||||
func TestProcessAttestations_InclusionDelayFailure(t *testing.T) {
|
||||
attestations := []*ethpb.Attestation{
|
||||
util.NewAttestationUtil().HydrateAttestation(ðpb.Attestation{
|
||||
util.HydrateAttestation(ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, fieldparams.RootLength)},
|
||||
Slot: 5,
|
||||
@@ -55,7 +55,7 @@ func TestProcessAttestations_InclusionDelayFailure(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProcessAttestations_NeitherCurrentNorPrevEpoch(t *testing.T) {
|
||||
att := util.NewAttestationUtil().HydrateAttestation(ðpb.Attestation{
|
||||
att := util.HydrateAttestation(ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: []byte("hello-world")},
|
||||
Target: ðpb.Checkpoint{Epoch: 0}}})
|
||||
@@ -201,7 +201,7 @@ func TestProcessAttestations_OK(t *testing.T) {
|
||||
aggBits.SetBitAt(0, true)
|
||||
var mockRoot [32]byte
|
||||
copy(mockRoot[:], "hello-world")
|
||||
att := util.NewAttestationUtil().HydrateAttestation(ðpb.Attestation{
|
||||
att := util.HydrateAttestation(ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Root: mockRoot[:]},
|
||||
Target: ðpb.Checkpoint{Root: mockRoot[:]},
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
|
||||
// InitializePrecomputeValidators precomputes individual validator for its attested balances and the total sum of validators attested balances of the epoch.
|
||||
func InitializePrecomputeValidators(ctx context.Context, beaconState state.BeaconState) ([]*precompute.Validator, *precompute.Balance, error) {
|
||||
_, span := trace.StartSpan(ctx, "altair.InitializePrecomputeValidators")
|
||||
ctx, span := trace.StartSpan(ctx, "altair.InitializePrecomputeValidators")
|
||||
defer span.End()
|
||||
vals := make([]*precompute.Validator, beaconState.NumValidators())
|
||||
bal := &precompute.Balance{}
|
||||
@@ -76,7 +76,7 @@ func ProcessInactivityScores(
|
||||
beaconState state.BeaconState,
|
||||
vals []*precompute.Validator,
|
||||
) (state.BeaconState, []*precompute.Validator, error) {
|
||||
_, span := trace.StartSpan(ctx, "altair.ProcessInactivityScores")
|
||||
ctx, span := trace.StartSpan(ctx, "altair.ProcessInactivityScores")
|
||||
defer span.End()
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
@@ -144,7 +144,7 @@ func ProcessEpochParticipation(
|
||||
bal *precompute.Balance,
|
||||
vals []*precompute.Validator,
|
||||
) ([]*precompute.Validator, *precompute.Balance, error) {
|
||||
_, span := trace.StartSpan(ctx, "altair.ProcessEpochParticipation")
|
||||
ctx, span := trace.StartSpan(ctx, "altair.ProcessEpochParticipation")
|
||||
defer span.End()
|
||||
|
||||
cp, err := beaconState.CurrentEpochParticipation()
|
||||
|
||||
@@ -6,6 +6,7 @@ go_library(
|
||||
"attestation.go",
|
||||
"attester_slashing.go",
|
||||
"deposit.go",
|
||||
"error.go",
|
||||
"eth1_data.go",
|
||||
"exit.go",
|
||||
"genesis.go",
|
||||
@@ -15,6 +16,7 @@ go_library(
|
||||
"proposer_slashing.go",
|
||||
"randao.go",
|
||||
"signature.go",
|
||||
"withdrawals.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks",
|
||||
visibility = [
|
||||
@@ -31,7 +33,6 @@ go_library(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/forks/bellatrix:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
@@ -40,6 +41,7 @@ go_library(
|
||||
"//contracts/deposit:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//crypto/hash/htr:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
@@ -75,6 +77,7 @@ go_test(
|
||||
"proposer_slashing_test.go",
|
||||
"randao_test.go",
|
||||
"signature_test.go",
|
||||
"withdrawals_test.go",
|
||||
],
|
||||
data = glob(["testdata/**"]),
|
||||
embed = [":go_default_library"],
|
||||
@@ -89,11 +92,11 @@ go_test(
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/forks/bellatrix:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//crypto/hash/htr:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
|
||||
@@ -25,7 +25,7 @@ import (
|
||||
|
||||
func TestProcessAggregatedAttestation_OverlappingBits(t *testing.T) {
|
||||
beaconState, privKeys := util.DeterministicGenesisState(t, 100)
|
||||
data := util.NewAttestationUtil().HydrateAttestationData(ðpb.AttestationData{
|
||||
data := util.HydrateAttestationData(ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: bytesutil.PadTo([]byte("hello-world"), 32)},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: bytesutil.PadTo([]byte("hello-world"), 32)},
|
||||
})
|
||||
@@ -85,7 +85,7 @@ func TestProcessAggregatedAttestation_OverlappingBits(t *testing.T) {
|
||||
func TestVerifyAttestationNoVerifySignature_IncorrectSlotTargetEpoch(t *testing.T) {
|
||||
beaconState, _ := util.DeterministicGenesisState(t, 1)
|
||||
|
||||
att := util.NewAttestationUtil().HydrateAttestation(ðpb.Attestation{
|
||||
att := util.HydrateAttestation(ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch,
|
||||
Target: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
@@ -218,7 +218,7 @@ func TestConvertToIndexed_OK(t *testing.T) {
|
||||
|
||||
var sig [fieldparams.BLSSignatureLength]byte
|
||||
copy(sig[:], "signed")
|
||||
att := util.NewAttestationUtil().HydrateAttestation(ðpb.Attestation{
|
||||
att := util.HydrateAttestation(ðpb.Attestation{
|
||||
Signature: sig[:],
|
||||
})
|
||||
for _, tt := range tests {
|
||||
@@ -261,12 +261,11 @@ func TestVerifyIndexedAttestation_OK(t *testing.T) {
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
au := util.AttestationUtil{}
|
||||
tests := []struct {
|
||||
attestation *ethpb.IndexedAttestation
|
||||
}{
|
||||
{attestation: ðpb.IndexedAttestation{
|
||||
Data: au.HydrateAttestationData(ðpb.AttestationData{
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 2,
|
||||
},
|
||||
@@ -276,7 +275,7 @@ func TestVerifyIndexedAttestation_OK(t *testing.T) {
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
}},
|
||||
{attestation: ðpb.IndexedAttestation{
|
||||
Data: au.HydrateAttestationData(ðpb.AttestationData{
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
},
|
||||
@@ -285,7 +284,7 @@ func TestVerifyIndexedAttestation_OK(t *testing.T) {
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
}},
|
||||
{attestation: ðpb.IndexedAttestation{
|
||||
Data: au.HydrateAttestationData(ðpb.AttestationData{
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 4,
|
||||
},
|
||||
@@ -294,7 +293,7 @@ func TestVerifyIndexedAttestation_OK(t *testing.T) {
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
}},
|
||||
{attestation: ðpb.IndexedAttestation{
|
||||
Data: au.HydrateAttestationData(ðpb.AttestationData{
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 7,
|
||||
},
|
||||
@@ -412,8 +411,7 @@ func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) {
|
||||
|
||||
comm1, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 0 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
au := util.AttestationUtil{}
|
||||
att1 := au.HydrateAttestation(ðpb.Attestation{
|
||||
att1 := util.HydrateAttestation(ðpb.Attestation{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
@@ -432,7 +430,7 @@ func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) {
|
||||
|
||||
comm2, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1*params.BeaconConfig().SlotsPerEpoch+1 /*slot*/, 1 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
att2 := au.HydrateAttestation(ðpb.Attestation{
|
||||
att2 := util.HydrateAttestation(ðpb.Attestation{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1*params.BeaconConfig().SlotsPerEpoch + 1,
|
||||
@@ -472,8 +470,7 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing
|
||||
|
||||
comm1, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 0 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
au := util.AttestationUtil{}
|
||||
att1 := au.HydrateAttestation(ðpb.Attestation{
|
||||
att1 := util.HydrateAttestation(ðpb.Attestation{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
@@ -492,7 +489,7 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing
|
||||
|
||||
comm2, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 1 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
att2 := au.HydrateAttestation(ðpb.Attestation{
|
||||
att2 := util.HydrateAttestation(ðpb.Attestation{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
@@ -537,8 +534,7 @@ func TestRetrieveAttestationSignatureSet_AcrossFork(t *testing.T) {
|
||||
|
||||
comm1, err := helpers.BeaconCommitteeFromState(ctx, st, 1 /*slot*/, 0 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
au := util.AttestationUtil{}
|
||||
att1 := au.HydrateAttestation(ðpb.Attestation{
|
||||
att1 := util.HydrateAttestation(ðpb.Attestation{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
@@ -557,7 +553,7 @@ func TestRetrieveAttestationSignatureSet_AcrossFork(t *testing.T) {
|
||||
|
||||
comm2, err := helpers.BeaconCommitteeFromState(ctx, st, 1 /*slot*/, 1 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
att2 := au.HydrateAttestation(ðpb.Attestation{
|
||||
att2 := util.HydrateAttestation(ðpb.Attestation{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
|
||||
@@ -19,12 +19,11 @@ import (
|
||||
)
|
||||
|
||||
func TestSlashableAttestationData_CanSlash(t *testing.T) {
|
||||
au := util.AttestationUtil{}
|
||||
att1 := au.HydrateAttestationData(ðpb.AttestationData{
|
||||
att1 := util.HydrateAttestationData(ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
|
||||
Source: ðpb.Checkpoint{Root: bytesutil.PadTo([]byte{'A'}, 32)},
|
||||
})
|
||||
att2 := au.HydrateAttestationData(ðpb.AttestationData{
|
||||
att2 := util.HydrateAttestationData(ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
|
||||
Source: ðpb.Checkpoint{Root: bytesutil.PadTo([]byte{'B'}, 32)},
|
||||
})
|
||||
@@ -36,10 +35,9 @@ func TestSlashableAttestationData_CanSlash(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
|
||||
au := util.AttestationUtil{}
|
||||
slashings := []*ethpb.AttesterSlashing{{
|
||||
Attestation_1: au.HydrateIndexedAttestation(ðpb.IndexedAttestation{}),
|
||||
Attestation_2: au.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Attestation_1: util.HydrateIndexedAttestation(ðpb.IndexedAttestation{}),
|
||||
Attestation_2: util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1},
|
||||
Target: ðpb.Checkpoint{Epoch: 1}},
|
||||
@@ -73,16 +71,15 @@ func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T)
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
au := util.AttestationUtil{}
|
||||
slashings := []*ethpb.AttesterSlashing{
|
||||
{
|
||||
Attestation_1: au.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Attestation_1: util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1},
|
||||
},
|
||||
AttestingIndices: make([]uint64, params.BeaconConfig().MaxValidatorsPerCommittee+1),
|
||||
}),
|
||||
Attestation_2: au.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Attestation_2: util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
AttestingIndices: make([]uint64, params.BeaconConfig().MaxValidatorsPerCommittee+1),
|
||||
}),
|
||||
},
|
||||
@@ -105,8 +102,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
|
||||
vv.WithdrawableEpoch = types.Epoch(params.BeaconConfig().SlotsPerEpoch)
|
||||
}
|
||||
|
||||
au := util.AttestationUtil{}
|
||||
att1 := au.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
att1 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1},
|
||||
},
|
||||
@@ -121,7 +117,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
|
||||
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
att1.Signature = aggregateSig.Marshal()
|
||||
|
||||
att2 := au.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
att2 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
signingRoot, err = signing.ComputeSigningRoot(att2.Data, domain)
|
||||
@@ -175,8 +171,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusAltair(t *testing.T) {
|
||||
vv.WithdrawableEpoch = types.Epoch(params.BeaconConfig().SlotsPerEpoch)
|
||||
}
|
||||
|
||||
au := util.AttestationUtil{}
|
||||
att1 := au.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
att1 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1},
|
||||
},
|
||||
@@ -191,7 +186,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusAltair(t *testing.T) {
|
||||
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
att1.Signature = aggregateSig.Marshal()
|
||||
|
||||
att2 := au.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
att2 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
signingRoot, err = signing.ComputeSigningRoot(att2.Data, domain)
|
||||
@@ -245,8 +240,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusBellatrix(t *testing.T) {
|
||||
vv.WithdrawableEpoch = types.Epoch(params.BeaconConfig().SlotsPerEpoch)
|
||||
}
|
||||
|
||||
au := util.AttestationUtil{}
|
||||
att1 := au.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
att1 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1},
|
||||
},
|
||||
@@ -261,7 +255,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusBellatrix(t *testing.T) {
|
||||
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
att1.Signature = aggregateSig.Marshal()
|
||||
|
||||
att2 := au.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
att2 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
signingRoot, err = signing.ComputeSigningRoot(att2.Data, domain)
|
||||
|
||||
@@ -39,9 +39,8 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {
|
||||
expectedSlashedVal := 2800
|
||||
|
||||
root1 := [32]byte{'d', 'o', 'u', 'b', 'l', 'e', '1'}
|
||||
au := util.AttestationUtil{}
|
||||
att1 := ðpb.IndexedAttestation{
|
||||
Data: au.HydrateAttestationData(ðpb.AttestationData{Target: ðpb.Checkpoint{Epoch: 0, Root: root1[:]}}),
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{Target: ðpb.Checkpoint{Epoch: 0, Root: root1[:]}}),
|
||||
AttestingIndices: setA,
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
@@ -59,7 +58,7 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {
|
||||
|
||||
root2 := [32]byte{'d', 'o', 'u', 'b', 'l', 'e', '2'}
|
||||
att2 := ðpb.IndexedAttestation{
|
||||
Data: au.HydrateAttestationData(ðpb.AttestationData{
|
||||
Data: util.HydrateAttestationData(ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Root: root2[:]},
|
||||
}),
|
||||
AttestingIndices: setB,
|
||||
|
||||
beacon-chain/core/blocks/error.go (new file, 8 lines)
@@ -0,0 +1,8 @@
package blocks

import "github.com/pkg/errors"

var errNilSignedWithdrawalMessage = errors.New("nil SignedBLSToExecutionChange message")
var errNilWithdrawalMessage = errors.New("nil BLSToExecutionChange message")
var errInvalidBLSPrefix = errors.New("withdrawal credential prefix is not a BLS prefix")
var errInvalidWithdrawalCredentials = errors.New("withdrawal credentials do not match")
@@ -7,7 +7,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/forks/bellatrix"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
@@ -39,20 +38,15 @@ func IsMergeTransitionComplete(st state.BeaconState) (bool, error) {
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return !bellatrix.IsEmptyHeader(h), nil
|
||||
}
|
||||
|
||||
// IsMergeTransitionBlockUsingPreStatePayloadHeader returns true if the input block is the terminal merge block.
|
||||
// Terminal merge block must be associated with an empty payload header.
|
||||
// This assumes the header `h` is referenced as the parent state for block body `body.
|
||||
func IsMergeTransitionBlockUsingPreStatePayloadHeader(h *enginev1.ExecutionPayloadHeader, body interfaces.BeaconBlockBody) (bool, error) {
|
||||
if h == nil || body == nil {
|
||||
return false, errors.New("nil header or block body")
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(h)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !bellatrix.IsEmptyHeader(h) {
|
||||
return false, nil
|
||||
isEmpty, err := wrapper.IsEmptyExecutionData(wrappedHeader)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return IsExecutionBlock(body)
|
||||
return !isEmpty, nil
|
||||
}
|
||||
|
||||
// IsExecutionBlock returns whether the block has a non-empty ExecutionPayload.
|
||||
@@ -64,7 +58,7 @@ func IsExecutionBlock(body interfaces.BeaconBlockBody) (bool, error) {
|
||||
if body == nil {
|
||||
return false, errors.New("nil block body")
|
||||
}
|
||||
payload, err := body.ExecutionPayload()
|
||||
payload, err := body.Execution()
|
||||
switch {
|
||||
case errors.Is(err, wrapper.ErrUnsupportedField):
|
||||
return false, nil
|
||||
@@ -72,7 +66,11 @@ func IsExecutionBlock(body interfaces.BeaconBlockBody) (bool, error) {
|
||||
return false, err
|
||||
default:
|
||||
}
|
||||
return !bellatrix.IsEmptyPayload(payload), nil
|
||||
isEmpty, err := wrapper.IsEmptyExecutionData(payload)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return !isEmpty, nil
|
||||
}
|
||||
|
||||
// IsExecutionEnabled returns true if the beacon chain can begin executing.
|
||||
@@ -98,7 +96,15 @@ func IsExecutionEnabled(st state.BeaconState, body interfaces.BeaconBlockBody) (
|
||||
// IsExecutionEnabledUsingHeader returns true if the execution is enabled using post processed payload header and block body.
|
||||
// This is an optimized version of IsExecutionEnabled where beacon state is not required as an argument.
|
||||
func IsExecutionEnabledUsingHeader(header *enginev1.ExecutionPayloadHeader, body interfaces.BeaconBlockBody) (bool, error) {
|
||||
if !bellatrix.IsEmptyHeader(header) {
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(header)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
isEmpty, err := wrapper.IsEmptyExecutionData(wrappedHeader)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !isEmpty {
|
||||
return true, nil
|
||||
}
|
||||
return IsExecutionBlock(body)
|
||||
@@ -116,7 +122,7 @@ func IsPreBellatrixVersion(v int) bool {
|
||||
// # Verify consistency of the parent hash with respect to the previous execution payload header
|
||||
// if is_merge_complete(state):
|
||||
// assert payload.parent_hash == state.latest_execution_payload_header.block_hash
|
||||
func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload *enginev1.ExecutionPayload) error {
|
||||
func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload interfaces.ExecutionData) error {
|
||||
complete, err := IsMergeTransitionComplete(st)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -129,7 +135,7 @@ func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload *enginev1.E
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(payload.ParentHash, header.BlockHash) {
|
||||
if !bytes.Equal(payload.ParentHash(), header.BlockHash) {
|
||||
return errors.New("incorrect block hash")
|
||||
}
|
||||
return nil
|
||||
@@ -143,20 +149,20 @@ func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload *enginev1.E
|
||||
// assert payload.random == get_randao_mix(state, get_current_epoch(state))
|
||||
// # Verify timestamp
|
||||
// assert payload.timestamp == compute_timestamp_at_slot(state, state.slot)
|
||||
func ValidatePayload(st state.BeaconState, payload *enginev1.ExecutionPayload) error {
|
||||
func ValidatePayload(st state.BeaconState, payload interfaces.ExecutionData) error {
|
||||
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !bytes.Equal(payload.PrevRandao, random) {
|
||||
if !bytes.Equal(payload.PrevRandao(), random) {
|
||||
return ErrInvalidPayloadPrevRandao
|
||||
}
|
||||
t, err := slots.ToTime(st.GenesisTime(), st.Slot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if payload.Timestamp != uint64(t.Unix()) {
|
||||
if payload.Timestamp() != uint64(t.Unix()) {
|
||||
return ErrInvalidPayloadTimeStamp
|
||||
}
|
||||
return nil
|
||||
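The timestamp assert in the spec comment above is plain arithmetic on the slot number. A small sketch, assuming mainnet-style parameters (12-second slots, genesis timestamp 1606824023) purely for illustration:

// expectedPayloadTimestamp sketches compute_timestamp_at_slot from the spec
// comment above: genesis time plus slot * SECONDS_PER_SLOT.
func expectedPayloadTimestamp(genesisTime, slot, secondsPerSlot uint64) uint64 {
	return genesisTime + slot*secondsPerSlot
}

// e.g. genesis 1606824023, slot 100, 12s slots -> 1606824023 + 1200 = 1606825223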
@@ -194,27 +200,29 @@ func ValidatePayload(st state.BeaconState, payload *enginev1.ExecutionPayload) e
|
||||
// block_hash=payload.block_hash,
|
||||
// transactions_root=hash_tree_root(payload.transactions),
|
||||
// )
|
||||
func ProcessPayload(st state.BeaconState, payload *enginev1.ExecutionPayload) (state.BeaconState, error) {
|
||||
func ProcessPayload(st state.BeaconState, payload interfaces.ExecutionData) (state.BeaconState, error) {
|
||||
if err := ValidatePayloadWhenMergeCompletes(st, payload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := ValidatePayload(st, payload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
header, err := bellatrix.PayloadToHeader(payload)
|
||||
header, err := wrapper.PayloadToHeader(payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := st.SetLatestExecutionPayloadHeader(header); err != nil {
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(header)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := st.SetLatestExecutionPayloadHeader(wrappedHeader); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// ValidatePayloadHeaderWhenMergeCompletes validates the payload header when the merge completes.
|
||||
func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header *enginev1.ExecutionPayloadHeader) error {
|
||||
func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header interfaces.ExecutionData) error {
|
||||
// Skip validation if the state is not merge compatible.
|
||||
complete, err := IsMergeTransitionComplete(st)
|
||||
if err != nil {
|
||||
@@ -228,20 +236,20 @@ func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header *engin
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(header.ParentHash, h.BlockHash) {
|
||||
if !bytes.Equal(header.ParentHash(), h.BlockHash) {
|
||||
return ErrInvalidPayloadBlockHash
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidatePayloadHeader validates the payload header.
|
||||
func ValidatePayloadHeader(st state.BeaconState, header *enginev1.ExecutionPayloadHeader) error {
|
||||
func ValidatePayloadHeader(st state.BeaconState, header interfaces.ExecutionData) error {
|
||||
// Validate header's random mix matches with state in current epoch
|
||||
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(header.PrevRandao, random) {
|
||||
if !bytes.Equal(header.PrevRandao(), random) {
|
||||
return ErrInvalidPayloadPrevRandao
|
||||
}
|
||||
|
||||
@@ -250,22 +258,20 @@ func ValidatePayloadHeader(st state.BeaconState, header *enginev1.ExecutionPaylo
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if header.Timestamp != uint64(t.Unix()) {
|
||||
if header.Timestamp() != uint64(t.Unix()) {
|
||||
return ErrInvalidPayloadTimeStamp
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProcessPayloadHeader processes the payload header.
|
||||
func ProcessPayloadHeader(st state.BeaconState, header *enginev1.ExecutionPayloadHeader) (state.BeaconState, error) {
|
||||
func ProcessPayloadHeader(st state.BeaconState, header interfaces.ExecutionData) (state.BeaconState, error) {
|
||||
if err := ValidatePayloadHeaderWhenMergeCompletes(st, header); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := ValidatePayloadHeader(st, header); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := st.SetLatestExecutionPayloadHeader(header); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -278,9 +284,9 @@ func GetBlockPayloadHash(blk interfaces.BeaconBlock) ([32]byte, error) {
|
||||
if IsPreBellatrixVersion(blk.Version()) {
|
||||
return payloadHash, nil
|
||||
}
|
||||
payload, err := blk.Body().ExecutionPayload()
|
||||
payload, err := blk.Body().Execution()
|
||||
if err != nil {
|
||||
return payloadHash, err
|
||||
}
|
||||
return bytesutil.ToBytes32(payload.BlockHash), nil
|
||||
return bytesutil.ToBytes32(payload.BlockHash()), nil
|
||||
}
|
||||
|
||||
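The test updates later in this compare show the new call pattern for sites that still hold a raw *enginev1.ExecutionPayload: wrap it first, then pass the wrapped ExecutionData to the blocks package. A sketch of that pattern (names as they appear in this compare, imports elided; treat it as illustrative):

// processRawPayload wraps a proto payload into the ExecutionData interface
// before handing it to ProcessPayload, mirroring the updated tests.
func processRawPayload(st state.BeaconState, p *enginev1.ExecutionPayload) (state.BeaconState, error) {
	wrapped, err := wrapper.WrappedExecutionPayload(p)
	if err != nil {
		return nil, err
	}
	return blocks.ProcessPayload(st, wrapped)
}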
@@ -9,7 +9,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/forks/bellatrix"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/encoding/ssz"
|
||||
@@ -102,15 +101,6 @@ func Test_IsMergeComplete(t *testing.T) {
|
||||
}(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "has tx root",
|
||||
payload: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.TransactionsRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return h
|
||||
}(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "has extra data",
|
||||
payload: func() *enginev1.ExecutionPayloadHeader {
|
||||
@@ -160,7 +150,9 @@ func Test_IsMergeComplete(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(tt.payload))
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(tt.payload)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(wrappedHeader))
|
||||
got, err := blocks.IsMergeTransitionComplete(st)
|
||||
require.NoError(t, err)
|
||||
if got != tt.want {
|
||||
@@ -170,185 +162,6 @@ func Test_IsMergeComplete(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func Test_IsMergeTransitionBlockUsingPayloadHeader(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
payload *enginev1.ExecutionPayload
|
||||
header *enginev1.ExecutionPayloadHeader
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "empty header, empty payload",
|
||||
payload: emptyPayload(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "non-empty header, empty payload",
|
||||
payload: emptyPayload(),
|
||||
header: func() *enginev1.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return h
|
||||
}(),
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has parent hash",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has fee recipient",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.FeeRecipient = bytesutil.PadTo([]byte{'a'}, fieldparams.FeeRecipientLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has state root",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.StateRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has receipt root",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.ReceiptsRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has logs bloom",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.LogsBloom = bytesutil.PadTo([]byte{'a'}, fieldparams.LogsBloomLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has random",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.PrevRandao = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has base fee",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.BaseFeePerGas = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has block hash",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.BlockHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has tx",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.Transactions = [][]byte{{'a'}}
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has extra data",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.ExtraData = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has block number",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.BlockNumber = 1
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has gas limit",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.GasLimit = 1
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has gas used",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.GasUsed = 1
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has timestamp",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.Timestamp = 1
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Body.ExecutionPayload = tt.payload
|
||||
body, err := wrapper.WrappedBeaconBlockBody(blk.Block.Body)
|
||||
require.NoError(t, err)
|
||||
got, err := blocks.IsMergeTransitionBlockUsingPreStatePayloadHeader(tt.header, body)
|
||||
require.NoError(t, err)
|
||||
if got != tt.want {
|
||||
t.Errorf("MergeTransitionBlock() got = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_IsExecutionBlock(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -442,7 +255,9 @@ func Test_IsExecutionEnabled(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(tt.header))
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(tt.header)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(wrappedHeader))
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Body.ExecutionPayload = tt.payload
|
||||
body, err := wrapper.WrappedBeaconBlockBody(blk.Block.Body)
|
||||
@@ -567,8 +382,12 @@ func Test_ValidatePayloadWhenMergeCompletes(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(tt.header))
|
||||
err := blocks.ValidatePayloadWhenMergeCompletes(st, tt.payload)
|
||||
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(tt.header)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(wrappedHeader))
|
||||
wrappedPayload, err := wrapper.WrappedExecutionPayload(tt.payload)
|
||||
require.NoError(t, err)
|
||||
err = blocks.ValidatePayloadWhenMergeCompletes(st, wrappedPayload)
|
||||
if err != nil {
|
||||
require.Equal(t, tt.err.Error(), err.Error())
|
||||
} else {
|
||||
@@ -616,7 +435,9 @@ func Test_ValidatePayload(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := blocks.ValidatePayload(st, tt.payload)
|
||||
wrappedPayload, err := wrapper.WrappedExecutionPayload(tt.payload)
|
||||
require.NoError(t, err)
|
||||
err = blocks.ValidatePayload(st, wrappedPayload)
|
||||
if err != nil {
|
||||
require.Equal(t, tt.err.Error(), err.Error())
|
||||
} else {
|
||||
@@ -664,12 +485,14 @@ func Test_ProcessPayload(t *testing.T) {
|
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
st, err := blocks.ProcessPayload(st, tt.payload)
wrappedPayload, err := wrapper.WrappedExecutionPayload(tt.payload)
require.NoError(t, err)
st, err := blocks.ProcessPayload(st, wrappedPayload)
if err != nil {
require.Equal(t, tt.err.Error(), err.Error())
} else {
require.Equal(t, tt.err, err)
want, err := bellatrix.PayloadToHeader(tt.payload)
want, err := wrapper.PayloadToHeader(wrappedPayload)
require.Equal(t, tt.err, err)
got, err := st.LatestExecutionPayloadHeader()
require.NoError(t, err)
@@ -717,7 +540,9 @@ func Test_ProcessPayloadHeader(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
st, err := blocks.ProcessPayloadHeader(st, tt.header)
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(tt.header)
require.NoError(t, err)
st, err := blocks.ProcessPayloadHeader(st, wrappedHeader)
if err != nil {
require.Equal(t, tt.err.Error(), err.Error())
} else {
@@ -768,7 +593,9 @@ func Test_ValidatePayloadHeader(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := blocks.ValidatePayloadHeader(st, tt.header)
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(tt.header)
require.NoError(t, err)
err = blocks.ValidatePayloadHeader(st, wrappedHeader)
require.Equal(t, tt.err, err)
})
}
@@ -777,7 +604,9 @@ func Test_ValidatePayloadHeader(t *testing.T) {
func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
emptySt := st.Copy()
require.NoError(t, st.SetLatestExecutionPayloadHeader(&enginev1.ExecutionPayloadHeader{BlockHash: []byte{'a'}}))
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(&enginev1.ExecutionPayloadHeader{BlockHash: []byte{'a'}})
require.NoError(t, err)
require.NoError(t, st.SetLatestExecutionPayloadHeader(wrappedHeader))
tests := []struct {
name string
state state.BeaconState
@@ -816,7 +645,9 @@ func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := blocks.ValidatePayloadHeaderWhenMergeCompletes(tt.state, tt.header)
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(tt.header)
require.NoError(t, err)
err = blocks.ValidatePayloadHeaderWhenMergeCompletes(tt.state, wrappedHeader)
require.Equal(t, tt.err, err)
})
}
@@ -824,7 +655,9 @@ func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {

func Test_PayloadToHeader(t *testing.T) {
p := emptyPayload()
h, err := bellatrix.PayloadToHeader(p)
wrappedPayload, err := wrapper.WrappedExecutionPayload(p)
require.NoError(t, err)
h, err := wrapper.PayloadToHeader(wrappedPayload)
require.NoError(t, err)
txRoot, err := ssz.TransactionsRoot(p.Transactions)
require.NoError(t, err)
@@ -863,7 +696,9 @@ func Test_PayloadToHeader(t *testing.T) {

func BenchmarkBellatrixComplete(b *testing.B) {
st, _ := util.DeterministicGenesisStateBellatrix(b, 1)
require.NoError(b, st.SetLatestExecutionPayloadHeader(emptyPayloadHeader()))
wrappedHeader, err := wrapper.WrappedExecutionPayloadHeader(emptyPayloadHeader())
require.NoError(b, err)
require.NoError(b, st.SetLatestExecutionPayloadHeader(wrappedHeader))

b.ResetTimer()
for i := 0; i < b.N; i++ {
78
beacon-chain/core/blocks/withdrawals.go
Normal file
@@ -0,0 +1,78 @@
package blocks

import (
"bytes"

"github.com/prysmaticlabs/prysm/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/hash/htr"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/time/slots"
)

const executionToBLSPadding = 12

// ProcessBLSToExecutionChange validates a SignedBLSToExecution message and
// changes the validator's withdrawal address accordingly.
//
// Spec pseudocode definition:
//
//def process_bls_to_execution_change(state: BeaconState, signed_address_change: SignedBLSToExecutionChange) -> None:
// validator = state.validators[address_change.validator_index]
//
// assert validator.withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX
// assert validator.withdrawal_credentials[1:] == hash(address_change.from_bls_pubkey)[1:]
//
// domain = get_domain(state, DOMAIN_BLS_TO_EXECUTION_CHANGE)
// signing_root = compute_signing_root(address_change, domain)
// assert bls.Verify(address_change.from_bls_pubkey, signing_root, signed_address_change.signature)
//
// validator.withdrawal_credentials = (
// ETH1_ADDRESS_WITHDRAWAL_PREFIX
// + b'\x00' * 11
// + address_change.to_execution_address
// )
//
func ProcessBLSToExecutionChange(st state.BeaconState, signed *ethpb.SignedBLSToExecutionChange) (state.BeaconState, error) {
if signed == nil {
return st, errNilSignedWithdrawalMessage
}
message := signed.Message
if message == nil {
return st, errNilWithdrawalMessage
}

val, err := st.ValidatorAtIndex(message.ValidatorIndex)
if err != nil {
return nil, err
}
cred := val.WithdrawalCredentials
if cred[0] != params.BeaconConfig().BLSWithdrawalPrefixByte {
return nil, errInvalidBLSPrefix
}

// hash the public key and verify it matches the withdrawal credentials
fromPubkey := message.FromBlsPubkey
pubkeyChunks := [][32]byte{bytesutil.ToBytes32(fromPubkey[:32]), bytesutil.ToBytes32(fromPubkey[32:])}
digest := make([][32]byte, 1)
htr.VectorizedSha256(pubkeyChunks, digest)
if !bytes.Equal(digest[0][1:], cred[1:]) {
return nil, errInvalidWithdrawalCredentials
}

epoch := slots.ToEpoch(st.Slot())
domain, err := signing.Domain(st.Fork(), epoch, params.BeaconConfig().DomainBLSToExecutionChange, st.GenesisValidatorsRoot())
if err != nil {
return nil, err
}
if err := signing.VerifySigningRoot(message, fromPubkey, signed.Signature, domain); err != nil {
return nil, signing.ErrSigFailedToVerify
}
newCredentials := make([]byte, executionToBLSPadding)
newCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
val.WithdrawalCredentials = append(newCredentials, message.ToExecutionAddress...)
err = st.UpdateValidatorAtIndex(message.ValidatorIndex, val)
return st, err
}
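Editorial note (not part of the diff): the new ProcessBLSToExecutionChange above follows the spec pseudocode closely, and the credential rotation reduces to plain byte handling — verify the BLS prefix and the hash commitment, then emit a 0x01-prefixed credential with 11 padding bytes and the 20-byte execution address. The sketch below illustrates that with plain SHA-256 over the raw key, as in the spec pseudocode, whereas the diff hashes two padded 32-byte chunks via the htr helper; the helper and variable names here are hypothetical.

// Sketch of the credential rotation performed by ProcessBLSToExecutionChange.
// Prefix values 0x00 (BLS) and 0x01 (ETH1 address) follow the consensus spec.
package main

import (
	"bytes"
	"crypto/sha256"
	"errors"
	"fmt"
)

const (
	blsWithdrawalPrefix  = byte(0x00)
	eth1WithdrawalPrefix = byte(0x01)
)

// rotateCredentials checks that creds commit to blsPubkey and, if so, returns the
// new execution-style credentials: prefix byte, 11 zero bytes, 20-byte address.
func rotateCredentials(creds, blsPubkey, execAddress []byte) ([]byte, error) {
	if len(creds) != 32 || len(execAddress) != 20 {
		return nil, errors.New("unexpected input length")
	}
	if creds[0] != blsWithdrawalPrefix {
		return nil, errors.New("withdrawal credential prefix is not a BLS prefix")
	}
	digest := sha256.Sum256(blsPubkey)
	if !bytes.Equal(digest[1:], creds[1:]) {
		return nil, errors.New("withdrawal credentials do not match")
	}
	newCreds := make([]byte, 12) // prefix byte plus 11 bytes of padding
	newCreds[0] = eth1WithdrawalPrefix
	return append(newCreds, execAddress...), nil
}

func main() {
	pubkey := make([]byte, 48) // placeholder BLS public key
	digest := sha256.Sum256(pubkey)
	creds := append([]byte{blsWithdrawalPrefix}, digest[1:]...)
	addr := bytes.Repeat([]byte{0xaa}, 20)
	out, err := rotateCredentials(creds, pubkey, addr)
	fmt.Println(out, err)
}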
193
beacon-chain/core/blocks/withdrawals_test.go
Normal file
@@ -0,0 +1,193 @@
package blocks_test

import (
"testing"

"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/bls"
"github.com/prysmaticlabs/prysm/crypto/hash/htr"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
)

func TestProcessBLSToExecutionChange(t *testing.T) {
t.Run("happy case", func(t *testing.T) {
priv, err := bls.RandKey()
require.NoError(t, err)
pubkey := priv.PublicKey().Marshal()

message := &ethpb.BLSToExecutionChange{
ToExecutionAddress: []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13},
ValidatorIndex: 0,
FromBlsPubkey: pubkey,
}

pubkeyChunks := [][32]byte{bytesutil.ToBytes32(pubkey[:32]), bytesutil.ToBytes32(pubkey[32:])}
digest := make([][32]byte, 1)
htr.VectorizedSha256(pubkeyChunks, digest)
digest[0][0] = params.BeaconConfig().BLSWithdrawalPrefixByte

registry := []*ethpb.Validator{
{
WithdrawalCredentials: digest[0][:],
},
}
st, err := v1.InitializeFromProto(&ethpb.BeaconState{
Validators: registry,
Fork: &ethpb.Fork{
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
},
Slot: params.BeaconConfig().SlotsPerEpoch * 5,
})
require.NoError(t, err)

signature, err := signing.ComputeDomainAndSign(st, time.CurrentEpoch(st), message, params.BeaconConfig().DomainBLSToExecutionChange, priv)
require.NoError(t, err)

signed := &ethpb.SignedBLSToExecutionChange{
Message: message,
Signature: signature,
}

st, err = blocks.ProcessBLSToExecutionChange(st, signed)
require.NoError(t, err)

val, err := st.ValidatorAtIndex(0)
require.NoError(t, err)

require.DeepEqual(t, message.ToExecutionAddress, val.WithdrawalCredentials[12:])
})

t.Run("non-existent validator", func(t *testing.T) {
priv, err := bls.RandKey()
require.NoError(t, err)
pubkey := priv.PublicKey().Marshal()

message := &ethpb.BLSToExecutionChange{
ToExecutionAddress: []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13},
ValidatorIndex: 1,
FromBlsPubkey: pubkey,
}

pubkeyChunks := [][32]byte{bytesutil.ToBytes32(pubkey[:32]), bytesutil.ToBytes32(pubkey[32:])}
digest := make([][32]byte, 1)
htr.VectorizedSha256(pubkeyChunks, digest)
digest[0][0] = params.BeaconConfig().BLSWithdrawalPrefixByte

registry := []*ethpb.Validator{
{
WithdrawalCredentials: digest[0][:],
},
}
st, err := v1.InitializeFromProto(&ethpb.BeaconState{
Validators: registry,
Fork: &ethpb.Fork{
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
},
Slot: params.BeaconConfig().SlotsPerEpoch * 5,
})
require.NoError(t, err)

signature, err := signing.ComputeDomainAndSign(st, time.CurrentEpoch(st), message, params.BeaconConfig().DomainBLSToExecutionChange, priv)
require.NoError(t, err)

signed := &ethpb.SignedBLSToExecutionChange{
Message: message,
Signature: signature,
}

_, err = blocks.ProcessBLSToExecutionChange(st, signed)
require.ErrorContains(t, "out of range", err)
})

t.Run("signature does not verify", func(t *testing.T) {
priv, err := bls.RandKey()
require.NoError(t, err)
pubkey := priv.PublicKey().Marshal()

message := &ethpb.BLSToExecutionChange{
ToExecutionAddress: []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13},
ValidatorIndex: 0,
FromBlsPubkey: pubkey,
}

registry := []*ethpb.Validator{
{
WithdrawalCredentials: params.BeaconConfig().ZeroHash[:],
},
}
st, err := v1.InitializeFromProto(&ethpb.BeaconState{
Validators: registry,
Fork: &ethpb.Fork{
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
},
Slot: params.BeaconConfig().SlotsPerEpoch * 5,
})
require.NoError(t, err)

signature, err := signing.ComputeDomainAndSign(st, time.CurrentEpoch(st), message, params.BeaconConfig().DomainBLSToExecutionChange, priv)
require.NoError(t, err)

signed := &ethpb.SignedBLSToExecutionChange{
Message: message,
Signature: signature,
}

_, err = blocks.ProcessBLSToExecutionChange(st, signed)
require.ErrorContains(t, "withdrawal credentials do not match", err)
})

t.Run("invalid BLS prefix", func(t *testing.T) {
priv, err := bls.RandKey()
require.NoError(t, err)
pubkey := priv.PublicKey().Marshal()

message := &ethpb.BLSToExecutionChange{
ToExecutionAddress: []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13},
ValidatorIndex: 0,
FromBlsPubkey: pubkey,
}

pubkeyChunks := [][32]byte{bytesutil.ToBytes32(pubkey[:32]), bytesutil.ToBytes32(pubkey[32:])}
digest := make([][32]byte, 1)
htr.VectorizedSha256(pubkeyChunks, digest)
digest[0][0] = params.BeaconConfig().BLSWithdrawalPrefixByte

registry := []*ethpb.Validator{
{
WithdrawalCredentials: digest[0][:],
},
}
registry[0].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte

st, err := v1.InitializeFromProto(&ethpb.BeaconState{
Validators: registry,
Fork: &ethpb.Fork{
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
},
Slot: params.BeaconConfig().SlotsPerEpoch * 5,
})
require.NoError(t, err)

signature, err := signing.ComputeDomainAndSign(st, time.CurrentEpoch(st), message, params.BeaconConfig().DomainBLSToExecutionChange, priv)
require.NoError(t, err)

signed := &ethpb.SignedBLSToExecutionChange{
Message: message,
Signature: signature,
}

_, err = blocks.ProcessBLSToExecutionChange(st, signed)
require.ErrorContains(t, "withdrawal credential prefix is not a BLS prefix", err)

})
}
@@ -1,8 +1,6 @@
package precompute

import (
"context"

"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
@@ -17,7 +15,7 @@ var errNilState = errors.New("nil state")

// UnrealizedCheckpoints returns the justification and finalization checkpoints of the
// given state as if it was progressed with empty slots until the next epoch.
func UnrealizedCheckpoints(ctx context.Context, st state.BeaconState) (*ethpb.Checkpoint, *ethpb.Checkpoint, error) {
func UnrealizedCheckpoints(st state.BeaconState) (*ethpb.Checkpoint, *ethpb.Checkpoint, error) {
if st == nil || st.IsNil() {
return nil, nil, errNilState
}

@@ -243,7 +243,7 @@ func TestUnrealizedCheckpoints(t *testing.T) {
_, _, err = altair.InitializePrecomputeValidators(context.Background(), state)
require.NoError(t, err)

jc, fc, err := precompute.UnrealizedCheckpoints(context.Background(), state)
jc, fc, err := precompute.UnrealizedCheckpoints(state)
require.NoError(t, err)
require.DeepEqual(t, test.expectedJustified, jc.Epoch)
require.DeepEqual(t, test.expectedFinalized, fc.Epoch)

@@ -18,7 +18,7 @@ import (
// pre computed instances of validators attesting records and total
// balances attested in an epoch.
func New(ctx context.Context, s state.BeaconState) ([]*Validator, *Balance, error) {
_, span := trace.StartSpan(ctx, "precomputeEpoch.New")
ctx, span := trace.StartSpan(ctx, "precomputeEpoch.New")
defer span.End()

pValidators := make([]*Validator, s.NumValidators())
@@ -290,24 +290,17 @@ func ProcessBlockForStateRoot(
return nil, errors.Wrap(err, "could not check if execution is enabled")
}
if enabled {
executionData, err := blk.Body().Execution()
if err != nil {
return nil, err
}
if blk.IsBlinded() {
header, err := blk.Body().ExecutionPayloadHeader()
if err != nil {
return nil, err
}
state, err = b.ProcessPayloadHeader(state, header)
if err != nil {
return nil, errors.Wrap(err, "could not process execution payload header")
}
state, err = b.ProcessPayloadHeader(state, executionData)
} else {
payload, err := blk.Body().ExecutionPayload()
if err != nil {
return nil, err
}
state, err = b.ProcessPayload(state, payload)
if err != nil {
return nil, errors.Wrap(err, "could not process execution payload")
}
state, err = b.ProcessPayload(state, executionData)
}
if err != nil {
return nil, errors.Wrap(err, "could not process execution data")
}
}

@@ -133,16 +133,15 @@ func TestProcessBlock_IncorrectProcessExits(t *testing.T) {
}),
},
}
au := util.AttestationUtil{}
attesterSlashings := []*ethpb.AttesterSlashing{
{
Attestation_1: &ethpb.IndexedAttestation{
Data: au.HydrateAttestationData(&ethpb.AttestationData{}),
Data: util.HydrateAttestationData(&ethpb.AttestationData{}),
AttestingIndices: []uint64{0, 1},
Signature: make([]byte, 96),
},
Attestation_2: &ethpb.IndexedAttestation{
Data: au.HydrateAttestationData(&ethpb.AttestationData{}),
Data: util.HydrateAttestationData(&ethpb.AttestationData{}),
AttestingIndices: []uint64{0, 1},
Signature: make([]byte, 96),
},
@@ -153,7 +152,7 @@ func TestProcessBlock_IncorrectProcessExits(t *testing.T) {
blockRoots = append(blockRoots, []byte{byte(i)})
}
require.NoError(t, beaconState.SetBlockRoots(blockRoots))
blockAtt := au.HydrateAttestation(&ethpb.Attestation{
blockAtt := util.HydrateAttestation(&ethpb.Attestation{
Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{Root: bytesutil.PadTo([]byte("hello-world"), 32)},
},
@@ -256,8 +255,7 @@ func createFullBlockWithOperations(t *testing.T) (state.BeaconState,
require.NoError(t, beaconState.SetValidators(validators))

mockRoot2 := [32]byte{'A'}
au := util.AttestationUtil{}
att1 := au.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
att1 := util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: mockRoot2[:]},
},
@@ -273,7 +271,7 @@ func createFullBlockWithOperations(t *testing.T) (state.BeaconState,
att1.Signature = aggregateSig.Marshal()

mockRoot3 := [32]byte{'B'}
att2 := au.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
att2 := util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: mockRoot3[:]},
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, fieldparams.RootLength)},
@@ -303,7 +301,7 @@ func createFullBlockWithOperations(t *testing.T) (state.BeaconState,

aggBits := bitfield.NewBitlist(1)
aggBits.SetBitAt(0, true)
blockAtt := au.HydrateAttestation(&ethpb.Attestation{
blockAtt := util.HydrateAttestation(&ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: beaconState.Slot(),
Target: &ethpb.Checkpoint{Epoch: time.CurrentEpoch(beaconState)},

@@ -53,6 +53,7 @@ type ReadOnlyDatabase interface {
PowchainData(ctx context.Context) (*ethpb.ETH1ChainData, error)
// Fee recipients operations.
FeeRecipientByValidatorID(ctx context.Context, id types.ValidatorIndex) (common.Address, error)
RegistrationByValidatorID(ctx context.Context, id types.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
// origin checkpoint sync support
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
BackfillBlockRoot(ctx context.Context) ([32]byte, error)

@@ -17,6 +17,7 @@ go_library(
"log.go",
"migration.go",
"migration_archived_index.go",
"migration_blinded_beacon_blocks.go",
"migration_block_slot_index.go",
"migration_state_validators.go",
"powchain.go",

@@ -11,7 +11,7 @@ import (

// LastArchivedSlot from the db.
func (s *Store) LastArchivedSlot(ctx context.Context) (types.Slot, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedSlot")
ctx, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedSlot")
defer span.End()
var index types.Slot
err := s.db.View(func(tx *bolt.Tx) error {
@@ -26,7 +26,7 @@ func (s *Store) LastArchivedSlot(ctx context.Context) (types.Slot, error) {

// LastArchivedRoot from the db.
func (s *Store) LastArchivedRoot(ctx context.Context) [32]byte {
_, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedRoot")
ctx, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedRoot")
defer span.End()

var blockRoot []byte
@@ -44,7 +44,7 @@ func (s *Store) LastArchivedRoot(ctx context.Context) [32]byte {
// ArchivedPointRoot returns the block root of an archived point from the DB.
// This is essential for cold state management and to restore a cold state.
func (s *Store) ArchivedPointRoot(ctx context.Context, slot types.Slot) [32]byte {
_, span := trace.StartSpan(ctx, "BeaconDB.ArchivedPointRoot")
ctx, span := trace.StartSpan(ctx, "BeaconDB.ArchivedPointRoot")
defer span.End()

var blockRoot []byte
@@ -61,7 +61,7 @@ func (s *Store) ArchivedPointRoot(ctx context.Context, slot types.Slot) [32]byte

// HasArchivedPoint returns true if an archived point exists in DB.
func (s *Store) HasArchivedPoint(ctx context.Context, slot types.Slot) bool {
_, span := trace.StartSpan(ctx, "BeaconDB.HasArchivedPoint")
ctx, span := trace.StartSpan(ctx, "BeaconDB.HasArchivedPoint")
defer span.End()
var exists bool
if err := s.db.View(func(tx *bolt.Tx) error {

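Editorial note (not part of the diff): many hunks in this compare change `_, span := trace.StartSpan(ctx, ...)` to `ctx, span := trace.StartSpan(ctx, ...)`. The point is context propagation — the returned context carries the new span, so child operations are only parented to it if the caller keeps that context. A minimal sketch of the pattern follows, written directly against go.opencensus.io/trace (Prysm wraps an equivalent API); the span names are illustrative.

// Sketch: re-assigning ctx so that child spans nest under the parent span.
package main

import (
	"context"

	"go.opencensus.io/trace"
)

func parentOp(ctx context.Context) {
	// Discarding the returned context still records this span, but anything
	// called with the original ctx would not be parented to it.
	ctx, span := trace.StartSpan(ctx, "BeaconDB.parentOp")
	defer span.End()

	childOp(ctx) // child spans now nest under BeaconDB.parentOp
}

func childOp(ctx context.Context) {
	_, span := trace.StartSpan(ctx, "BeaconDB.childOp")
	defer span.End()
}

func main() {
	parentOp(context.Background())
}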
@@ -10,6 +10,7 @@ import (
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
@@ -53,7 +54,7 @@ func (s *Store) Block(ctx context.Context, blockRoot [32]byte) (interfaces.Signe
// at the time the chain was started, used to initialize the database and chain
// without syncing from genesis.
func (s *Store) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.OriginCheckpointBlockRoot")
ctx, span := trace.StartSpan(ctx, "BeaconDB.OriginCheckpointBlockRoot")
defer span.End()

var root [32]byte
@@ -72,7 +73,7 @@ func (s *Store) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)

// BackfillBlockRoot keeps track of the highest block available before the OriginCheckpointBlockRoot
func (s *Store) BackfillBlockRoot(ctx context.Context) ([32]byte, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.BackfillBlockRoot")
ctx, span := trace.StartSpan(ctx, "BeaconDB.BackfillBlockRoot")
defer span.End()

var root [32]byte
@@ -130,7 +131,7 @@ func (s *Store) Blocks(ctx context.Context, f *filters.QueryFilter) ([]interface
encoded := bkt.Get(keys[i])
blk, err := unmarshalBlock(ctx, encoded)
if err != nil {
return err
return errors.Wrapf(err, "could not unmarshal block with key %#x", keys[i])
}
blocks = append(blocks, blk)
blockRoots = append(blockRoots, bytesutil.ToBytes32(keys[i]))
@@ -168,7 +169,7 @@ func (s *Store) BlockRoots(ctx context.Context, f *filters.QueryFilter) ([][32]b

// HasBlock checks if a block by root exists in the db.
func (s *Store) HasBlock(ctx context.Context, blockRoot [32]byte) bool {
_, span := trace.StartSpan(ctx, "BeaconDB.HasBlock")
ctx, span := trace.StartSpan(ctx, "BeaconDB.HasBlock")
defer span.End()
if v, ok := s.blockCache.Get(string(blockRoot[:])); v != nil && ok {
return true
@@ -304,6 +305,16 @@ func (s *Store) SaveBlocks(ctx context.Context, blocks []interfaces.SignedBeacon
if err := updateValueForIndices(ctx, indicesForBlocks[i], blockRoots[i], tx); err != nil {
return errors.Wrap(err, "could not update DB indices")
}
if features.Get().EnableOnlyBlindedBeaconBlocks {
blindedBlock, err := blk.ToBlinded()
if err != nil {
if !errors.Is(err, wrapper.ErrUnsupportedVersion) {
return err
}
} else {
blk = blindedBlock
}
}
s.blockCache.Set(string(blockRoots[i]), blk, int64(len(encodedBlocks[i])))
if err := bkt.Put(blockRoots[i], encodedBlocks[i]); err != nil {
return err
@@ -315,7 +326,7 @@ func (s *Store) SaveBlocks(ctx context.Context, blocks []interfaces.SignedBeacon

// SaveHeadBlockRoot to the db.
func (s *Store) SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveHeadBlockRoot")
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveHeadBlockRoot")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
hasStateSummary := s.hasStateSummaryBytes(tx, blockRoot)
@@ -366,7 +377,7 @@ func (s *Store) GenesisBlockRoot(ctx context.Context) ([32]byte, error) {

// SaveGenesisBlockRoot to the db.
func (s *Store) SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveGenesisBlockRoot")
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveGenesisBlockRoot")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
@@ -379,7 +390,7 @@ func (s *Store) SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) er
// This value is used by a running beacon chain node to locate the state at the beginning
// of the chain history, in places where genesis would typically be used.
func (s *Store) SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveOriginCheckpointBlockRoot")
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveOriginCheckpointBlockRoot")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
@@ -390,7 +401,7 @@ func (s *Store) SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32
// SaveBackfillBlockRoot is used to keep track of the most recently backfilled block root when
// the node was initialized via checkpoint sync.
func (s *Store) SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillBlockRoot")
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillBlockRoot")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
@@ -489,7 +500,7 @@ func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id types.Validato
// SaveFeeRecipientsByValidatorIDs saves the fee recipients for validator ids.
// Error is returned if `ids` and `recipients` are not the same length.
func (s *Store) SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []types.ValidatorIndex, feeRecipients []common.Address) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveFeeRecipientByValidatorID")
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveFeeRecipientByValidatorID")
defer span.End()

if len(ids) != len(feeRecipients) {
@@ -527,7 +538,7 @@ func (s *Store) RegistrationByValidatorID(ctx context.Context, id types.Validato
// SaveRegistrationsByValidatorIDs saves the validator registrations for validator ids.
// Error is returned if `ids` and `registrations` are not the same length.
func (s *Store) SaveRegistrationsByValidatorIDs(ctx context.Context, ids []types.ValidatorIndex, regs []*ethpb.ValidatorRegistrationV1) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveRegistrationsByValidatorIDs")
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveRegistrationsByValidatorIDs")
defer span.End()

if len(ids) != len(regs) {
@@ -614,7 +625,7 @@ func blockRootsBySlotRange(
bkt *bolt.Bucket,
startSlotEncoded, endSlotEncoded, startEpochEncoded, endEpochEncoded, slotStepEncoded interface{},
) ([][]byte, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlotRange")
ctx, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlotRange")
defer span.End()

// Return nothing when all slot parameters are missing
@@ -679,7 +690,7 @@ func blockRootsBySlotRange(

// blockRootsBySlot retrieves the block roots by slot
func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot types.Slot) ([][32]byte, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlot")
ctx, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlot")
defer span.End()

bkt := tx.Bucket(blockSlotIndicesBucket)
@@ -700,7 +711,7 @@ func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot types.Slot) ([][32]
// a map of bolt DB index buckets corresponding to each particular key for indices for
// data, such as (shard indices bucket -> shard 5).
func createBlockIndicesFromBlock(ctx context.Context, block interfaces.BeaconBlock) map[string][]byte {
_, span := trace.StartSpan(ctx, "BeaconDB.createBlockIndicesFromBlock")
ctx, span := trace.StartSpan(ctx, "BeaconDB.createBlockIndicesFromBlock")
defer span.End()
indicesByBucket := make(map[string][]byte)
// Every index has a unique bucket for fast, binary-search
@@ -728,7 +739,7 @@ func createBlockIndicesFromBlock(ctx context.Context, block interfaces.BeaconBlo
// objects. If a certain filter criterion does not apply to
// blocks, an appropriate error is returned.
func createBlockIndicesFromFilters(ctx context.Context, f *filters.QueryFilter) (map[string][]byte, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.createBlockIndicesFromFilters")
ctx, span := trace.StartSpan(ctx, "BeaconDB.createBlockIndicesFromFilters")
defer span.End()
indicesByBucket := make(map[string][]byte)
for k, v := range f.Filters() {
@@ -758,7 +769,7 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.SignedBeaconBlock
var err error
enc, err = snappy.Decode(nil, enc)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "could not snappy decode block")
}
var rawBlock ssz.Unmarshaler
switch {
@@ -766,23 +777,23 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.SignedBeaconBlock
// Marshal block bytes to altair beacon block.
rawBlock = &ethpb.SignedBeaconBlockAltair{}
if err := rawBlock.UnmarshalSSZ(enc[len(altairKey):]); err != nil {
return nil, err
return nil, errors.Wrap(err, "could not unmarshal Altair block")
}
case hasBellatrixKey(enc):
rawBlock = &ethpb.SignedBeaconBlockBellatrix{}
if err := rawBlock.UnmarshalSSZ(enc[len(bellatrixKey):]); err != nil {
return nil, err
return nil, errors.Wrap(err, "could not unmarshal Bellatrix block")
}
case hasBellatrixBlindKey(enc):
rawBlock = &ethpb.SignedBlindedBeaconBlockBellatrix{}
if err := rawBlock.UnmarshalSSZ(enc[len(bellatrixBlindKey):]); err != nil {
return nil, err
return nil, errors.Wrap(err, "could not unmarshal blinded Bellatrix block")
}
default:
// Marshal block bytes to phase 0 beacon block.
rawBlock = &ethpb.SignedBeaconBlock{}
if err := rawBlock.UnmarshalSSZ(enc); err != nil {
return nil, err
return nil, errors.Wrap(err, "could not unmarshal Phase0 block")
}
}
return wrapper.WrappedSignedBeaconBlock(rawBlock)
@@ -790,19 +801,41 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.SignedBeaconBlock

// marshal versioned beacon block from struct type down to bytes.
func marshalBlock(_ context.Context, blk interfaces.SignedBeaconBlock) ([]byte, error) {
obj, err := blk.MarshalSSZ()
if err != nil {
return nil, err
var encodedBlock []byte
var err error
blockToSave := blk
if features.Get().EnableOnlyBlindedBeaconBlocks {
blindedBlock, err := blk.ToBlinded()
switch {
case errors.Is(err, wrapper.ErrUnsupportedVersion):
encodedBlock, err = blk.MarshalSSZ()
if err != nil {
return nil, errors.Wrap(err, "could not marshal non-blinded block")
}
case err != nil:
return nil, errors.Wrap(err, "could not convert block to blinded format")
default:
encodedBlock, err = blindedBlock.MarshalSSZ()
if err != nil {
return nil, errors.Wrap(err, "could not marshal blinded block")
}
blockToSave = blindedBlock
}
} else {
encodedBlock, err = blk.MarshalSSZ()
if err != nil {
return nil, err
}
}
switch blk.Version() {
switch blockToSave.Version() {
case version.BellatrixBlind:
return snappy.Encode(nil, append(bellatrixBlindKey, obj...)), nil
return snappy.Encode(nil, append(bellatrixBlindKey, encodedBlock...)), nil
case version.Bellatrix:
return snappy.Encode(nil, append(bellatrixKey, obj...)), nil
return snappy.Encode(nil, append(bellatrixKey, encodedBlock...)), nil
case version.Altair:
return snappy.Encode(nil, append(altairKey, obj...)), nil
return snappy.Encode(nil, append(altairKey, encodedBlock...)), nil
case version.Phase0:
return snappy.Encode(nil, obj), nil
return snappy.Encode(nil, encodedBlock), nil
default:
return nil, errors.New("Unknown block version")
}

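Editorial note (not part of the diff): the rewritten marshalBlock above decides between the blinded and full encoding, then frames the bytes the way unmarshalBlock expects — a short version key prepended to the SSZ bytes, with the result snappy-compressed. A minimal sketch of that framing idea follows; the key values and helper names are illustrative, not Prysm's actual constants, and the blinded-conversion branch is omitted.

// Sketch of the version-key + snappy framing used for stored blocks.
package main

import (
	"bytes"
	"errors"
	"fmt"

	"github.com/golang/snappy"
)

var (
	altairKey    = []byte("altair")    // illustrative key values
	bellatrixKey = []byte("bellatrix")
)

// encodeWithKey prepends a version key to the SSZ bytes and snappy-compresses the result.
func encodeWithKey(key, sszBytes []byte) []byte {
	return snappy.Encode(nil, append(key, sszBytes...))
}

// decodeVersioned reverses the framing: snappy-decode, then switch on the key prefix.
func decodeVersioned(enc []byte) (string, []byte, error) {
	raw, err := snappy.Decode(nil, enc)
	if err != nil {
		return "", nil, errors.New("could not snappy decode block")
	}
	switch {
	case bytes.HasPrefix(raw, bellatrixKey):
		return "bellatrix", raw[len(bellatrixKey):], nil
	case bytes.HasPrefix(raw, altairKey):
		return "altair", raw[len(altairKey):], nil
	default:
		return "phase0", raw, nil // phase 0 blocks carry no key
	}
}

func main() {
	enc := encodeWithKey(bellatrixKey, []byte{0x01, 0x02})
	version, payload, err := decodeVersioned(enc)
	fmt.Println(version, payload, err)
}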
@@ -134,11 +134,17 @@ func TestStore_BlocksCRUD(t *testing.T) {
retrievedBlock, err := db.Block(ctx, blockRoot)
require.NoError(t, err)
assert.DeepEqual(t, nil, retrievedBlock, "Expected nil block")

require.NoError(t, db.SaveBlock(ctx, blk))
assert.Equal(t, true, db.HasBlock(ctx, blockRoot), "Expected block to exist in the db")
retrievedBlock, err = db.Block(ctx, blockRoot)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(blk.Proto(), retrievedBlock.Proto()), "Wanted: %v, received: %v", blk, retrievedBlock)
wanted := retrievedBlock
if _, err := retrievedBlock.PbBellatrixBlock(); err == nil {
wanted, err = retrievedBlock.ToBlinded()
require.NoError(t, err)
}
assert.Equal(t, true, proto.Equal(wanted.Proto(), retrievedBlock.Proto()), "Wanted: %v, received: %v", wanted, retrievedBlock)
})
}
}
@@ -314,7 +320,13 @@ func TestStore_BlocksCRUD_NoCache(t *testing.T) {
assert.Equal(t, true, db.HasBlock(ctx, blockRoot), "Expected block to exist in the db")
retrievedBlock, err = db.Block(ctx, blockRoot)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(blk.Proto(), retrievedBlock.Proto()), "Wanted: %v, received: %v", blk, retrievedBlock)

wanted := blk
if _, err := blk.PbBellatrixBlock(); err == nil {
wanted, err = blk.ToBlinded()
require.NoError(t, err)
}
assert.Equal(t, true, proto.Equal(wanted.Proto(), retrievedBlock.Proto()), "Wanted: %v, received: %v", wanted, retrievedBlock)
})
}
}
@@ -524,7 +536,12 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
root := roots[0]
b, err := db.Block(ctx, root)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(block1.Proto(), b.Proto()), "Wanted: %v, received: %v", block1, b)
wanted := block1
if _, err := block1.PbBellatrixBlock(); err == nil {
wanted, err = wanted.ToBlinded()
require.NoError(t, err)
}
assert.Equal(t, true, proto.Equal(wanted.Proto(), b.Proto()), "Wanted: %v, received: %v", wanted, b)

_, roots, err = db.HighestRootsBelowSlot(ctx, 11)
require.NoError(t, err)
@@ -533,7 +550,12 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
root = roots[0]
b, err = db.Block(ctx, root)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(block2.Proto(), b.Proto()), "Wanted: %v, received: %v", block2, b)
wanted2 := block2
if _, err := block2.PbBellatrixBlock(); err == nil {
wanted2, err = block2.ToBlinded()
require.NoError(t, err)
}
assert.Equal(t, true, proto.Equal(wanted2.Proto(), b.Proto()), "Wanted: %v, received: %v", wanted2, b)

_, roots, err = db.HighestRootsBelowSlot(ctx, 101)
require.NoError(t, err)
@@ -542,7 +564,12 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
root = roots[0]
b, err = db.Block(ctx, root)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(block3.Proto(), b.Proto()), "Wanted: %v, received: %v", block3, b)
wanted = block3
if _, err := block3.PbBellatrixBlock(); err == nil {
wanted, err = wanted.ToBlinded()
require.NoError(t, err)
}
assert.Equal(t, true, proto.Equal(wanted.Proto(), b.Proto()), "Wanted: %v, received: %v", wanted, b)
})
}
}
@@ -569,7 +596,12 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
root := roots[0]
b, err := db.Block(ctx, root)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(block1.Proto(), b.Proto()), "Wanted: %v, received: %v", block1, b)
wanted := block1
if _, err := block1.PbBellatrixBlock(); err == nil {
wanted, err = block1.ToBlinded()
require.NoError(t, err)
}
assert.Equal(t, true, proto.Equal(wanted.Proto(), b.Proto()), "Wanted: %v, received: %v", wanted, b)

_, roots, err = db.HighestRootsBelowSlot(ctx, 1)
require.NoError(t, err)
@@ -577,7 +609,12 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
root = roots[0]
b, err = db.Block(ctx, root)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(genesisBlock.Proto(), b.Proto()), "Wanted: %v, received: %v", genesisBlock, b)
wanted = genesisBlock
if _, err := genesisBlock.PbBellatrixBlock(); err == nil {
wanted, err = genesisBlock.ToBlinded()
require.NoError(t, err)
}
assert.Equal(t, true, proto.Equal(wanted.Proto(), b.Proto()), "Wanted: %v, received: %v", wanted, b)

_, roots, err = db.HighestRootsBelowSlot(ctx, 0)
require.NoError(t, err)
@@ -585,7 +622,12 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
root = roots[0]
b, err = db.Block(ctx, root)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(genesisBlock.Proto(), b.Proto()), "Wanted: %v, received: %v", genesisBlock, b)
wanted = genesisBlock
if _, err := genesisBlock.PbBellatrixBlock(); err == nil {
wanted, err = genesisBlock.ToBlinded()
require.NoError(t, err)
}
assert.Equal(t, true, proto.Equal(wanted.Proto(), b.Proto()), "Wanted: %v, received: %v", wanted, b)
})
}
}
@@ -671,15 +713,31 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
assert.Equal(t, 0, len(retrievedBlocks), "Unexpected number of blocks received, expected none")
retrievedBlocks, err = db.BlocksBySlot(ctx, 20)
require.NoError(t, err)
assert.Equal(t, true, proto.Equal(b1.Proto(), retrievedBlocks[0].Proto()), "Wanted: %v, received: %v", b1, retrievedBlocks[0])

wanted := b1
if _, err := b1.PbBellatrixBlock(); err == nil {
wanted, err = b1.ToBlinded()
require.NoError(t, err)
}
assert.Equal(t, true, proto.Equal(retrievedBlocks[0].Proto(), wanted.Proto()), "Wanted: %v, received: %v", retrievedBlocks[0], wanted)
assert.Equal(t, true, len(retrievedBlocks) > 0, "Expected to have blocks")
retrievedBlocks, err = db.BlocksBySlot(ctx, 100)
require.NoError(t, err)
if len(retrievedBlocks) != 2 {
t.Fatalf("Expected 2 blocks, received %d blocks", len(retrievedBlocks))
}
assert.Equal(t, true, proto.Equal(b2.Proto(), retrievedBlocks[0].Proto()), "Wanted: %v, received: %v", b2, retrievedBlocks[0])
assert.Equal(t, true, proto.Equal(b3.Proto(), retrievedBlocks[1].Proto()), "Wanted: %v, received: %v", b3, retrievedBlocks[1])
wanted = b2
if _, err := b2.PbBellatrixBlock(); err == nil {
wanted, err = b2.ToBlinded()
require.NoError(t, err)
}
assert.Equal(t, true, proto.Equal(wanted.Proto(), retrievedBlocks[0].Proto()), "Wanted: %v, received: %v", retrievedBlocks[0], wanted)
wanted = b3
if _, err := b3.PbBellatrixBlock(); err == nil {
wanted, err = b3.ToBlinded()
require.NoError(t, err)
}
assert.Equal(t, true, proto.Equal(retrievedBlocks[1].Proto(), wanted.Proto()), "Wanted: %v, received: %v", retrievedBlocks[1], wanted)
assert.Equal(t, true, len(retrievedBlocks) > 0, "Expected to have blocks")

hasBlockRoots, retrievedBlockRoots, err := db.BlockRootsBySlot(ctx, 1)

@@ -12,7 +12,7 @@ import (
// DepositContractAddress returns contract address is the address of
// the deposit contract on the proof of work chain.
func (s *Store) DepositContractAddress(ctx context.Context) ([]byte, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.DepositContractAddress")
ctx, span := trace.StartSpan(ctx, "BeaconDB.DepositContractAddress")
defer span.End()
var addr []byte
if err := s.db.View(func(tx *bolt.Tx) error {
@@ -27,7 +27,7 @@ func (s *Store) DepositContractAddress(ctx context.Context) ([]byte, error) {

// SaveDepositContractAddress to the db. It returns an error if an address has been previously saved.
func (s *Store) SaveDepositContractAddress(ctx context.Context, addr common.Address) error {
_, span := trace.StartSpan(ctx, "BeaconDB.VerifyContractAddress")
ctx, span := trace.StartSpan(ctx, "BeaconDB.VerifyContractAddress")
defer span.End()

return s.db.Update(func(tx *bolt.Tx) error {

@@ -13,7 +13,7 @@ import (
)

func decode(ctx context.Context, data []byte, dst proto.Message) error {
_, span := trace.StartSpan(ctx, "BeaconDB.decode")
ctx, span := trace.StartSpan(ctx, "BeaconDB.decode")
defer span.End()

data, err := snappy.Decode(nil, data)
@@ -27,7 +27,7 @@ func decode(ctx context.Context, data []byte, dst proto.Message) error {
}

func encode(ctx context.Context, msg proto.Message) ([]byte, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.encode")
ctx, span := trace.StartSpan(ctx, "BeaconDB.encode")
defer span.End()

if msg == nil || reflect.ValueOf(msg).IsNil() {

@@ -166,7 +166,7 @@ func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, chec
// Note: beacon blocks from the latest finalized epoch return true, whether or not they are
// considered canonical in the "head view" of the beacon node.
func (s *Store) IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool {
_, span := trace.StartSpan(ctx, "BeaconDB.IsFinalizedBlock")
ctx, span := trace.StartSpan(ctx, "BeaconDB.IsFinalizedBlock")
defer span.End()

var exists bool

@@ -1,6 +1,7 @@
package kv

import (
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
)

@@ -9,4 +10,7 @@ func init() {
if err := params.SetActive(params.MainnetTestConfig()); err != nil {
panic(err)
}
features.Init(&features.Flags{
EnableOnlyBlindedBeaconBlocks: true,
})
}

@@ -4,6 +4,7 @@ package kv

import (
"context"
"fmt"
"os"
"path"
"time"
@@ -14,6 +15,7 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
prombolt "github.com/prysmaticlabs/prombbolt"
"github.com/prysmaticlabs/prysm/beacon-chain/db/iface"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/io/file"
bolt "go.etcd.io/bbolt"
@@ -183,8 +185,13 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
}); err != nil {
return nil, err
}
err = prometheus.Register(createBoltCollector(kv.db))
return kv, err
if err = prometheus.Register(createBoltCollector(kv.db)); err != nil {
return nil, err
}
if err = kv.checkNeedsResync(); err != nil {
return nil, err
}
return kv, nil
}

// ClearDB removes the previously stored database in the data directory.
@@ -216,6 +223,23 @@ func (s *Store) DatabasePath() string {
return s.databasePath
}

func (s *Store) checkNeedsResync() error {
return s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(migrationsBucket)
hasDisabledFeature := !features.Get().EnableOnlyBlindedBeaconBlocks
if hasDisabledFeature && bkt.Get(migrationBlindedBeaconBlocksKey) != nil {
return fmt.Errorf(
"you have disabled the flag %s, and your node must resync to ensure your "+
"database is compatible. If you do not want to resync, please re-enable the %s flag",
features.EnableOnlyBlindedBeaconBlocks.Name,
features.EnableOnlyBlindedBeaconBlocks.Name,
)
}
return nil
})

}

func createBuckets(tx *bolt.Tx, buckets ...[]byte) error {
for _, bucket := range buckets {
if _, err := tx.CreateBucketIfNotExists(bucket); err != nil {

@@ -4,7 +4,9 @@ import (
"context"
"testing"

"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/testing/require"
bolt "go.etcd.io/bbolt"
)

// setupDB instantiates and returns a Store instance.
@@ -16,3 +18,17 @@ func setupDB(t testing.TB) *Store {
})
return db
}

func Test_checkNeedsResync(t *testing.T) {
store := setupDB(t)
resetFn := features.InitWithReset(&features.Flags{
EnableOnlyBlindedBeaconBlocks: false,
})
defer resetFn()
require.NoError(t, store.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(migrationsBucket)
return bkt.Put(migrationBlindedBeaconBlocksKey, migrationCompleted)
}))
err := store.checkNeedsResync()
require.ErrorContains(t, "your node must resync", err)
}

@@ -14,6 +14,7 @@ var migrations = []migration{
migrateArchivedIndex,
migrateBlockSlotIndex,
migrateStateValidators,
migrateBlindedBeaconBlocksEnabled,
}

// RunMigrations defined in the migrations array.

27
beacon-chain/db/kv/migration_blinded_beacon_blocks.go
Normal file
@@ -0,0 +1,27 @@
package kv

import (
"bytes"
"context"

"github.com/prysmaticlabs/prysm/config/features"
bolt "go.etcd.io/bbolt"
)

var migrationBlindedBeaconBlocksKey = []byte("blinded-beacon-blocks-enabled")

func migrateBlindedBeaconBlocksEnabled(ctx context.Context, db *bolt.DB) error {
if !features.Get().EnableOnlyBlindedBeaconBlocks {
return nil // Only write to the migrations bucket if the feature is enabled.
}
if updateErr := db.Update(func(tx *bolt.Tx) error {
mb := tx.Bucket(migrationsBucket)
if b := mb.Get(migrationBlindedBeaconBlocksKey); bytes.Equal(b, migrationCompleted) {
return nil // Migration already completed.
}
return mb.Put(migrationBlindedBeaconBlocksKey, migrationCompleted)
}); updateErr != nil {
return updateErr
}
return nil
}
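Editorial note (not part of the diff): taken together, migrateBlindedBeaconBlocksEnabled and checkNeedsResync (earlier in this compare) implement a one-way marker — once a database has been written with the blinded-blocks feature enabled, turning the flag off requires a resync. A minimal sketch of that marker-plus-guard pattern against go.etcd.io/bbolt follows; the bucket and key names are illustrative.

// Sketch: record a one-way migration marker, then refuse to start if the
// marker exists while the corresponding feature flag is disabled.
package main

import (
	"errors"
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

var (
	migrationsBucket = []byte("migrations")
	markerKey        = []byte("blinded-beacon-blocks-enabled")
	markerDone       = []byte{1}
)

func writeMarker(db *bolt.DB) error {
	return db.Update(func(tx *bolt.Tx) error {
		bkt, err := tx.CreateBucketIfNotExists(migrationsBucket)
		if err != nil {
			return err
		}
		return bkt.Put(markerKey, markerDone)
	})
}

func checkNeedsResync(db *bolt.DB, featureEnabled bool) error {
	return db.View(func(tx *bolt.Tx) error {
		bkt := tx.Bucket(migrationsBucket)
		if bkt == nil {
			return nil
		}
		if !featureEnabled && bkt.Get(markerKey) != nil {
			return errors.New("flag disabled after migration: your node must resync")
		}
		return nil
	})
}

func main() {
	db, err := bolt.Open("example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := writeMarker(db); err != nil {
		log.Fatal(err)
	}
	fmt.Println(checkNeedsResync(db, false)) // reports the resync error
}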
@@ -13,7 +13,7 @@ import (

// SavePowchainData saves the pow chain data.
func (s *Store) SavePowchainData(ctx context.Context, data *v2.ETH1ChainData) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SavePowchainData")
ctx, span := trace.StartSpan(ctx, "BeaconDB.SavePowchainData")
defer span.End()

if data == nil {
@@ -36,7 +36,7 @@ func (s *Store) SavePowchainData(ctx context.Context, data *v2.ETH1ChainData) er

// PowchainData retrieves the powchain data.
func (s *Store) PowchainData(ctx context.Context) (*v2.ETH1ChainData, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.PowchainData")
ctx, span := trace.StartSpan(ctx, "BeaconDB.PowchainData")
defer span.End()

var data *v2.ETH1ChainData

@@ -338,7 +338,7 @@ func (s *Store) storeValidatorEntriesSeparately(ctx context.Context, tx *bolt.Tx

// HasState checks if a state by root exists in the db.
func (s *Store) HasState(ctx context.Context, blockRoot [32]byte) bool {
_, span := trace.StartSpan(ctx, "BeaconDB.HasState")
ctx, span := trace.StartSpan(ctx, "BeaconDB.HasState")
defer span.End()
hasState := false
err := s.db.View(func(tx *bolt.Tx) error {
@@ -615,7 +615,7 @@ func (s *Store) validatorEntries(ctx context.Context, blockRoot [32]byte) ([]*et

// retrieves and assembles the state information from multiple buckets.
func (s *Store) stateBytes(ctx context.Context, blockRoot [32]byte) ([]byte, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.stateBytes")
ctx, span := trace.StartSpan(ctx, "BeaconDB.stateBytes")
defer span.End()
var dst []byte
err := s.db.View(func(tx *bolt.Tx) error {
@@ -738,7 +738,7 @@ func (s *Store) HighestSlotStatesBelow(ctx context.Context, slot types.Slot) ([]
// a map of bolt DB index buckets corresponding to each particular key for indices for
// data, such as (shard indices bucket -> shard 5).
func createStateIndicesFromStateSlot(ctx context.Context, slot types.Slot) map[string][]byte {
_, span := trace.StartSpan(ctx, "BeaconDB.createStateIndicesFromState")
ctx, span := trace.StartSpan(ctx, "BeaconDB.createStateIndicesFromState")
defer span.End()
indicesByBucket := make(map[string][]byte)
// Every index has a unique bucket for fast, binary-search

@@ -64,7 +64,7 @@ func (s *Store) StateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.St

// HasStateSummary returns true if a state summary exists in DB.
func (s *Store) HasStateSummary(ctx context.Context, blockRoot [32]byte) bool {
_, span := trace.StartSpan(ctx, "BeaconDB.HasStateSummary")
ctx, span := trace.StartSpan(ctx, "BeaconDB.HasStateSummary")
defer span.End()

var hasSummary bool

@@ -18,7 +18,7 @@ import (
// we might find roots `0x23` and `0x45` stored under that index. We can then
// do a batch read for attestations corresponding to those roots.
func lookupValuesForIndices(ctx context.Context, indicesByBucket map[string][]byte, tx *bolt.Tx) [][][]byte {
_, span := trace.StartSpan(ctx, "BeaconDB.lookupValuesForIndices")
ctx, span := trace.StartSpan(ctx, "BeaconDB.lookupValuesForIndices")
defer span.End()
values := make([][][]byte, 0, len(indicesByBucket))
for k, v := range indicesByBucket {
@@ -37,7 +37,7 @@ func lookupValuesForIndices(ctx context.Context, indicesByBucket map[string][]by
// values stored at said index. Typically, indices are roots of data that can then
// be used for reads or batch reads from the DB.
func updateValueForIndices(ctx context.Context, indicesByBucket map[string][]byte, root []byte, tx *bolt.Tx) error {
_, span := trace.StartSpan(ctx, "BeaconDB.updateValueForIndices")
ctx, span := trace.StartSpan(ctx, "BeaconDB.updateValueForIndices")
defer span.End()
for k, idx := range indicesByBucket {
bkt := tx.Bucket([]byte(k))
@@ -63,7 +63,7 @@ func updateValueForIndices(ctx context.Context, indicesByBucket map[string][]byt

// deleteValueForIndices clears a root stored at each index.
func deleteValueForIndices(ctx context.Context, indicesByBucket map[string][]byte, root []byte, tx *bolt.Tx) error {
_, span := trace.StartSpan(ctx, "BeaconDB.deleteValueForIndices")
ctx, span := trace.StartSpan(ctx, "BeaconDB.deleteValueForIndices")
defer span.End()
for k, idx := range indicesByBucket {
bkt := tx.Bucket([]byte(k))

@@ -31,7 +31,7 @@ const (
func (s *Store) LastEpochWrittenForValidators(
ctx context.Context, validatorIndices []types.ValidatorIndex,
) ([]*slashertypes.AttestedEpochForValidator, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.LastEpochWrittenForValidators")
ctx, span := trace.StartSpan(ctx, "BeaconDB.LastEpochWrittenForValidators")
defer span.End()
attestedEpochs := make([]*slashertypes.AttestedEpochForValidator, 0)
encodedIndices := make([][]byte, len(validatorIndices))
@@ -63,7 +63,7 @@ func (s *Store) LastEpochWrittenForValidators(
func (s *Store) SaveLastEpochsWrittenForValidators(
ctx context.Context, epochByValidator map[types.ValidatorIndex]types.Epoch,
) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveLastEpochsWrittenForValidators")
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveLastEpochsWrittenForValidators")
defer span.End()
encodedIndices := make([][]byte, 0, len(epochByValidator))
encodedEpochs := make([][]byte, 0, len(epochByValidator))
@@ -183,7 +183,7 @@ func (s *Store) CheckAttesterDoubleVotes(
func (s *Store) AttestationRecordForValidator(
ctx context.Context, validatorIdx types.ValidatorIndex, targetEpoch types.Epoch,
) (*slashertypes.IndexedAttestationWrapper, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.AttestationRecordForValidator")
ctx, span := trace.StartSpan(ctx, "BeaconDB.AttestationRecordForValidator")
defer span.End()
var record *slashertypes.IndexedAttestationWrapper
encIdx := encodeValidatorIndex(validatorIdx)
@@ -215,7 +215,7 @@ func (s *Store) SaveAttestationRecordsForValidators(
ctx context.Context,
attestations []*slashertypes.IndexedAttestationWrapper,
) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveAttestationRecordsForValidators")
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveAttestationRecordsForValidators")
defer span.End()
encodedTargetEpoch := make([][]byte, len(attestations))
encodedRecords := make([][]byte, len(attestations))
@@ -259,7 +259,7 @@ func (s *Store) SaveAttestationRecordsForValidators(
func (s *Store) LoadSlasherChunks(
ctx context.Context, kind slashertypes.ChunkKind, diskKeys [][]byte,
) ([][]uint16, []bool, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.LoadSlasherChunk")
ctx, span := trace.StartSpan(ctx, "BeaconDB.LoadSlasherChunk")
defer span.End()
chunks := make([][]uint16, 0)
var exists []bool
@@ -290,7 +290,7 @@ func (s *Store) LoadSlasherChunks(
func (s *Store) SaveSlasherChunks(
ctx context.Context, kind slashertypes.ChunkKind, chunkKeys [][]byte, chunks [][]uint16,
) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveSlasherChunks")
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveSlasherChunks")
defer span.End()
encodedKeys := make([][]byte, len(chunkKeys))
encodedChunks := make([][]byte, len(chunkKeys))
@@ -320,7 +320,7 @@ func (s *Store) SaveSlasherChunks(
func (s *Store) CheckDoubleBlockProposals(
ctx context.Context, proposals []*slashertypes.SignedBlockHeaderWrapper,
) ([]*ethpb.ProposerSlashing, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.CheckDoubleBlockProposals")
ctx, span := trace.StartSpan(ctx, "BeaconDB.CheckDoubleBlockProposals")
defer span.End()
proposerSlashings := make([]*ethpb.ProposerSlashing, 0, len(proposals))
err := s.db.View(func(tx *bolt.Tx) error {
@@ -359,7 +359,7 @@ func (s *Store) CheckDoubleBlockProposals(
func (s *Store) BlockProposalForValidator(
ctx context.Context, validatorIdx types.ValidatorIndex, slot types.Slot,
) (*slashertypes.SignedBlockHeaderWrapper, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.BlockProposalForValidator")
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlockProposalForValidator")
defer span.End()
var record *slashertypes.SignedBlockHeaderWrapper
key, err := keyForValidatorProposal(slot, validatorIdx)
@@ -387,7 +387,7 @@ func (s *Store) BlockProposalForValidator(
func (s *Store) SaveBlockProposals(
ctx context.Context, proposals []*slashertypes.SignedBlockHeaderWrapper,
) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveBlockProposals")
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlockProposals")
defer span.End()
encodedKeys := make([][]byte, len(proposals))
encodedProposals := make([][]byte, len(proposals))

@@ -67,6 +67,7 @@ go_test(
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",

@@ -8,7 +8,6 @@ var errInvalidProposerBoostRoot = errors.New("invalid proposer boost root")
var errUnknownFinalizedRoot = errors.New("unknown finalized root")
var errUnknownJustifiedRoot = errors.New("unknown justified root")
var errInvalidOptimisticStatus = errors.New("invalid optimistic status")
var errUnknownPayloadHash = errors.New("unknown payload hash")
var errInvalidNilCheckpoint = errors.New("invalid nil checkpoint")
var errInvalidUnrealizedJustifiedEpoch = errors.New("invalid unrealized justified epoch")
var errInvalidUnrealizedFinalizedEpoch = errors.New("invalid unrealized finalized epoch")

@@ -89,7 +89,7 @@ func (f *ForkChoice) Head(
|
||||
// ProcessAttestation processes attestation for vote accounting, it iterates around validator indices
|
||||
// and update their votes accordingly.
|
||||
func (f *ForkChoice) ProcessAttestation(ctx context.Context, validatorIndices []uint64, blockRoot [32]byte, targetEpoch types.Epoch) {
|
||||
_, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.ProcessAttestation")
|
||||
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.ProcessAttestation")
|
||||
defer span.End()
|
||||
f.votesLock.Lock()
|
||||
defer f.votesLock.Unlock()
|
||||
@@ -150,8 +150,8 @@ func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, ro
|
||||
return err
|
||||
}
|
||||
|
||||
if features.Get().PullTips {
|
||||
jc, fc = f.store.pullTips(ctx, state, node, jc, fc)
|
||||
if features.Get().EnablePullTips {
|
||||
jc, fc = f.store.pullTips(state, node, jc, fc)
|
||||
}
|
||||
return f.updateCheckpoints(ctx, jc, fc)
|
||||
}
|
||||
|
||||
@@ -65,8 +65,8 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot types.Slot) error {
|
||||
f.store.justifiedCheckpoint = bjcp
|
||||
}
|
||||
}
|
||||
if features.Get().PullTips {
|
||||
f.UpdateUnrealizedCheckpoints()
|
||||
if features.Get().EnablePullTips {
|
||||
f.updateUnrealizedCheckpoints()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -32,12 +32,6 @@ func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, pa
|
||||
return invalidRoots, errInvalidParentRoot
|
||||
}
|
||||
}
|
||||
// Check if last valid hash is an ancestor of the passed node.
|
||||
lastValid, ok := s.nodeByPayload[payloadHash]
|
||||
if !ok || lastValid == nil {
|
||||
s.nodesLock.Unlock()
|
||||
return invalidRoots, errUnknownPayloadHash
|
||||
}
|
||||
firstInvalid := node
|
||||
for ; firstInvalid.parent != nil && firstInvalid.parent.payloadHash != payloadHash; firstInvalid = firstInvalid.parent {
|
||||
if ctx.Err() != nil {
|
||||
|
||||
@@ -2,9 +2,11 @@ package doublylinkedtree
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
|
||||
@@ -30,6 +32,23 @@ func TestPruneInvalid(t *testing.T) {
|
||||
wantedRoots [][32]byte
|
||||
wantedErr error
|
||||
}{
|
||||
{ // Bogus LVH, root not in forkchoice
|
||||
[32]byte{'x'},
|
||||
[32]byte{'i'},
|
||||
[32]byte{'R'},
|
||||
13,
|
||||
[][32]byte{},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
// Bogus LVH
|
||||
[32]byte{'i'},
|
||||
[32]byte{'h'},
|
||||
[32]byte{'R'},
|
||||
12,
|
||||
[][32]byte{{'i'}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[32]byte{'j'},
|
||||
[32]byte{'b'},
|
||||
@@ -264,3 +283,109 @@ func TestSetOptimisticToInvalid_CorrectChildren(t *testing.T) {
|
||||
require.Equal(t, 2, len(f.store.nodeByRoot[[32]byte{'a'}].children))
|
||||
|
||||
}
|
||||
|
||||
// Pow | Pos
|
||||
//
|
||||
// CA -- A -- B -- C-----D
|
||||
// \ \--------------E
|
||||
// \
|
||||
// ----------------------F -- G
|
||||
// B is INVALID
|
||||
//
|
||||
func TestSetOptimisticToInvalid_ForkAtMerge(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
st, root, err := prepareForkchoiceState(ctx, 100, [32]byte{'r'}, [32]byte{}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 101, [32]byte{'a'}, [32]byte{'r'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 102, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 103, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 104, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 105, [32]byte{'e'}, [32]byte{'b'}, [32]byte{'E'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 106, [32]byte{'f'}, [32]byte{'r'}, [32]byte{'F'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 107, [32]byte{'g'}, [32]byte{'f'}, [32]byte{'G'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
roots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'x'}, [32]byte{'d'}, [32]byte{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 4, len(roots))
|
||||
sort.Slice(roots, func(i, j int) bool {
|
||||
return bytesutil.BytesToUint64BigEndian(roots[i][:]) < bytesutil.BytesToUint64BigEndian(roots[j][:])
|
||||
})
|
||||
require.DeepEqual(t, roots, [][32]byte{{'b'}, {'c'}, {'d'}, {'e'}})
|
||||
}
|
||||
|
||||
// Pow | Pos
|
||||
//
|
||||
// CA -------- B -- C-----D
|
||||
// \ \--------------E
|
||||
// \
|
||||
// --A -------------------------F -- G
|
||||
// B is INVALID
|
||||
//
|
||||
func TestSetOptimisticToInvalid_ForkAtMerge_bis(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
st, root, err := prepareForkchoiceState(ctx, 100, [32]byte{'r'}, [32]byte{}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 101, [32]byte{'a'}, [32]byte{'r'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 102, [32]byte{'b'}, [32]byte{}, [32]byte{'B'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 103, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 104, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 105, [32]byte{'e'}, [32]byte{'b'}, [32]byte{'E'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 106, [32]byte{'f'}, [32]byte{'a'}, [32]byte{'F'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 107, [32]byte{'g'}, [32]byte{'f'}, [32]byte{'G'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
roots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'x'}, [32]byte{'d'}, [32]byte{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 4, len(roots))
|
||||
sort.Slice(roots, func(i, j int) bool {
|
||||
return bytesutil.BytesToUint64BigEndian(roots[i][:]) < bytesutil.BytesToUint64BigEndian(roots[j][:])
|
||||
})
|
||||
require.DeepEqual(t, roots, [][32]byte{{'b'}, {'c'}, {'d'}, {'e'}})
|
||||
}
|
||||
|
||||
@@ -478,3 +478,21 @@ func TestForkChoice_computeProposerBoostScore(t *testing.T) {
|
||||
require.Equal(t, uint64(8), score)
|
||||
})
|
||||
}
|
||||
|
||||
// Regression test (11053)
|
||||
func TestForkChoice_missingPreviousProposerBoost(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
balances := make([]uint64, 64) // 64 active validators.
|
||||
for i := 0; i < len(balances); i++ {
|
||||
balances[i] = 10
|
||||
}
|
||||
driftGenesisTime(f, 1, 0)
|
||||
st, root, err := prepareForkchoiceState(ctx, 1, [32]byte{'r'}, [32]byte{}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
f.store.previousProposerBoostRoot = [32]byte{'p'}
|
||||
_, err = f.Head(ctx, balances)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -27,7 +27,9 @@ func (s *Store) applyProposerBoostScore(newBalances []uint64) error {
|
||||
if s.previousProposerBoostRoot != params.BeaconConfig().ZeroHash {
|
||||
previousNode, ok := s.nodeByRoot[s.previousProposerBoostRoot]
|
||||
if !ok || previousNode == nil {
|
||||
return errInvalidProposerBoostRoot
|
||||
s.previousProposerBoostRoot = [32]byte{}
|
||||
log.WithError(errInvalidProposerBoostRoot).Errorf(fmt.Sprintf("invalid prev root %#x", s.previousProposerBoostRoot))
|
||||
return nil
|
||||
}
|
||||
previousNode.balance -= s.previousProposerBoostScore
|
||||
}
|
||||
@@ -35,7 +37,9 @@ func (s *Store) applyProposerBoostScore(newBalances []uint64) error {
|
||||
if s.proposerBoostRoot != params.BeaconConfig().ZeroHash {
|
||||
currentNode, ok := s.nodeByRoot[s.proposerBoostRoot]
|
||||
if !ok || currentNode == nil {
|
||||
return errInvalidProposerBoostRoot
|
||||
s.proposerBoostRoot = [32]byte{}
|
||||
log.WithError(errInvalidProposerBoostRoot).Errorf(fmt.Sprintf("invalid current root %#x", s.proposerBoostRoot))
|
||||
return nil
|
||||
}
|
||||
proposerScore, err = computeProposerBoostScore(newBalances)
|
||||
if err != nil {
|
||||
@@ -63,7 +67,7 @@ func (s *Store) PruneThreshold() uint64 {
|
||||
// head starts from justified root and then follows the best descendant links
|
||||
// to find the best block for head. This function assumes a lock on s.nodesLock
|
||||
func (s *Store) head(ctx context.Context) ([32]byte, error) {
|
||||
_, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.head")
|
||||
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.head")
|
||||
defer span.End()
|
||||
s.checkpointsLock.RLock()
|
||||
defer s.checkpointsLock.RUnlock()
|
||||
@@ -109,7 +113,7 @@ func (s *Store) insert(ctx context.Context,
|
||||
slot types.Slot,
|
||||
root, parentRoot, payloadHash [fieldparams.RootLength]byte,
|
||||
justifiedEpoch, finalizedEpoch types.Epoch) (*Node, error) {
|
||||
_, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.insert")
|
||||
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.insert")
|
||||
defer span.End()
|
||||
|
||||
s.nodesLock.Lock()
|
||||
@@ -196,7 +200,7 @@ func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalized
|
||||
// root is different than the current store finalized root, and the number of the store has met prune threshold.
|
||||
// This function does not prune for invalid optimistically synced nodes, it deals only with pruning upon finalization
|
||||
func (s *Store) prune(ctx context.Context) error {
|
||||
_, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.Prune")
|
||||
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.Prune")
|
||||
defer span.End()
|
||||
|
||||
s.nodesLock.Lock()
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
package doublylinkedtree
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
|
||||
@@ -44,20 +42,19 @@ func (s *Store) setUnrealizedFinalizedEpoch(root [32]byte, epoch types.Epoch) er
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateUnrealizedCheckpoints "realizes" the unrealized justified and finalized
|
||||
// epochs stored within nodes. It should be called at the beginning of each
|
||||
// epoch
|
||||
func (f *ForkChoice) UpdateUnrealizedCheckpoints() {
|
||||
// updateUnrealizedCheckpoints "realizes" the unrealized justified and finalized
|
||||
// epochs stored within nodes. It should be called at the beginning of each epoch.
|
||||
func (f *ForkChoice) updateUnrealizedCheckpoints() {
|
||||
f.store.nodesLock.Lock()
|
||||
defer f.store.nodesLock.Unlock()
|
||||
for _, node := range f.store.nodeByRoot {
|
||||
node.justifiedEpoch = node.unrealizedJustifiedEpoch
|
||||
node.finalizedEpoch = node.unrealizedFinalizedEpoch
|
||||
if node.justifiedEpoch > f.store.justifiedCheckpoint.Epoch {
|
||||
f.store.justifiedCheckpoint = f.store.unrealizedJustifiedCheckpoint
|
||||
if node.justifiedEpoch > f.store.bestJustifiedCheckpoint.Epoch {
|
||||
f.store.bestJustifiedCheckpoint = f.store.unrealizedJustifiedCheckpoint
|
||||
}
|
||||
f.store.justifiedCheckpoint = f.store.unrealizedJustifiedCheckpoint
|
||||
}
|
||||
if node.finalizedEpoch > f.store.finalizedCheckpoint.Epoch {
|
||||
f.store.justifiedCheckpoint = f.store.unrealizedJustifiedCheckpoint
|
||||
@@ -66,33 +63,37 @@ func (f *ForkChoice) UpdateUnrealizedCheckpoints() {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Store) pullTips(ctx context.Context, state state.BeaconState, node *Node, jc, fc *ethpb.Checkpoint) (*ethpb.Checkpoint, *ethpb.Checkpoint) {
|
||||
func (s *Store) pullTips(state state.BeaconState, node *Node, jc, fc *ethpb.Checkpoint) (*ethpb.Checkpoint, *ethpb.Checkpoint) {
|
||||
s.nodesLock.Lock()
|
||||
defer s.nodesLock.Unlock()
|
||||
|
||||
if node.parent == nil { // Nothing to do if the parent is nil.
|
||||
return jc, fc
|
||||
}
|
||||
|
||||
s.checkpointsLock.Lock()
|
||||
defer s.checkpointsLock.Unlock()
|
||||
|
||||
var uj, uf *ethpb.Checkpoint
|
||||
currentSlot := slots.CurrentSlot(s.genesisTime)
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
currentEpoch := slots.ToEpoch(slots.CurrentSlot(s.genesisTime))
|
||||
stateSlot := state.Slot()
|
||||
stateEpoch := slots.ToEpoch(stateSlot)
|
||||
if node.parent == nil {
|
||||
return jc, fc
|
||||
}
|
||||
currJustified := node.parent.unrealizedJustifiedEpoch == currentEpoch
|
||||
prevJustified := node.parent.unrealizedJustifiedEpoch+1 == currentEpoch
|
||||
tooEarlyForCurr := slots.SinceEpochStarts(stateSlot)*3 < params.BeaconConfig().SlotsPerEpoch*2
|
||||
// Exit early if it's justified or too early to be justified.
|
||||
if currJustified || (stateEpoch == currentEpoch && prevJustified && tooEarlyForCurr) {
|
||||
node.unrealizedJustifiedEpoch = node.parent.unrealizedJustifiedEpoch
|
||||
node.unrealizedFinalizedEpoch = node.parent.unrealizedFinalizedEpoch
|
||||
return jc, fc
|
||||
}
|
||||
|
||||
uj, uf, err := precompute.UnrealizedCheckpoints(ctx, state)
|
||||
uj, uf, err := precompute.UnrealizedCheckpoints(state)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("could not compute unrealized checkpoints")
|
||||
uj, uf = jc, fc
|
||||
}
|
||||
node.unrealizedJustifiedEpoch, node.unrealizedFinalizedEpoch = uj.Epoch, uf.Epoch
|
||||
|
||||
// Update store's unrealized checkpoints.
|
||||
if uj.Epoch > s.unrealizedJustifiedCheckpoint.Epoch {
|
||||
s.unrealizedJustifiedCheckpoint = &forkchoicetypes.Checkpoint{
|
||||
Epoch: uj.Epoch, Root: bytesutil.ToBytes32(uj.Root),
|
||||
@@ -107,6 +108,8 @@ func (s *Store) pullTips(ctx context.Context, state state.BeaconState, node *Nod
|
||||
}
|
||||
}
|
||||
|
||||
// Update node's checkpoints.
|
||||
node.unrealizedJustifiedEpoch, node.unrealizedFinalizedEpoch = uj.Epoch, uf.Epoch
|
||||
if stateEpoch < currentEpoch {
|
||||
jc, fc = uj, uf
|
||||
node.justifiedEpoch = uj.Epoch
|
||||
|
||||
@@ -98,7 +98,7 @@ func TestStore_LongFork(t *testing.T) {
|
||||
require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'c'}].weight)
|
||||
|
||||
// Update unrealized justification, c becomes head
|
||||
f.UpdateUnrealizedCheckpoints()
|
||||
f.updateUnrealizedCheckpoints()
|
||||
headRoot, err = f.Head(ctx, []uint64{100})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'c'}, headRoot)
|
||||
@@ -179,7 +179,7 @@ func TestStore_NoDeadLock(t *testing.T) {
|
||||
require.Equal(t, types.Epoch(0), f.FinalizedCheckpoint().Epoch)
|
||||
|
||||
// Realized Justified checkpoints, H becomes head
|
||||
f.UpdateUnrealizedCheckpoints()
|
||||
f.updateUnrealizedCheckpoints()
|
||||
headRoot, err = f.Head(ctx, []uint64{100})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'h'}, headRoot)
|
||||
@@ -240,7 +240,7 @@ func TestStore_ForkNextEpoch(t *testing.T) {
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'d'}, 1))
|
||||
f.store.unrealizedJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 1}
|
||||
f.UpdateUnrealizedCheckpoints()
|
||||
f.updateUnrealizedCheckpoints()
|
||||
headRoot, err = f.Head(ctx, []uint64{100})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'d'}, headRoot)
|
||||
@@ -251,7 +251,7 @@ func TestStore_ForkNextEpoch(t *testing.T) {
|
||||
|
||||
func TestStore_PullTips_Heuristics(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
PullTips: true,
|
||||
EnablePullTips: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
ctx := context.Background()
|
||||
|
||||
@@ -17,3 +17,4 @@ var errInvalidNilCheckpoint = errors.New("invalid nil checkpoint")
|
||||
var errInvalidUnrealizedJustifiedEpoch = errors.New("invalid unrealized justified epoch")
|
||||
var errInvalidUnrealizedFinalizedEpoch = errors.New("invalid unrealized finalized epoch")
|
||||
var errNilBlockHeader = errors.New("invalid nil block header")
|
||||
var errInvalidParentRoot = errors.New("invalid parent root")
|
||||
|
||||
@@ -19,7 +19,7 @@ func computeDeltas(
|
||||
oldBalances, newBalances []uint64,
|
||||
slashedIndices map[types.ValidatorIndex]bool,
|
||||
) ([]int, []Vote, error) {
|
||||
_, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.computeDeltas")
|
||||
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.computeDeltas")
|
||||
defer span.End()
|
||||
|
||||
deltas := make([]int, count)
|
||||
|
||||
@@ -65,7 +65,7 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot types.Slot) error {
|
||||
f.store.justifiedCheckpoint = bjcp
|
||||
}
|
||||
}
|
||||
if features.Get().PullTips {
|
||||
if features.Get().EnablePullTips {
|
||||
f.UpdateUnrealizedCheckpoints()
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -56,8 +56,8 @@ func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, parentRoo
|
||||
defer f.store.nodesLock.Unlock()
|
||||
invalidRoots := make([][32]byte, 0)
|
||||
lastValidIndex, ok := f.store.payloadIndices[payloadHash]
|
||||
if !ok || lastValidIndex == NonExistentNode {
|
||||
return invalidRoots, errInvalidFinalizedNode
|
||||
if !ok {
|
||||
lastValidIndex = uint64(len(f.store.nodes))
|
||||
}
|
||||
|
||||
invalidIndex, ok := f.store.nodesIndices[root]
|
||||
|
||||
@@ -385,8 +385,6 @@ func TestSetOptimisticToInvalid_InvalidRoots(t *testing.T) {
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
_, err = f.SetOptimisticToInvalid(ctx, [32]byte{'p'}, [32]byte{'p'}, [32]byte{'B'})
|
||||
require.ErrorIs(t, ErrUnknownNodeRoot, err)
|
||||
_, err = f.SetOptimisticToInvalid(ctx, [32]byte{'a'}, [32]byte{}, [32]byte{'p'})
|
||||
require.ErrorIs(t, errInvalidFinalizedNode, err)
|
||||
}
|
||||
|
||||
// This is a regression test (10445)
|
||||
@@ -417,3 +415,140 @@ func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) {
|
||||
require.DeepEqual(t, params.BeaconConfig().ZeroHash, f.store.previousProposerBoostRoot)
|
||||
f.store.proposerBoostLock.RUnlock()
|
||||
}
|
||||
|
||||
// This is a regression test (10996)
|
||||
func TestSetOptimisticToInvalid_BogusLVH(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
state, root, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, [32]byte{}, [32]byte{'A'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, root))
|
||||
|
||||
state, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, root))
|
||||
|
||||
invalidRoots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'R'})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(invalidRoots))
|
||||
require.Equal(t, [32]byte{'b'}, invalidRoots[0])
|
||||
}
|
||||
|
||||
// This is a regression test (10996)
|
||||
func TestSetOptimisticToInvalid_BogusLVH_RotNotImported(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
state, root, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, [32]byte{}, [32]byte{'A'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, root))
|
||||
|
||||
state, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, root))
|
||||
|
||||
invalidRoots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'R'})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(invalidRoots))
|
||||
}
|
||||
|
||||
// Pow | Pos
|
||||
//
|
||||
// CA -- A -- B -- C-----D
|
||||
// \ \--------------E
|
||||
// \
|
||||
// ----------------------F -- G
|
||||
// B is INVALID
|
||||
//
|
||||
func TestSetOptimisticToInvalid_ForkAtMerge(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
st, root, err := prepareForkchoiceState(ctx, 100, [32]byte{'r'}, [32]byte{}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 101, [32]byte{'a'}, [32]byte{'r'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 102, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 103, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 104, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 105, [32]byte{'e'}, [32]byte{'b'}, [32]byte{'E'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 106, [32]byte{'f'}, [32]byte{'r'}, [32]byte{'F'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 107, [32]byte{'g'}, [32]byte{'f'}, [32]byte{'G'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
roots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'x'}, [32]byte{'d'}, [32]byte{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 4, len(roots))
|
||||
require.Equal(t, true, slicesEqual(roots, [][32]byte{[32]byte{'b'}, [32]byte{'c'}, [32]byte{'d'}, [32]byte{'e'}}))
|
||||
}
|
||||
|
||||
// Pow | Pos
|
||||
//
|
||||
// CA -------- B -- C-----D
|
||||
// \ \--------------E
|
||||
// \
|
||||
// --A -------------------------F -- G
|
||||
// B is INVALID
|
||||
//
|
||||
func TestSetOptimisticToInvalid_ForkAtMerge_bis(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
st, root, err := prepareForkchoiceState(ctx, 100, [32]byte{'r'}, [32]byte{}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 101, [32]byte{'a'}, [32]byte{'r'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 102, [32]byte{'b'}, [32]byte{}, [32]byte{'B'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 103, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 104, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 105, [32]byte{'e'}, [32]byte{'b'}, [32]byte{'E'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 106, [32]byte{'f'}, [32]byte{'a'}, [32]byte{'F'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
st, root, err = prepareForkchoiceState(ctx, 107, [32]byte{'g'}, [32]byte{'f'}, [32]byte{'G'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
|
||||
roots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'d'}, [32]byte{'c'}, [32]byte{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(roots))
|
||||
require.Equal(t, [32]byte{'d'}, roots[0])
|
||||
}
|
||||
|
||||
@@ -82,7 +82,7 @@ func (f *ForkChoice) Head(ctx context.Context, justifiedStateBalances []uint64)
|
||||
// ProcessAttestation processes attestation for vote accounting, it iterates around validator indices
|
||||
// and update their votes accordingly.
|
||||
func (f *ForkChoice) ProcessAttestation(ctx context.Context, validatorIndices []uint64, blockRoot [32]byte, targetEpoch types.Epoch) {
|
||||
_, span := trace.StartSpan(ctx, "protoArrayForkChoice.ProcessAttestation")
|
||||
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.ProcessAttestation")
|
||||
defer span.End()
|
||||
f.votesLock.Lock()
|
||||
defer f.votesLock.Unlock()
|
||||
@@ -155,8 +155,8 @@ func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, ro
|
||||
return err
|
||||
}
|
||||
|
||||
if features.Get().PullTips {
|
||||
jc, fc = f.store.pullTips(ctx, state, node, jc, fc)
|
||||
if features.Get().EnablePullTips {
|
||||
jc, fc = f.store.pullTips(state, node, jc, fc)
|
||||
}
|
||||
return f.updateCheckpoints(ctx, jc, fc)
|
||||
}
|
||||
@@ -470,7 +470,7 @@ func (s *Store) insert(ctx context.Context,
|
||||
slot types.Slot,
|
||||
root, parent, payloadHash [32]byte,
|
||||
justifiedEpoch, finalizedEpoch types.Epoch) (*Node, error) {
|
||||
_, span := trace.StartSpan(ctx, "protoArrayForkChoice.insert")
|
||||
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.insert")
|
||||
defer span.End()
|
||||
|
||||
s.nodesLock.Lock()
|
||||
@@ -541,7 +541,7 @@ func (s *Store) insert(ctx context.Context,
|
||||
func (s *Store) applyWeightChanges(
|
||||
ctx context.Context, newBalances []uint64, delta []int,
|
||||
) error {
|
||||
_, span := trace.StartSpan(ctx, "protoArrayForkChoice.applyWeightChanges")
|
||||
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.applyWeightChanges")
|
||||
defer span.End()
|
||||
|
||||
// The length of the nodes can not be different than length of the delta.
|
||||
@@ -746,7 +746,7 @@ func (s *Store) updateBestChildAndDescendant(parentIndex, childIndex uint64) err
|
||||
// prune prunes the store with the new finalized root. The tree is only
|
||||
// pruned if the number of the nodes in store has met prune threshold.
|
||||
func (s *Store) prune(ctx context.Context) error {
|
||||
_, span := trace.StartSpan(ctx, "protoArrayForkChoice.prune")
|
||||
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.prune")
|
||||
defer span.End()
|
||||
|
||||
s.nodesLock.Lock()
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
package protoarray
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
@@ -77,16 +75,18 @@ func (f *ForkChoice) UpdateUnrealizedCheckpoints() {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Store) pullTips(ctx context.Context, state state.BeaconState, node *Node, jc, fc *ethpb.Checkpoint) (*ethpb.Checkpoint, *ethpb.Checkpoint) {
|
||||
var uj, uf *ethpb.Checkpoint
|
||||
func (s *Store) pullTips(state state.BeaconState, node *Node, jc, fc *ethpb.Checkpoint) (*ethpb.Checkpoint, *ethpb.Checkpoint) {
|
||||
s.nodesLock.Lock()
|
||||
defer s.nodesLock.Unlock()
|
||||
|
||||
currentSlot := slots.CurrentSlot(s.genesisTime)
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
stateSlot := state.Slot()
|
||||
stateEpoch := slots.ToEpoch(stateSlot)
|
||||
if node.parent == NonExistentNode {
|
||||
if node.parent == NonExistentNode { // Nothing to do if the parent is nil.
|
||||
return jc, fc
|
||||
}
|
||||
|
||||
currentEpoch := slots.ToEpoch(slots.CurrentSlot(s.genesisTime))
|
||||
stateSlot := state.Slot()
|
||||
stateEpoch := slots.ToEpoch(stateSlot)
|
||||
|
||||
parent := s.nodes[node.parent]
|
||||
currJustified := parent.unrealizedJustifiedEpoch == currentEpoch
|
||||
prevJustified := parent.unrealizedJustifiedEpoch+1 == currentEpoch
|
||||
@@ -97,12 +97,13 @@ func (s *Store) pullTips(ctx context.Context, state state.BeaconState, node *Nod
|
||||
return jc, fc
|
||||
}
|
||||
|
||||
uj, uf, err := precompute.UnrealizedCheckpoints(ctx, state)
|
||||
uj, uf, err := precompute.UnrealizedCheckpoints(state)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("could not compute unrealized checkpoints")
|
||||
uj, uf = jc, fc
|
||||
}
|
||||
node.unrealizedJustifiedEpoch, node.unrealizedFinalizedEpoch = uj.Epoch, uf.Epoch
|
||||
|
||||
// Update store's unrealized checkpoints.
|
||||
s.checkpointsLock.Lock()
|
||||
if uj.Epoch > s.unrealizedJustifiedCheckpoint.Epoch {
|
||||
s.unrealizedJustifiedCheckpoint = &forkchoicetypes.Checkpoint{
|
||||
@@ -117,12 +118,15 @@ func (s *Store) pullTips(ctx context.Context, state state.BeaconState, node *Nod
|
||||
Epoch: uf.Epoch, Root: bytesutil.ToBytes32(uf.Root),
|
||||
}
|
||||
}
|
||||
s.checkpointsLock.Unlock()
|
||||
|
||||
// Update node's checkpoints.
|
||||
node.unrealizedJustifiedEpoch, node.unrealizedFinalizedEpoch = uj.Epoch, uf.Epoch
|
||||
if stateEpoch < currentEpoch {
|
||||
jc, fc = uj, uf
|
||||
node.justifiedEpoch = uj.Epoch
|
||||
node.finalizedEpoch = uf.Epoch
|
||||
}
|
||||
s.checkpointsLock.Unlock()
|
||||
|
||||
return jc, fc
|
||||
}
|
||||
|
||||
@@ -252,7 +252,7 @@ func TestStore_ForkNextEpoch(t *testing.T) {
|
||||
|
||||
func TestStore_PullTips_Heuristics(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
PullTips: true,
|
||||
EnablePullTips: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
ctx := context.Background()
|
||||
|
||||
beacon-chain/geninit/clock.go (new file, 69 lines)
@@ -0,0 +1,69 @@
package geninit

import (
	types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/time/slots"
	"time"
)

// Clock abstracts important time-related concerns in the beacon chain:
// - genesis time
// - provides a time.Now() construct that can be overridden in tests
// - synchronization point for code that needs to know the genesis time
// - CurrentSlot: convenience conversion for current time -> slot
// - support backwards compatibility with the TimeFetcher interface
type Clock interface {
	GenesisTime() time.Time
	CurrentSlot() types.Slot
	Now() time.Time
}

// clock is a type that fulfills the TimeFetcher interface. This can be used in a number of places where
// blockchain.ChainInfoFetcher has historically been used.
type clock struct {
	genesis time.Time
	now     Now
}
var _ Clock = &clock{}

// GenesisTime provides an accessor to the embedded genesis time, also fulfilling the blockchain.TimeFetcher interface.
func (gt clock) GenesisTime() time.Time {
	return gt.genesis
}

// CurrentSlot returns the current slot relative to the time.Time value clock embeds.
func (gt clock) CurrentSlot() types.Slot {
	return slots.Duration(gt.genesis, gt.now())
}

// Now provides a value for time.Now() that can be overridden in tests.
func (gt clock) Now() time.Time {
	return gt.now()
}

// ClockOpt is a functional option to change the behavior of a clock value made by NewClock.
// It is primarily intended as a way to inject an alternate time.Now() callback (WithNow) for testing.
type ClockOpt func(*clock)

// WithNow allows tests in particular to inject an alternate implementation of time.Now (vs using system time).
func WithNow(n Now) ClockOpt {
	return func(gt *clock) {
		gt.now = n
	}
}

// NewClock constructs a clock value using the given time value. Optional ClockOpt can be provided.
// If an implementation of the Now function type is not provided (via WithNow), time.Now (system time) will be used by default.
func NewClock(t time.Time, opts ...ClockOpt) clock {
	gt := clock{genesis: t}
	for _, o := range opts {
		o(&gt)
	}
	if gt.now == nil {
		gt.now = time.Now
	}
	return gt
}

// Now is a function that can return the current time. This will be time.Now by default, but can be overridden for tests.
type Now func() time.Time
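For orientation, a minimal usage sketch of the new Clock type (not part of this diff; the genesis timestamp, offset, and test name below are illustrative only):

package geninit_test

import (
	"testing"
	"time"

	"github.com/prysmaticlabs/prysm/beacon-chain/geninit"
)

// TestClockWithFixedNow shows how WithNow pins "now" so the clock behaves deterministically in tests.
func TestClockWithFixedNow(t *testing.T) {
	genesis := time.Unix(1606824023, 0)    // illustrative genesis timestamp
	fixed := genesis.Add(24 * time.Second) // pretend "now" is 24s after genesis
	c := geninit.NewClock(genesis, geninit.WithNow(func() time.Time { return fixed }))
	if !c.Now().Equal(fixed) {
		t.Fatalf("expected injected now %v, got %v", fixed, c.Now())
	}
	if !c.GenesisTime().Equal(genesis) {
		t.Fatalf("expected genesis %v, got %v", genesis, c.GenesisTime())
	}
	_ = c.CurrentSlot() // derived from the injected now, so it is deterministic too
}
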
beacon-chain/geninit/service.go (new file, 84 lines)
@@ -0,0 +1,84 @@
package geninit

import (
	"context"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
	"github.com/prysmaticlabs/prysm/beacon-chain/db"
	"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
	"go.opencensus.io/trace"
	"time"

	"github.com/prysmaticlabs/prysm/runtime"
	log "github.com/sirupsen/logrus"
)

type Service struct {
	ctx           context.Context
	powWaiter     ClockWaiter
	genesisSetter ClockSetter
	powFetcher    powchain.ChainStartFetcher
	d             db.HeadAccessDatabase
}
var _ runtime.Service = &Service{}

type ServiceOption func(*Service)

func New(ctx context.Context, pw ClockWaiter, gs ClockSetter, f powchain.ChainStartFetcher, d db.HeadAccessDatabase, opts ...ServiceOption) (*Service, error) {
	s := &Service{
		ctx:           ctx,
		powWaiter:     pw,
		genesisSetter: gs,
		powFetcher:    f,
		d:             d,
	}
	for _, o := range opts {
		o(s)
	}
	return s, nil
}

func (s *Service) Start() {
	go s.run()
}

func (s *Service) run() {
	c, err := s.powWaiter.WaitForClock(s.ctx)
	if err != nil {
		log.WithError(err).Error("timeout waiting for genesis timestamp")
	}
	if err = s.saveGenesis(c); err != nil {
		log.Fatalf("Could not initialize beacon chain, %s", err.Error())
	}
	s.genesisSetter.SetGenesisClock(c)
}

func (s *Service) saveGenesis(c Clock) error {
	ctx, span := trace.StartSpan(s.ctx, "beacon-chain.geninit.Service.saveGenesis")
	defer span.End()
	eth1 := s.powFetcher.ChainStartEth1Data()
	pst := s.powFetcher.PreGenesisState()
	st, err := transition.OptimizedGenesisBeaconState(uint64(c.GenesisTime().Unix()), pst, eth1)
	if err != nil {
		return err
	}
	if err := s.d.SaveGenesisData(ctx, st); err != nil {
		return errors.Wrap(err, "db error, could not save genesis data")
	}
	log.Info("Initialized beacon chain genesis state")
	// Clear out all pre-genesis data now that the state is initialized.
	s.powFetcher.ClearPreGenesisData()
	return nil
}

func (s *Service) Stop() error {
	return nil
}

func (s *Service) Status() error {
	return nil
}

type GenesisReady struct {
	time time.Time
}
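A hedged wiring sketch, not taken from this diff: ClockSync (defined in waiter.go below) satisfies both ClockWaiter and ClockSetter, so one sync value can feed the service with the chain-start clock while a second one publishes the genesis clock to downstream consumers. The function and variable names here are hypothetical; fetcher and beaconDB are assumed to be constructed elsewhere by the node.

package geninit_test

import (
	"context"

	"github.com/prysmaticlabs/prysm/beacon-chain/db"
	"github.com/prysmaticlabs/prysm/beacon-chain/geninit"
	"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
)

// startGenesisInit wires the pieces added in this diff together (illustrative only).
func startGenesisInit(ctx context.Context, fetcher powchain.ChainStartFetcher, beaconDB db.HeadAccessDatabase) (*geninit.Service, geninit.ClockWaiter, error) {
	powSync := geninit.NewClockSync()     // set by the powchain side once chain start is observed
	genesisSync := geninit.NewClockSync() // set by the service; downstream consumers wait on it
	svc, err := geninit.New(ctx, powSync, genesisSync, fetcher, beaconDB)
	if err != nil {
		return nil, nil, err
	}
	svc.Start() // background: waits on powSync, saves genesis state, then signals genesisSync
	return svc, genesisSync, nil
}
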
beacon-chain/geninit/waiter.go (new file, 50 lines)
@@ -0,0 +1,50 @@
package geninit

import (
	"context"
	"time"
)

type ClockWaiter interface {
	WaitForClock(context.Context) (Clock, error)
}

type ClockSetter interface {
	SetGenesisTime(time.Time)
	SetGenesisClock(Clock)
}

type ClockSync struct {
	ready chan struct{}
	c     Clock
}

func (w *ClockSync) SetGenesisTime(g time.Time) {
	w.c = NewClock(g)
	close(w.ready)
	w.ready = nil
}

func (w *ClockSync) SetGenesisClock(c Clock) {
	w.c = c
	close(w.ready)
	w.ready = nil
}

func (w *ClockSync) WaitForClock(ctx context.Context) (Clock, error) {
	if w.ready == nil {
		return w.c, nil
	}
	select {
	case <-w.ready:
		return w.c, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

func NewClockSync() *ClockSync {
	return &ClockSync{
		ready: make(chan struct{}),
	}
}
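On the consumer side, a caller blocks in WaitForClock until SetGenesisClock (or SetGenesisTime) fires, with a context deadline as the escape hatch. A small sketch under that assumption; the helper name and the five-minute timeout are illustrative:

package geninit_test

import (
	"context"
	"time"

	"github.com/prysmaticlabs/prysm/beacon-chain/geninit"
)

// waitForGenesisClock blocks until the genesis clock is published, or until the timeout elapses.
func waitForGenesisClock(parent context.Context, cw geninit.ClockWaiter) (geninit.Clock, error) {
	ctx, cancel := context.WithTimeout(parent, 5*time.Minute)
	defer cancel()
	return cw.WaitForClock(ctx)
}
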
@@ -18,7 +18,6 @@ import (
|
||||
)
|
||||
|
||||
func TestProcessSlashings(t *testing.T) {
|
||||
au := util.AttestationUtil{}
|
||||
tests := []struct {
|
||||
name string
|
||||
block *ethpb.BeaconBlock
|
||||
@@ -78,13 +77,13 @@ func TestProcessSlashings(t *testing.T) {
|
||||
Body: ðpb.BeaconBlockBody{
|
||||
AttesterSlashings: []*ethpb.AttesterSlashing{
|
||||
{
|
||||
Attestation_1: au.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Attestation_1: util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1},
|
||||
},
|
||||
AttestingIndices: []uint64{1, 3, 4},
|
||||
}),
|
||||
Attestation_2: au.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Attestation_2: util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
AttestingIndices: []uint64{1, 5, 6},
|
||||
}),
|
||||
},
|
||||
@@ -100,13 +99,13 @@ func TestProcessSlashings(t *testing.T) {
|
||||
Body: ðpb.BeaconBlockBody{
|
||||
AttesterSlashings: []*ethpb.AttesterSlashing{
|
||||
{
|
||||
Attestation_1: au.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Attestation_1: util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1},
|
||||
},
|
||||
AttestingIndices: []uint64{1, 3, 4},
|
||||
}),
|
||||
Attestation_2: au.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Attestation_2: util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
AttestingIndices: []uint64{3, 5, 6},
|
||||
}),
|
||||
},
|
||||
|
||||
@@ -172,7 +172,9 @@ func TestStart(t *testing.T) {
	time.Sleep(1000 * time.Millisecond)
	require.LogsContain(t, hook, "Synced to head epoch, starting reporting performance")
	require.LogsContain(t, hook, "\"Starting service\" ValidatorIndices=\"[1 2 12 15]\"")
	s.Lock()
	require.Equal(t, s.isLogging, true, "monitor is not running")
	s.Unlock()
}

func TestInitializePerformanceStructures(t *testing.T) {