Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 22:07:59 -05:00)
Compare commits: debugLogs...capella-po
145 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 4903a809f2 | |
| | e1a2267f86 | |
| | 3c9e4ee7f7 | |
| | 9ba32c9acd | |
| | d23008452e | |
| | f397cba1e0 | |
| | aa2bbb676f | |
| | 1f87322ddf | |
| | 837fac628a | |
| | 7d3e627d2d | |
| | 016a027c48 | |
| | 3040f3ae4d | |
| | c8f1d51280 | |
| | 1583e93b48 | |
| | 849457df81 | |
| | 903cab75ee | |
| | ee108d4aff | |
| | 49bcc58762 | |
| | a08baf4a14 | |
| | 8c56dfdd46 | |
| | dcdd9af9db | |
| | a464cf5c60 | |
| | 5cb5ce7e14 | |
| | 6e26a6f128 | |
| | b512b92a8a | |
| | b0601580ef | |
| | c1f29ea651 | |
| | 910822400f | |
| | 881d1d435a | |
| | d1aae0c941 | |
| | 57bdb907cc | |
| | 15d683c78f | |
| | bf6c8ced7d | |
| | 78fb685027 | |
| | c0297ca0c2 | |
| | d51fb3a648 | |
| | 98c0b23350 | |
| | 3ab486e140 | |
| | 039a0fffba | |
| | 90ec640e7a | |
| | 10acd31d25 | |
| | f52dbe3c8f | |
| | df1e8b33d8 | |
| | cdb4ee42cc | |
| | d29baec77e | |
| | 166c284ca4 | |
| | 2654e3f080 | |
| | 0adc54b7ff | |
| | 1cbd7e9888 | |
| | ed65688252 | |
| | 0a9e1658dd | |
| | 31d4a4cd11 | |
| | fbc4e73d31 | |
| | c1d4eaa79d | |
| | 760af6428e | |
| | 4bae07fdbb | |
| | dfa0ccf626 | |
| | 1a51fdbd58 | |
| | 368a99ec8d | |
| | 1c7e734918 | |
| | 42808a0c29 | |
| | 11cbe8fcb9 | |
| | 764d1325bf | |
| | 0cf30e9022 | |
| | 227b20f368 | |
| | d7d70bc25b | |
| | 82f6ddb693 | |
| | 9e4e82d2c5 | |
| | 9838369fe9 | |
| | 6085ad1bfa | |
| | d3851b27df | |
| | d6100dfdcb | |
| | c2144dac86 | |
| | a47ff569a8 | |
| | f8be022ef2 | |
| | 4f39e6b685 | |
| | c67b000633 | |
| | 02f7443586 | |
| | 6275e7df4e | |
| | 1b6b52fda1 | |
| | 5fa1fd84b9 | |
| | bd0c9f9e8d | |
| | 2532bb370c | |
| | 12efc6c2c1 | |
| | a6cc9ac9c5 | |
| | 031f5845a2 | |
| | b88559726c | |
| | 938fca1933 | |
| | 604688a00e | |
| | 62f6b07cba | |
| | f956f1ed6e | |
| | 16b0820193 | |
| | 4b02267e96 | |
| | 746584c453 | |
| | b56daaaca2 | |
| | 931e5e10c3 | |
| | c172f838b1 | |
| | c07ae29cd9 | |
| | 214c9bfd8b | |
| | 716140d64d | |
| | 088cb4ef59 | |
| | d1472fc351 | |
| | 5c8c0c31d8 | |
| | 7f3c00c7a2 | |
| | c180dab791 | |
| | f24acc21c7 | |
| | 40b637849d | |
| | e7db1685df | |
| | eccbfd1011 | |
| | 90211f6769 | |
| | edc32ac18e | |
| | fe68e020e3 | |
| | 81e1e3544d | |
| | 09372d5c35 | |
| | 078a89e4ca | |
| | dbc6ae26a6 | |
| | b6f429867a | |
| | 09f50660ce | |
| | 189825b495 | |
| | 764b7ff610 | |
| | d499db7f0e | |
| | ed2d1c7bf9 | |
| | 14b73cbd47 | |
| | 3124785a08 | |
| | 60e6306107 | |
| | 42ccb7830a | |
| | 0bb03b9292 | |
| | ed6fbf1480 | |
| | 477cec6021 | |
| | 924500d111 | |
| | 0677504ef1 | |
| | ca2a7c4d9c | |
| | 28606629ad | |
| | c817279464 | |
| | 009d6ed8ed | |
| | 5cec1282a9 | |
| | 340170fd29 | |
| | 7ed0cc139a | |
| | 2c822213eb | |
| | c08bb39ffe | |
| | 5083d8ab34 | |
| | 7552a5dd07 | |
| | c93d68f853 | |
| | 3e8aa4023d | |
| | b443875e66 | |
.bazelrc (7 lines changed)
@@ -37,7 +37,6 @@ build:minimal --@io_bazel_rules_go//go/config:tags=minimal

# Release flags
build:release --compilation_mode=opt
build:release --stamp

# LLVM compiler for building C/C++ dependencies.
build:llvm --define compiler=llvm
@@ -231,3 +230,9 @@ build --modify_execution_info='GoStdlib.*=+no-remote-cache'

# Set bazel gotag
build --define gotags=bazel

# Build the binary with Beacon API calls for the validator
build --flag_alias=use_beacon_api=//validator/client/validator-client-factory:use_beacon_api

build:beacon_api --use_beacon_api
build:beacon_api --define=gotags=use_beacon_api
.github/ISSUE_TEMPLATE/feature_request.md (vendored, 6 lines changed)
@@ -16,12 +16,12 @@ Existing issues often contain information about workarounds, resolution, or prog

### Description

<!-- ✍️ A clear and concise description of the problem or missing capability... -->
<!-- ✍️--> A clear and concise description of the problem or missing capability...

### Describe the solution you'd like

<!-- ✍️ If you have a solution in mind, please describe it. -->
<!-- ✍️--> If you have a solution in mind, please describe it.

### Describe alternatives you've considered

<!-- ✍️ Have you considered any alternative solutions or workarounds? -->
<!-- ✍️--> Have you considered any alternative solutions or workarounds?
@@ -1,4 +1,4 @@
# Dependency Management in Prysm
# Dependency Managagement in Prysm

Prysm is go project with many complicated dependencies, including some c++ based libraries. There
are two parts to Prysm's dependency management. Go modules and bazel managed dependencies. Be sure
@@ -28,7 +28,7 @@ including complicated c++ dependencies.
One key advantage of Bazel over vanilla `go build` is that Bazel automatically (re)builds generated
pb.go files at build time when file changes are present in any protobuf definition file or after
any updates to the protobuf compiler or other relevant dependencies. Vanilla go users should run
the following scripts often to ensure their generated files are up to date. Furthermore, Prysm
the following scripts often to ensure their generated files are up to date. Further more, Prysm
generates SSZ marshal related code based on defined data structures. These generated files must
also be updated and checked in as frequently.
WORKSPACE (10 lines changed)
@@ -188,7 +188,7 @@ filegroup(
url = "https://github.com/eth-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
)

consensus_spec_version = "v1.3.0-rc.1"
consensus_spec_version = "v1.3.0-alpha.1"

bls_test_version = "v0.1.1"

@@ -204,7 +204,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "3d6fadb64674eb64a84fae6c2efa9949231ea91e7cb74ada9214097323e86569",
sha256 = "b5a65eb5ecef1c4fca82ff29739936fee019e8a529ef392ea5e46aa39f40a0b2",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)

@@ -220,7 +220,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "54ffbcab1e77316a280e6f5a64c6ed62351e8f5678e6fa49340e49b9b5575e8e",
sha256 = "b381bb0184e69cb17d05fbbe75f48c6aec7726957d073e3a65c26671d5d27d37",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)

@@ -236,7 +236,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "bb06d30ca533dc97d45f2367916ba9ff1b5af52f08a9d8c33bb7b1a61254094e",
sha256 = "9466f2a5a2dea039a2deb953f0b5dce5399400028bf3f218ffef03f8ef9c446c",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)

@@ -251,7 +251,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "9d22246c00ec3907ef8dc3ddccdfe6f7153ce46df73deee0a0176fe7e4aa1126",
sha256 = "3cc3141651a320a1f5767d15826e85aaa96eb4459d9e1a1d3f5a0cdbc79b8f56",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -28,14 +28,13 @@ import (
)

const (
getSignedBlockPath = "/eth/v2/beacon/blocks"
getBlockRootPath = "/eth/v1/beacon/blocks/{{.Id}}/root"
getForkForStatePath = "/eth/v1/beacon/states/{{.Id}}/fork"
getWeakSubjectivityPath = "/eth/v1/beacon/weak_subjectivity"
getForkSchedulePath = "/eth/v1/config/fork_schedule"
getStatePath = "/eth/v2/debug/beacon/states"
getNodeVersionPath = "/eth/v1/node/version"
changeBLStoExecutionPath = "/eth/v1/beacon/pool/bls_to_execution_changes"
getSignedBlockPath = "/eth/v2/beacon/blocks"
getBlockRootPath = "/eth/v1/beacon/blocks/{{.Id}}/root"
getForkForStatePath = "/eth/v1/beacon/states/{{.Id}}/fork"
getWeakSubjectivityPath = "/eth/v1/beacon/weak_subjectivity"
getForkSchedulePath = "/eth/v1/config/fork_schedule"
getStatePath = "/eth/v2/debug/beacon/states"
getNodeVersionPath = "/eth/v1/node/version"
)

// StateOrBlockId represents the block_id / state_id parameters that several of the Eth Beacon API methods accept.
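The {{.Id}} placeholders in the path constants above are Go text/template actions that get filled with a concrete block or state identifier before a request is issued. A minimal, self-contained sketch of that expansion follows; the renderIdPath helper is hypothetical and only illustrates the mechanism, it is not part of this diff.

```go
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// renderIdPath fills the {{.Id}} placeholder used by paths such as
// getBlockRootPath with a concrete block_id / state_id value.
func renderIdPath(pathTemplate, id string) (string, error) {
	tmpl, err := template.New("path").Parse(pathTemplate)
	if err != nil {
		return "", err
	}
	var buf bytes.Buffer
	// The template data only needs an Id field to satisfy {{.Id}}.
	if err := tmpl.Execute(&buf, struct{ Id string }{Id: id}); err != nil {
		return "", err
	}
	return buf.String(), nil
}

func main() {
	p, err := renderIdPath("/eth/v1/beacon/blocks/{{.Id}}/root", "head")
	if err != nil {
		panic(err)
	}
	fmt.Println(p) // /eth/v1/beacon/blocks/head/root
}
```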
@@ -147,6 +146,7 @@ func withSSZEncoding() reqOption {
// get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
func (c *Client) get(ctx context.Context, path string, opts ...reqOption) ([]byte, error) {
u := c.baseURL.ResolveReference(&url.URL{Path: path})
log.Printf("requesting %s", u.String())
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
@@ -343,60 +343,6 @@ func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData
}, nil
}

// SubmitChangeBLStoExecution calls a beacon API endpoint to set the withdrawal addresses based on the given signed messages.
// If the API responds with something other than OK there will be failure messages associated to the corresponding request message.
func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apimiddleware.SignedBLSToExecutionChangeJson) error {
u := c.baseURL.ResolveReference(&url.URL{Path: changeBLStoExecutionPath})
body, err := json.Marshal(request)
if err != nil {
return errors.Wrap(err, "failed to marshal JSON")
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, u.String(), bytes.NewBuffer(body))
if err != nil {
return errors.Wrap(err, "invalid format, failed to create new POST request object")
}
req.Header.Set("Content-Type", "application/json")
resp, err := c.hc.Do(req)
if err != nil {
return err
}
defer func() {
err = resp.Body.Close()
}()
if resp.StatusCode != http.StatusOK {
decoder := json.NewDecoder(resp.Body)
decoder.DisallowUnknownFields()
errorJson := &apimiddleware.IndexedVerificationFailureErrorJson{}
if err := decoder.Decode(errorJson); err != nil {
return errors.Wrapf(err, "failed to decode error JSON for %s", resp.Request.URL)
}
for _, failure := range errorJson.Failures {
w := request[failure.Index].Message
log.WithFields(log.Fields{
"validator_index": w.ValidatorIndex,
"withdrawal_address": w.ToExecutionAddress,
}).Error(failure.Message)
}
return errors.Errorf("POST error %d: %s", errorJson.Code, errorJson.Message)
}
return nil
}

// GetBLStoExecutionChanges gets all the set withdrawal messages in the node's operation pool.
// Returns a struct representation of json response.
func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*apimiddleware.BLSToExecutionChangesPoolResponseJson, error) {
body, err := c.get(ctx, changeBLStoExecutionPath)
if err != nil {
return nil, err
}
poolResponse := &apimiddleware.BLSToExecutionChangesPoolResponseJson{}
err = json.Unmarshal(body, poolResponse)
if err != nil {
return nil, err
}
return poolResponse, nil
}

func non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(response.Body)
var body string

@@ -110,7 +110,8 @@ func stringToUint256(s string) (Uint256, error) {

// sszBytesToUint256 creates a Uint256 from a ssz-style (little-endian byte slice) representation.
func sszBytesToUint256(b []byte) (Uint256, error) {
bi := bytesutil.LittleEndianBytesToBigInt(b)
bi := new(big.Int)
bi.SetBytes(bytesutil.ReverseByteOrder(b))
if !isValidUint256(bi) {
return Uint256{}, errors.Wrapf(errDecodeUint256, "value=%s", b)
}
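Both variants in the hunk above decode an SSZ-style little-endian byte slice into a big integer: one through a dedicated helper, the other by reversing the bytes and calling SetBytes, which expects big-endian input. A standalone sketch of that interpretation, assuming nothing from the prysm bytesutil package (the reverseBytes helper below is local to the example):

```go
package main

import (
	"fmt"
	"math/big"
)

// reverseBytes returns a copy of b with the byte order flipped; it stands in
// for a bytesutil-style ReverseByteOrder helper and is local to this example.
func reverseBytes(b []byte) []byte {
	out := make([]byte, len(b))
	for i, v := range b {
		out[len(b)-1-i] = v
	}
	return out
}

func main() {
	// SSZ encodes integers little-endian: the value 0x0102 is stored as {0x02, 0x01}.
	le := []byte{0x02, 0x01}

	// big.Int.SetBytes expects big-endian input, so the slice is reversed first.
	bi := new(big.Int).SetBytes(reverseBytes(le))

	fmt.Println(bi) // 258, i.e. 0x0102
}
```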
@@ -328,11 +328,6 @@ func (s *Service) IsFinalized(ctx context.Context, root [32]byte) bool {
if s.ForkChoicer().FinalizedCheckpoint().Root == root {
return true
}
// If node exists in our store, then it is not
// finalized.
if s.ForkChoicer().HasNode(root) {
return false
}
return s.cfg.BeaconDB.IsFinalizedBlock(ctx, root)
}

@@ -125,10 +125,7 @@ func TestHeadSlot_CanRetrieve(t *testing.T) {
c := &Service{}
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
require.NoError(t, err)
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
b.Block().SetSlot(100)
c.head = &head{block: b, state: s}
c.head = &head{slot: 100, state: s}
assert.Equal(t, types.Slot(100), c.HeadSlot())
}

@@ -331,7 +328,7 @@ func TestService_ChainHeads(t *testing.T) {
st, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
st, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
st, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
@@ -415,7 +412,7 @@ func TestService_IsOptimistic(t *testing.T) {
ctx := context.Background()
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
@@ -438,7 +435,7 @@ func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {

func TestService_IsOptimisticForRoot(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
@@ -456,7 +453,7 @@ func TestService_IsOptimisticForRoot(t *testing.T) {
func TestService_IsOptimisticForRoot_DB(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
@@ -514,7 +511,7 @@ func TestService_IsOptimisticForRoot_DB(t *testing.T) {
func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
@@ -8,7 +8,7 @@ var (
// ErrInvalidBlockHashPayloadStatus is returned when the payload has invalid block hash.
ErrInvalidBlockHashPayloadStatus = invalidBlock{error: errors.New("received an INVALID_BLOCK_HASH payload from execution engine")}
// ErrUndefinedExecutionEngineError is returned when the execution engine returns an error that is not defined
ErrUndefinedExecutionEngineError = errors.New("received an undefined execution engine error")
ErrUndefinedExecutionEngineError = errors.New("received an undefined ee error")
// errNilFinalizedInStore is returned when a nil finalized checkpt is returned from store.
errNilFinalizedInStore = errors.New("nil finalized checkpoint returned from store")
// errNilFinalizedCheckpoint is returned when a nil finalized checkpt is returned from a state.
@@ -19,6 +19,8 @@ var (
errInvalidNilSummary = errors.New("nil summary returned from the DB")
// errWrongBlockCount is returned when the wrong number of blocks or block roots is used
errWrongBlockCount = errors.New("wrong number of blocks or block roots")
// block is not a valid optimistic candidate block
errNotOptimisticCandidate = errors.New("block is not suitable for optimistic sync")
// errBlockNotFoundInCacheOrDB is returned when a block is not found in the cache or DB.
errBlockNotFoundInCacheOrDB = errors.New("block not found in cache or db")
// errNilStateFromStategen is returned when a nil state is returned from the state generator.
@@ -1,6 +1,7 @@
package blockchain

import (
"bytes"
"context"
"fmt"

@@ -40,6 +41,7 @@ type notifyForkchoiceUpdateArg struct {
func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkchoiceUpdateArg) (*enginev1.PayloadIDBytes, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyForkchoiceUpdate")
defer span.End()
log.Infof("notifyForkchoiceUpdate, fork version=%#x, headRoot=%#x, headState.latest_block_header=%#x", arg.headState.Fork().CurrentVersion, arg.headRoot, arg.headState.LatestBlockHeader().BodyRoot)

headBlk := arg.headBlock
if headBlk == nil || headBlk.IsNil() || headBlk.Body().IsNil() {
@@ -52,7 +54,25 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
log.WithError(err).Error("Could not determine if head block is execution block")
return nil, nil
}
var eth1BlockHash []byte
if !isExecutionBlk {
br, err := headBlk.HashTreeRoot()
if err != nil {
return nil, err
}

// TODO: this is a hack
// try to grab the payload header from the state in case the block doesn't have it but the state does
sp, err := arg.headState.LatestExecutionPayloadHeader()
if err != nil {
log.WithError(err).Infof("notifyForkchoiceUpdate, skipping non-execution block with root=%#x", br)
return nil, nil
}
eth1BlockHash = sp.BlockHash()
if len(sp.BlockHash()) != 32 || bytes.Equal(eth1BlockHash, params.BeaconConfig().ZeroHash[:]) {
log.Infof("notifyForkchoiceUpdate, skipping non-execution block with root=%#x and empty payload header block hash", br)
return nil, nil
}
return nil, nil
}
headPayload, err := headBlk.Body().Execution()
@@ -60,17 +80,20 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
log.WithError(err).Error("Could not get execution payload for head block")
return nil, nil
}
eth1BlockHash = headPayload.BlockHash()
finalizedHash := s.ForkChoicer().FinalizedPayloadBlockHash()
justifiedHash := s.ForkChoicer().JustifiedPayloadBlockHash()
fcs := &enginev1.ForkchoiceState{
HeadBlockHash: headPayload.BlockHash(),
HeadBlockHash: eth1BlockHash,
SafeBlockHash: justifiedHash[:],
FinalizedBlockHash: finalizedHash[:],
}

nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
hasAttr, attr, proposerId := s.getPayloadAttribute(ctx, arg.headState, nextSlot)

hasAttr, attr, proposerId, err := s.getPayloadAttribute(ctx, arg.headState, nextSlot)
if err != nil {
log.WithError(err).Error("Could not get head payload attribute")
}
payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, attr)
if err != nil {
switch err {
@@ -78,7 +101,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
forkchoiceUpdatedOptimisticNodeCount.Inc()
log.WithFields(logrus.Fields{
"headSlot": headBlk.Slot(),
"headPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash())),
"headPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(eth1BlockHash)),
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash[:])),
}).Info("Called fork choice updated with optimistic block")
return payloadID, nil
@@ -143,9 +166,10 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
return nil, nil
}
}
log.WithField("payloadID", fmt.Sprintf("%#x", payloadID)).WithField("lastValidHash", fmt.Sprintf("%#x", lastValidHash)).Infof("notifyForkchoiceUpdate, no error from ee call")
forkchoiceUpdatedValidNodeCount.Inc()
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, arg.headRoot); err != nil {
log.WithError(err).Error("Could not set head root to valid")
log.WithField("headRoot", fmt.Sprintf("%#x", arg.headRoot)).WithField("payloadID", fmt.Sprintf("%#x", payloadID)).WithField("lastValidHash", fmt.Sprintf("%#x", lastValidHash)).WithError(err).Error("Could not set head root to valid")
return nil, nil
}
// If the forkchoice update call has an attribute, update the proposer payload ID cache.
@@ -153,9 +177,10 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
var pId [8]byte
copy(pId[:], payloadID[:])
s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nextSlot, proposerId, pId, arg.headRoot)
log.Infof("notifyForkchoiceUpdate, SetProposerAndPayloadIDs(nextSlot=%d, proposerId=%d, pId=%#x, arg.headRoot=%#x)", nextSlot, proposerId, pId, arg.headRoot)
} else if hasAttr && payloadID == nil {
log.WithFields(logrus.Fields{
"blockHash": fmt.Sprintf("%#x", headPayload.BlockHash()),
"blockHash": fmt.Sprintf("%#x", eth1BlockHash),
"slot": headBlk.Slot(),
}).Error("Received nil payload ID on VALID engine response")
}
@@ -250,25 +275,24 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,

// getPayloadAttributes returns the payload attributes for the given state and slot.
// The attribute is required to initiate a payload build process in the context of an `engine_forkchoiceUpdated` call.
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot types.Slot) (bool, payloadattribute.Attributer, types.ValidatorIndex) {
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot types.Slot) (bool, payloadattribute.Attributer, types.ValidatorIndex, error) {
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
// Root is `[32]byte{}` since we are retrieving proposer ID of a given slot. During insertion at assignment the root was not known.
proposerID, _, ok := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(slot, [32]byte{} /* root */)
if !ok { // There's no need to build attribute if there is no proposer for slot.
return false, emptyAttri, 0
return false, emptyAttri, 0, nil
}
log.Infof("getPayloadAttribute, GetProposerPayloadIDs(nextSlot=%d, arg.headRoot=%#x,idx=%v)", slot, [32]byte{}, proposerID)

// Get previous randao.
st = st.Copy()
st, err := transition.ProcessSlotsIfPossible(ctx, st, slot)
if err != nil {
log.WithError(err).Error("Could not process slots to get payload attribute")
return false, emptyAttri, 0
return false, emptyAttri, 0, err
}
prevRando, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
if err != nil {
log.WithError(err).Error("Could not get randao mix to get payload attribute")
return false, emptyAttri, 0
return false, emptyAttri, 0, err
}

// Get fee recipient.
@@ -286,8 +310,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
"Please refer to our documentation for instructions")
}
case err != nil:
log.WithError(err).Error("Could not get fee recipient to get payload attribute")
return false, emptyAttri, 0
return false, emptyAttri, 0, errors.Wrap(err, "could not get fee recipient in db")
default:
feeRecipient = recipient
}
@@ -295,8 +318,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
// Get timestamp.
t, err := slots.ToTime(uint64(s.genesisTime.Unix()), slot)
if err != nil {
log.WithError(err).Error("Could not get timestamp to get payload attribute")
return false, emptyAttri, 0
return false, emptyAttri, 0, err
}

var attr payloadattribute.Attributer
@@ -304,8 +326,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
case version.Capella:
withdrawals, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
return false, emptyAttri, 0
return false, emptyAttri, 0, errors.Wrap(err, "could not get expected withdrawals")
}
attr, err = payloadattribute.New(&enginev1.PayloadAttributesV2{
Timestamp: uint64(t.Unix()),
@@ -314,8 +335,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
Withdrawals: withdrawals,
})
if err != nil {
log.WithError(err).Error("Could not get payload attribute")
return false, emptyAttri, 0
return false, emptyAttri, 0, err
}
case version.Bellatrix:
attr, err = payloadattribute.New(&enginev1.PayloadAttributes{
@@ -324,15 +344,13 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
SuggestedFeeRecipient: feeRecipient.Bytes(),
})
if err != nil {
log.WithError(err).Error("Could not get payload attribute")
return false, emptyAttri, 0
return false, emptyAttri, 0, err
}
default:
log.WithField("version", st.Version()).Error("Could not get payload attribute due to unknown state version")
return false, emptyAttri, 0
return false, emptyAttri, 0, errors.New("unknown state version")
}

return true, attr, proposerID
return true, attr, proposerID, nil
}

// removeInvalidBlockAndState removes the invalid block and its corresponding state from the cache and DB.
@@ -698,14 +698,14 @@ func Test_NotifyNewPayload(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
e := &mockExecution.EngineClient{ErrNewPayload: tt.newPayloadErr, BlockByHashMap: map[[32]byte]*v1.ExecutionBlock{}}
e.BlockByHashMap[[32]byte{'a'}] = &v1.ExecutionBlock{
e := &mockExecution.EngineClient{ErrNewPayload: tt.newPayloadErr, BlockByHashMap: map[[32]byte]*v1.ExecutionBlockBellatrix{}}
e.BlockByHashMap[[32]byte{'a'}] = &v1.ExecutionBlockBellatrix{
Header: gethtypes.Header{
ParentHash: common.BytesToHash([]byte("b")),
},
TotalDifficulty: "0x2",
}
e.BlockByHashMap[[32]byte{'b'}] = &v1.ExecutionBlock{
e.BlockByHashMap[[32]byte{'b'}] = &v1.ExecutionBlockBellatrix{
Header: gethtypes.Header{
ParentHash: common.BytesToHash([]byte("3")),
},
@@ -759,14 +759,14 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
require.NoError(t, err)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
e := &mockExecution.EngineClient{BlockByHashMap: map[[32]byte]*v1.ExecutionBlock{}}
e.BlockByHashMap[[32]byte{'a'}] = &v1.ExecutionBlock{
e := &mockExecution.EngineClient{BlockByHashMap: map[[32]byte]*v1.ExecutionBlockBellatrix{}}
e.BlockByHashMap[[32]byte{'a'}] = &v1.ExecutionBlockBellatrix{
Header: gethtypes.Header{
ParentHash: common.BytesToHash([]byte("b")),
},
TotalDifficulty: "0x2",
}
e.BlockByHashMap[[32]byte{'b'}] = &v1.ExecutionBlock{
e.BlockByHashMap[[32]byte{'b'}] = &v1.ExecutionBlockBellatrix{
Header: gethtypes.Header{
ParentHash: common.BytesToHash([]byte("3")),
},
@@ -792,8 +792,8 @@ func Test_GetPayloadAttribute(t *testing.T) {
// Cache miss
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0)
hasPayload, _, vId, err := service.getPayloadAttribute(ctx, nil, 0)
require.NoError(t, err)
require.Equal(t, false, hasPayload)
require.Equal(t, types.ValidatorIndex(0), vId)

@@ -801,8 +801,10 @@ func Test_GetPayloadAttribute(t *testing.T) {
suggestedVid := types.ValidatorIndex(1)
slot := types.Slot(1)
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
st, _ := util.DeterministicGenesisState(t, 1)
hook := logTest.NewGlobal()
hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, slot)
hasPayload, attr, vId, err := service.getPayloadAttribute(ctx, st, slot)
require.NoError(t, err)
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
@@ -812,56 +814,13 @@ func Test_GetPayloadAttribute(t *testing.T) {
suggestedAddr := common.HexToAddress("123")
require.NoError(t, service.cfg.BeaconDB.SaveFeeRecipientsByValidatorIDs(ctx, []types.ValidatorIndex{suggestedVid}, []common.Address{suggestedAddr}))
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
hasPayload, attr, vId = service.getPayloadAttribute(ctx, st, slot)
hasPayload, attr, vId, err = service.getPayloadAttribute(ctx, st, slot)
require.NoError(t, err)
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
}

func Test_GetPayloadAttributeV2(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}

// Cache miss
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, _ := util.DeterministicGenesisStateCapella(t, 1)
hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0)
require.Equal(t, false, hasPayload)
require.Equal(t, types.ValidatorIndex(0), vId)

// Cache hit, advance state, no fee recipient
suggestedVid := types.ValidatorIndex(1)
slot := types.Slot(1)
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
hook := logTest.NewGlobal()
hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, slot)
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
require.LogsContain(t, hook, "Fee recipient is currently using the burn address")
a, err := attr.Withdrawals()
require.NoError(t, err)
require.Equal(t, 0, len(a))

// Cache hit, advance state, has fee recipient
suggestedAddr := common.HexToAddress("123")
require.NoError(t, service.cfg.BeaconDB.SaveFeeRecipientsByValidatorIDs(ctx, []types.ValidatorIndex{suggestedVid}, []common.Address{suggestedAddr}))
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
hasPayload, attr, vId = service.getPayloadAttribute(ctx, st, slot)
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
a, err = attr.Withdrawals()
require.NoError(t, err)
require.Equal(t, 0, len(a))
}

func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
@@ -877,7 +836,7 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
var genesisStateRoot [32]byte
genesisStateRoot := [32]byte{}
genesisBlk := blocks.NewGenesisBlock(genesisStateRoot[:])
util.SaveBlock(t, ctx, beaconDB, genesisBlk)
genesisRoot, err := genesisBlk.Block.HashTreeRoot()
@@ -19,7 +19,6 @@ import (
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v3/math"
ethpbv1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@@ -52,6 +51,7 @@ func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {

// This defines the current chain service's view of head.
type head struct {
slot types.Slot // current head slot.
root [32]byte // current head root.
block interfaces.SignedBeaconBlock // current head block.
state state.BeaconState // current head state.
@@ -109,21 +109,11 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
}
dis := headSlot + newHeadSlot - 2*forkSlot
dep := math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot))
oldWeight, err := s.ForkChoicer().Weight(oldHeadRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", oldHeadRoot)).Warn("could not determine node weight")
}
newWeight, err := s.ForkChoicer().Weight(newHeadRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
}
log.WithFields(logrus.Fields{
"newSlot": fmt.Sprintf("%d", newHeadSlot),
"newRoot": fmt.Sprintf("%#x", newHeadRoot),
"newWeight": newWeight,
"oldSlot": fmt.Sprintf("%d", headSlot),
"oldRoot": fmt.Sprintf("%#x", oldHeadRoot),
"oldWeight": oldWeight,
"commonAncestorRoot": fmt.Sprintf("%#x", commonRoot),
"distance": dis,
"depth": dep,
@@ -149,7 +139,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
},
})

if err := s.saveOrphanedOperations(ctx, oldHeadRoot, newHeadRoot); err != nil {
if err := s.saveOrphanedAtts(ctx, oldHeadRoot, newHeadRoot); err != nil {
return err
}
reorgCount.Inc()
@@ -212,11 +202,12 @@ func (s *Service) setHead(root [32]byte, block interfaces.SignedBeaconBlock, sta
return err
}
s.head = &head{
slot: block.Block().Slot(),
root: root,
block: bCp,
state: state.Copy(),
}
return nil
return err
}

// This sets head view object which is used to track the head slot, root, block and state. The method
@@ -232,6 +223,7 @@ func (s *Service) setHeadInitialSync(root [32]byte, block interfaces.SignedBeaco
return err
}
s.head = &head{
slot: block.Block().Slot(),
root: root,
block: bCp,
state: state,
@@ -242,10 +234,7 @@ func (s *Service) setHeadInitialSync(root [32]byte, block interfaces.SignedBeaco
// This returns the head slot.
// This is a lock free version.
func (s *Service) headSlot() types.Slot {
if s.head == nil || s.head.block == nil || s.head.block.Block() == nil {
return 0
}
return s.head.block.Block().Slot()
return s.head.slot
}

// This returns the head root.
@@ -358,9 +347,9 @@ func (s *Service) notifyNewHeadEvent(
return nil
}

// This saves the Attestations and BLSToExecChanges between `orphanedRoot` and the common ancestor root that is derived using `newHeadRoot`.
// This saves the attestations between `orphanedRoot` and the common ancestor root that is derived using `newHeadRoot`.
// It also filters out the attestations that is one epoch older as a defense so invalid attestations don't flow into the attestation pool.
func (s *Service) saveOrphanedOperations(ctx context.Context, orphanedRoot [32]byte, newHeadRoot [32]byte) error {
func (s *Service) saveOrphanedAtts(ctx context.Context, orphanedRoot [32]byte, newHeadRoot [32]byte) error {
commonAncestorRoot, _, err := s.ForkChoicer().CommonAncestor(ctx, newHeadRoot, orphanedRoot)
switch {
// Exit early if there's no common ancestor and root doesn't exist, there would be nothing to save.
@@ -399,15 +388,6 @@ func (s *Service) saveOrphanedOperations(ctx context.Context, orphanedRoot [32]b
}
saveOrphanedAttCount.Inc()
}
if orphanedBlk.Version() >= version.Capella {
changes, err := orphanedBlk.Block().Body().BLSToExecutionChanges()
if err != nil {
return errors.Wrap(err, "could not get BLSToExecutionChanges")
}
for _, c := range changes {
s.cfg.BLSToExecPool.InsertBLSToExecChange(c)
}
}
parentRoot := orphanedBlk.Block().ParentRoot()
orphanedRoot = bytesutil.ToBytes32(parentRoot[:])
}
@@ -10,7 +10,6 @@ import (
mock "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain/testing"
testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/config/params"
@@ -31,7 +30,7 @@ func TestSaveHead_Same(t *testing.T) {
service := setupBeaconChain(t, beaconDB)

r := [32]byte{'A'}
service.head = &head{root: r}
service.head = &head{slot: 0, root: r}
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 1)
@@ -54,6 +53,7 @@ func TestSaveHead_Different(t *testing.T) {
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
service.head = &head{
slot: 0,
root: oldRoot,
block: oldBlock,
}
@@ -107,6 +107,7 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
service.head = &head{
slot: 0,
root: oldRoot,
block: oldBlock,
}
@@ -284,7 +285,7 @@ func TestSaveOrphanedAtts(t *testing.T) {
util.SaveBlock(t, ctx, beaconDB, blk)
}

require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
wantAtts := []*ethpb.Attestation{
blk3.Block.Body.Attestations[0],
@@ -302,37 +303,31 @@ func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.cfg.BLSToExecPool = blstoexec.NewPool()
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+2)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)

// Chain setup
// 0 -- 1 -- 2
// \-4
st, keys := util.DeterministicGenesisStateCapella(t, 64)
blkConfig := util.DefaultBlockGenConfig()
blkConfig.NumBLSChanges = 5
blkG, err := util.GenerateFullBlockCapella(st, keys, blkConfig, 1)
st, keys := util.DeterministicGenesisState(t, 64)
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
assert.NoError(t, err)
util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
rG, err := blkG.Block.HashTreeRoot()
require.NoError(t, err)

blkConfig.NumBLSChanges = 10
blk1, err := util.GenerateFullBlockCapella(st, keys, blkConfig, 2)
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)

blkConfig.NumBLSChanges = 15
blk2, err := util.GenerateFullBlockCapella(st, keys, blkConfig, 3)
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)

blk4 := util.NewBeaconBlockCapella()
blkConfig.NumBLSChanges = 0
blk4 := util.NewBeaconBlock()
blk4.Block.Slot = 4
blk4.Block.ParentRoot = rG[:]
r4, err := blk4.Block.HashTreeRoot()
@@ -340,7 +335,7 @@ func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}

for _, blk := range []*ethpb.SignedBeaconBlockCapella{blkG, blk1, blk2, blk4} {
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
@@ -349,11 +344,8 @@ func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
util.SaveBlock(t, ctx, beaconDB, blk)
}

require.NoError(t, service.saveOrphanedOperations(ctx, r2, r4))
require.Equal(t, 1, service.cfg.AttPool.AggregatedAttestationCount())
pending, err := service.cfg.BLSToExecPool.PendingBLSToExecChanges()
require.NoError(t, err)
require.Equal(t, 15, len(pending))
require.NoError(t, service.saveOrphanedAtts(ctx, r2, r4))
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
}

func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
@@ -412,7 +404,7 @@ func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
util.SaveBlock(t, ctx, beaconDB, blk)
}

require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
wantAtts := []*ethpb.Attestation{
blk3.Block.Body.Attestations[0],
@@ -476,7 +468,7 @@ func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
util.SaveBlock(t, ctx, beaconDB, blk)
}

require.NoError(t, service.saveOrphanedOperations(ctx, r2, r4))
require.NoError(t, service.saveOrphanedAtts(ctx, r2, r4))
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
}
@@ -38,14 +38,14 @@ func logStateTransitionData(b interfaces.BeaconBlock) error {
if len(b.Body().VoluntaryExits()) > 0 {
log = log.WithField("voluntaryExits", len(b.Body().VoluntaryExits()))
}
if b.Version() >= version.Altair {
if b.Version() == version.Altair || b.Version() == version.Bellatrix {
agg, err := b.Body().SyncAggregate()
if err != nil {
return err
}
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
}
if b.Version() >= version.Bellatrix {
if b.Version() == version.Bellatrix {
p, err := b.Body().Execution()
if err != nil {
return err
@@ -87,7 +87,7 @@ func logBlockSyncStatus(block interfaces.BeaconBlock, blockRoot [32]byte, justif
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime),
"deposits": len(block.Body().Deposits()),
"deposits": fmt.Sprintf("%d/%d", len(block.Body().Deposits()), block.Body().Eth1Data().DepositCount),
}).Debug("Synced new block")
} else {
log.WithFields(logrus.Fields{
@@ -317,8 +317,9 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
var b *precompute.Balance
var v []*precompute.Validator
var err error

if headState.Version() == version.Phase0 {
switch headState.Version() {
case version.Phase0:
// Validator participation should be viewed on the canonical chain.
v, b, err = precompute.New(ctx, headState)
if err != nil {
return err
@@ -327,7 +328,7 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
if err != nil {
return err
}
} else if headState.Version() >= version.Altair {
case version.Altair, version.Bellatrix, version.Capella:
v, b, err = altair.InitializePrecomputeValidators(ctx, headState)
if err != nil {
return err
@@ -336,10 +337,9 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
if err != nil {
return err
}
} else {
default:
return errors.Errorf("invalid state type provided: %T", headState.ToProtoUnsafe())
}

prevEpochActiveBalances.Set(float64(b.ActivePrevEpoch))
prevEpochSourceBalances.Set(float64(b.PrevEpochAttested))
prevEpochTargetBalances.Set(float64(b.PrevEpochTargetAttested))
@@ -15,6 +15,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
pb "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"github.com/sirupsen/logrus"
@@ -49,13 +50,9 @@ func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.SignedBea
if payload.IsNil() {
return errors.New("nil execution payload")
}
ok, err := canUseValidatedTerminalBlockHash(b.Block().Slot(), payload)
if err != nil {
if err := validateTerminalBlockHash(b.Block().Slot(), payload); err != nil {
return errors.Wrap(err, "could not validate terminal block hash")
}
if ok {
return nil
}
mergeBlockParentHash, mergeBlockTD, err := s.getBlkParentHashAndTD(ctx, payload.ParentHash())
if err != nil {
return errors.Wrap(err, "could not get merge block parent hash and total difficulty")
@@ -90,15 +87,19 @@ func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.SignedBea

// getBlkParentHashAndTD retrieves the parent hash and total difficulty of the given block.
func (s *Service) getBlkParentHashAndTD(ctx context.Context, blkHash []byte) ([]byte, *uint256.Int, error) {
blk, err := s.cfg.ExecutionEngineCaller.ExecutionBlockByHash(ctx, common.BytesToHash(blkHash), false /* no txs */)
blk, err := s.cfg.ExecutionEngineCaller.ExecutionBlockByHash(ctx, version.Bellatrix, common.BytesToHash(blkHash), false /* no txs */)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get pow block")
}

if blk == nil {
return nil, nil, errors.New("pow block is nil")
}
blk.Version = version.Bellatrix
blkTDBig, err := hexutil.DecodeBig(blk.TotalDifficulty)
blkBellatrix, ok := blk.(*pb.ExecutionBlockBellatrix)
if !ok {
return nil, nil, fmt.Errorf("wrong execution block type %T", blk)
}
blkTDBig, err := hexutil.DecodeBig(blkBellatrix.TotalDifficulty)
if err != nil {
return nil, nil, errors.Wrap(err, "could not decode merge block total difficulty")
}
@@ -106,10 +107,10 @@ func (s *Service) getBlkParentHashAndTD(ctx context.Context, blkHash []byte) ([]
if overflows {
return nil, nil, errors.New("total difficulty overflows")
}
return blk.ParentHash[:], blkTDUint256, nil
return blkBellatrix.ParentHash[:], blkTDUint256, nil
}

// canUseValidatedTerminalBlockHash validates if the merge block is a valid terminal PoW block.
// validateTerminalBlockHash validates if the merge block is a valid terminal PoW block.
// spec code:
// if TERMINAL_BLOCK_HASH != Hash32():
//
@@ -117,17 +118,17 @@ func (s *Service) getBlkParentHashAndTD(ctx context.Context, blkHash []byte) ([]
// assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
// assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH
// return
func canUseValidatedTerminalBlockHash(blkSlot types.Slot, payload interfaces.ExecutionData) (bool, error) {
func validateTerminalBlockHash(blkSlot types.Slot, payload interfaces.ExecutionData) error {
if bytesutil.ToBytes32(params.BeaconConfig().TerminalBlockHash.Bytes()) == [32]byte{} {
return false, nil
return nil
}
if params.BeaconConfig().TerminalBlockHashActivationEpoch > slots.ToEpoch(blkSlot) {
return false, errors.New("terminal block hash activation epoch not reached")
return errors.New("terminal block hash activation epoch not reached")
}
if !bytes.Equal(payload.ParentHash(), params.BeaconConfig().TerminalBlockHash.Bytes()) {
return false, errors.New("parent hash does not match terminal block hash")
return errors.New("parent hash does not match terminal block hash")
}
return true, nil
return nil
}

// validateTerminalBlockDifficulties validates terminal pow block by comparing own total difficulty with parent's total difficulty.
@@ -18,7 +18,6 @@ import (
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/util"
)

func Test_validTerminalPowBlock(t *testing.T) {
@@ -119,18 +118,18 @@ func Test_validateMergeBlock(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)

engine := &mocks.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}}
engine := &mocks.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlockBellatrix{}}
service.cfg.ExecutionEngineCaller = engine
a := [32]byte{'a'}
b := [32]byte{'b'}
mergeBlockParentHash := [32]byte{'3'}
engine.BlockByHashMap[a] = &enginev1.ExecutionBlock{
engine.BlockByHashMap[a] = &enginev1.ExecutionBlockBellatrix{
Header: gethtypes.Header{
ParentHash: b,
},
TotalDifficulty: "0x2",
}
engine.BlockByHashMap[b] = &enginev1.ExecutionBlock{
engine.BlockByHashMap[b] = &enginev1.ExecutionBlockBellatrix{
Header: gethtypes.Header{
ParentHash: mergeBlockParentHash,
},
@@ -169,12 +168,12 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)

engine := &mocks.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}}
engine := &mocks.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlockBellatrix{}}
service.cfg.ExecutionEngineCaller = engine
h := [32]byte{'a'}
p := [32]byte{'b'}
td := "0x1"
engine.BlockByHashMap[h] = &enginev1.ExecutionBlock{
engine.BlockByHashMap[h] = &enginev1.ExecutionBlockBellatrix{
Header: gethtypes.Header{
ParentHash: p,
},
@@ -192,7 +191,7 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
_, _, err = service.getBlkParentHashAndTD(ctx, h[:])
require.ErrorContains(t, "pow block is nil", err)

engine.BlockByHashMap[h] = &enginev1.ExecutionBlock{
engine.BlockByHashMap[h] = &enginev1.ExecutionBlockBellatrix{
Header: gethtypes.Header{
ParentHash: p,
},
@@ -201,7 +200,7 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
_, _, err = service.getBlkParentHashAndTD(ctx, h[:])
require.ErrorContains(t, "could not decode merge block total difficulty: hex string without 0x prefix", err)

engine.BlockByHashMap[h] = &enginev1.ExecutionBlock{
engine.BlockByHashMap[h] = &enginev1.ExecutionBlockBellatrix{
Header: gethtypes.Header{
ParentHash: p,
},
@@ -214,42 +213,20 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
func Test_validateTerminalBlockHash(t *testing.T) {
wrapped, err := blocks.WrappedExecutionPayload(&enginev1.ExecutionPayload{})
require.NoError(t, err)
ok, err := canUseValidatedTerminalBlockHash(1, wrapped)
require.NoError(t, err)
require.Equal(t, false, ok)
require.NoError(t, validateTerminalBlockHash(1, wrapped))

cfg := params.BeaconConfig()
cfg.TerminalBlockHash = [32]byte{0x01}
params.OverrideBeaconConfig(cfg)
ok, err = canUseValidatedTerminalBlockHash(1, wrapped)
require.ErrorContains(t, "terminal block hash activation epoch not reached", err)
require.Equal(t, false, ok)
require.ErrorContains(t, "terminal block hash activation epoch not reached", validateTerminalBlockHash(1, wrapped))

cfg.TerminalBlockHashActivationEpoch = 0
params.OverrideBeaconConfig(cfg)
ok, err = canUseValidatedTerminalBlockHash(1, wrapped)
require.ErrorContains(t, "parent hash does not match terminal block hash", err)
require.Equal(t, false, ok)
require.ErrorContains(t, "parent hash does not match terminal block hash", validateTerminalBlockHash(1, wrapped))

wrapped, err = blocks.WrappedExecutionPayload(&enginev1.ExecutionPayload{
ParentHash: cfg.TerminalBlockHash.Bytes(),
})
require.NoError(t, err)
ok, err = canUseValidatedTerminalBlockHash(1, wrapped)
require.NoError(t, err)
require.Equal(t, true, ok)

ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
blk, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(&ethpb.SignedBeaconBlockBellatrix{}))
require.NoError(t, err)
blk.Block().SetSlot(1)
require.NoError(t, blk.Block().Body().SetExecution(wrapped))
require.NoError(t, service.validateMergeBlock(ctx, blk))
require.NoError(t, validateTerminalBlockHash(1, wrapped))
}
@@ -337,7 +337,11 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
|
||||
|
||||
jCheckpoints := make([]*ethpb.Checkpoint, len(blks))
|
||||
fCheckpoints := make([]*ethpb.Checkpoint, len(blks))
|
||||
sigSet := bls.NewSet()
|
||||
sigSet := &bls.SignatureBatch{
|
||||
Signatures: [][]byte{},
|
||||
PublicKeys: []bls.PublicKey{},
|
||||
Messages: [][32]byte{},
|
||||
}
|
||||
type versionAndHeader struct {
|
||||
version int
|
||||
header interfaces.ExecutionData
|
||||
@@ -377,13 +381,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
|
||||
}
|
||||
sigSet.Join(set)
|
||||
}
|
||||
|
||||
var verify bool
|
||||
if features.Get().EnableVerboseSigVerification {
|
||||
verify, err = sigSet.VerifyVerbosely()
|
||||
} else {
|
||||
verify, err = sigSet.Verify()
|
||||
}
|
||||
verify, err := sigSet.Verify()
|
||||
if err != nil {
|
||||
return invalidBlock{error: err}
|
||||
}
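Both sides of this hunk share one pattern: every block's signature set is folded into a single batch that is verified once, rather than verifying each block separately. A minimal sketch of that pattern, assuming the `bls` helpers used in the hunk; `blockSignatureSet` is a hypothetical stand-in for the per-block set construction done earlier in `onBlockBatch`:

```go
// Sketch: fold per-block signature sets into one batch and verify once.
func verifyBlockBatch(blks []interfaces.SignedBeaconBlock) error {
	sigSet := bls.NewSet() // empty batch of signatures, public keys, and messages
	for _, b := range blks {
		set, err := blockSignatureSet(b) // hypothetical per-block set builder
		if err != nil {
			return err
		}
		sigSet.Join(set) // accumulate this block's signatures into the batch
	}
	verify, err := sigSet.Verify() // single verification over the whole batch
	if err != nil {
		return err
	}
	if !verify {
		return errors.New("block signature batch did not verify")
	}
	return nil
}
```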
|
||||
@@ -531,7 +529,11 @@ func (s *Service) insertBlockToForkchoiceStore(ctx context.Context, blk interfac
|
||||
}
|
||||
}
|
||||
|
||||
return s.cfg.ForkChoiceStore.InsertNode(ctx, st, root)
|
||||
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, st, root); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// This feeds the attestations included in the block into the fork choice store. It allows the fork choice store
|
||||
@@ -557,6 +559,22 @@ func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.Be
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) handleBlockBLSToExecChanges(blk interfaces.BeaconBlock) error {
|
||||
if blk.Version() < version.Capella {
|
||||
return nil
|
||||
}
|
||||
changes, err := blk.Body().BLSToExecutionChanges()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get BLSToExecutionChanges")
|
||||
}
|
||||
for _, change := range changes {
|
||||
if err := s.cfg.BLSToExecPool.MarkIncluded(change); err != nil {
|
||||
return errors.Wrap(err, "could not mark BLSToExecutionChange as included")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// InsertSlashingsToForkChoiceStore inserts attester slashing indices to fork choice store.
|
||||
// To call this function, it's the caller's responsibility to ensure the slashing object is valid.
|
||||
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []*ethpb.AttesterSlashing) {
|
||||
@@ -694,6 +712,8 @@ func (s *Service) fillMissingBlockPayloadId(ctx context.Context, ti time.Time) e
|
||||
// Head root should be empty when retrieving proposer index for the next slot.
|
||||
_, id, has := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(s.CurrentSlot()+1, [32]byte{} /* head root */)
|
||||
// There exists a proposer for the next slot, but we haven't called fcu w/ payload attribute yet.
|
||||
log.Infof("missingPayloadID, GetProposerPayloadIDs(nextSlot=%d, arg.headRoot=%#x,has=%v,pid=%#x)", s.CurrentSlot()+1, [32]byte{}, has, id)
|
||||
|
||||
if has && id == [8]byte{} {
|
||||
missedPayloadIDFilledCount.Inc()
|
||||
headBlock, err := s.headBlock()
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
|
||||
@@ -68,6 +69,7 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b interfaces.BeaconBloc
|
||||
// during initial syncing. There's no risk given a state summary object is just a
|
||||
// subset of the block object.
|
||||
if !s.cfg.BeaconDB.HasStateSummary(ctx, parentRoot) && !s.cfg.BeaconDB.HasBlock(ctx, parentRoot) {
|
||||
log.Errorf("requesting blockroot %#x", bytesutil.Trunc(parentRoot[:]))
|
||||
return errors.New("could not reconstruct parent state")
|
||||
}
|
||||
|
||||
@@ -316,6 +318,23 @@ func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) e
|
||||
return nil
|
||||
}
|
||||
|
||||
// This deletes the input attestations from the attestation pool, so proposers don't include them in future blocks.
|
||||
func (s *Service) deletePoolAtts(atts []*ethpb.Attestation) error {
|
||||
for _, att := range atts {
|
||||
if helpers.IsAggregated(att) {
|
||||
if err := s.cfg.AttPool.DeleteAggregatedAttestation(att); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := s.cfg.AttPool.DeleteUnaggregatedAttestation(att); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// This ensures that the input root defaults to using genesis root instead of zero hashes. This is needed for handling
|
||||
// the fork choice justification routine.
|
||||
func (s *Service) ensureRootNotZeros(root [32]byte) [32]byte {
|
||||
|
||||
@@ -26,6 +26,7 @@ import (
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/blstoexec"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
|
||||
@@ -58,7 +59,7 @@ func TestStore_OnBlock(t *testing.T) {
|
||||
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
var genesisStateRoot [32]byte
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
util.SaveBlock(t, ctx, beaconDB, genesis)
|
||||
validGenesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
@@ -361,7 +362,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New()
|
||||
|
||||
var genesisStateRoot [32]byte
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
util.SaveBlock(t, ctx, beaconDB, genesis)
|
||||
validGenesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
@@ -420,7 +421,7 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New()
|
||||
|
||||
var genesisStateRoot [32]byte
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
util.SaveBlock(t, ctx, beaconDB, genesis)
|
||||
validGenesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
@@ -1003,7 +1004,7 @@ func TestInsertFinalizedDeposits(t *testing.T) {
|
||||
assert.NoError(t, gs.SetEth1Data(ðpb.Eth1Data{DepositCount: 10}))
|
||||
assert.NoError(t, gs.SetEth1DepositIndex(8))
|
||||
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
|
||||
var zeroSig [96]byte
|
||||
zeroSig := [96]byte{}
|
||||
for i := uint64(0); i < uint64(4*params.BeaconConfig().SlotsPerEpoch); i++ {
|
||||
root := []byte(strconv.Itoa(int(i)))
|
||||
assert.NoError(t, depositCache.InsertDeposit(ctx, ðpb.Deposit{Data: ðpb.Deposit_Data{
|
||||
@@ -1041,7 +1042,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
|
||||
assert.NoError(t, gs2.SetEth1Data(ðpb.Eth1Data{DepositCount: 15}))
|
||||
assert.NoError(t, gs2.SetEth1DepositIndex(13))
|
||||
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k', '2'}, gs2))
|
||||
var zeroSig [96]byte
|
||||
zeroSig := [96]byte{}
|
||||
for i := uint64(0); i < uint64(4*params.BeaconConfig().SlotsPerEpoch); i++ {
|
||||
root := []byte(strconv.Itoa(int(i)))
|
||||
assert.NoError(t, depositCache.InsertDeposit(ctx, ðpb.Deposit{Data: ðpb.Deposit_Data{
|
||||
@@ -1284,14 +1285,14 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
e := &mockExecution.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}}
|
||||
e.BlockByHashMap[aHash] = &enginev1.ExecutionBlock{
|
||||
e := &mockExecution.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlockBellatrix{}}
|
||||
e.BlockByHashMap[aHash] = &enginev1.ExecutionBlockBellatrix{
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: bHash,
|
||||
},
|
||||
TotalDifficulty: "0x2",
|
||||
}
|
||||
e.BlockByHashMap[bHash] = &enginev1.ExecutionBlock{
|
||||
e.BlockByHashMap[bHash] = &enginev1.ExecutionBlockBellatrix{
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash([]byte("3")),
|
||||
},
|
||||
@@ -2304,6 +2305,65 @@ func TestFillMissingBlockPayloadId_DiffSlotExitEarly(t *testing.T) {
|
||||
require.NoError(t, service.fillMissingBlockPayloadId(ctx, time.Unix(int64(params.BeaconConfig().SecondsPerSlot/2), 0)))
|
||||
}
|
||||
|
||||
func TestHandleBlockBLSToExecutionChanges(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fc := doublylinkedtree.New()
|
||||
pool := blstoexec.NewPool()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB, fc)),
|
||||
WithForkChoiceStore(fc),
|
||||
WithStateNotifier(&mock.MockStateNotifier{}),
|
||||
WithBLSToExecPool(pool),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("pre Capella block", func(t *testing.T) {
|
||||
body := ðpb.BeaconBlockBodyBellatrix{}
|
||||
pbb := ðpb.BeaconBlockBellatrix{
|
||||
Body: body,
|
||||
}
|
||||
blk, err := consensusblocks.NewBeaconBlock(pbb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.handleBlockBLSToExecChanges(blk))
|
||||
})
|
||||
|
||||
t.Run("Post Capella no changes", func(t *testing.T) {
|
||||
body := ðpb.BeaconBlockBodyCapella{}
|
||||
pbb := ðpb.BeaconBlockCapella{
|
||||
Body: body,
|
||||
}
|
||||
blk, err := consensusblocks.NewBeaconBlock(pbb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.handleBlockBLSToExecChanges(blk))
|
||||
})
|
||||
|
||||
t.Run("Post Capella some changes", func(t *testing.T) {
|
||||
idx := types.ValidatorIndex(123)
|
||||
change := ðpb.BLSToExecutionChange{
|
||||
ValidatorIndex: idx,
|
||||
}
|
||||
signedChange := ðpb.SignedBLSToExecutionChange{
|
||||
Message: change,
|
||||
}
|
||||
body := ðpb.BeaconBlockBodyCapella{
|
||||
BlsToExecutionChanges: []*ethpb.SignedBLSToExecutionChange{signedChange},
|
||||
}
|
||||
pbb := ðpb.BeaconBlockCapella{
|
||||
Body: body,
|
||||
}
|
||||
blk, err := consensusblocks.NewBeaconBlock(pbb)
|
||||
require.NoError(t, err)
|
||||
|
||||
pool.InsertBLSToExecChange(signedChange)
|
||||
require.Equal(t, true, pool.ValidatorExists(idx))
|
||||
require.NoError(t, service.handleBlockBLSToExecChanges(blk))
|
||||
require.Equal(t, false, pool.ValidatorExists(idx))
|
||||
})
|
||||
}
|
||||
|
||||
// Helper function to simulate the block being on time or delayed for proposer
|
||||
// boost. It alters the genesisTime tracked by the store.
|
||||
func driftGenesisTime(s *Service, slot int64, delay int64) {
|
||||
|
||||
@@ -159,6 +159,7 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
|
||||
require.NoError(t, service.saveInitSyncBlock(ctx, r1, wsb))
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
service.head = &head{
|
||||
slot: 1,
|
||||
root: r1,
|
||||
block: wsb,
|
||||
state: st,
|
||||
@@ -176,6 +177,7 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
st, _ = util.DeterministicGenesisState(t, 1)
|
||||
service.head = &head{
|
||||
slot: 1,
|
||||
root: r1,
|
||||
block: wsb,
|
||||
state: st,
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v3/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v3/time"
|
||||
"github.com/prysmaticlabs/prysm/v3/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
@@ -146,16 +145,16 @@ func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing *ethpb.A
|
||||
}
|
||||
|
||||
func (s *Service) handlePostBlockOperations(b interfaces.BeaconBlock) error {
|
||||
// Delete the processed block attestations from attestation pool.
|
||||
if err := s.deletePoolAtts(b.Body().Attestations()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Mark block exits as seen so we don't include same ones in future blocks.
|
||||
for _, e := range b.Body().VoluntaryExits() {
|
||||
s.cfg.ExitPool.MarkIncluded(e)
|
||||
}
|
||||
|
||||
// Mark block BLS changes as seen so we don't include same ones in future blocks.
|
||||
if err := s.handleBlockBLSToExecChanges(b); err != nil {
|
||||
return errors.Wrap(err, "could not process BLSToExecutionChanges")
|
||||
}
|
||||
|
||||
// Mark attester slashings as seen so we don't include same ones in future blocks.
|
||||
for _, as := range b.Body().AttesterSlashings() {
|
||||
s.cfg.SlashingPool.MarkIncludedAttesterSlashing(as)
|
||||
@@ -163,20 +162,6 @@ func (s *Service) handlePostBlockOperations(b interfaces.BeaconBlock) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) handleBlockBLSToExecChanges(blk interfaces.BeaconBlock) error {
|
||||
if blk.Version() < version.Capella {
|
||||
return nil
|
||||
}
|
||||
changes, err := blk.Body().BLSToExecutionChanges()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get BLSToExecutionChanges")
|
||||
}
|
||||
for _, change := range changes {
|
||||
s.cfg.BLSToExecPool.MarkIncluded(change)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// This checks whether it's time to start saving hot state to DB.
|
||||
// It's time when there are `epochsSinceFinalitySaveHotStateDB` epochs of non-finality.
|
||||
func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/blstoexec"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
@@ -34,7 +33,7 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
return blk
|
||||
}
|
||||
//params.SetupTestConfigCleanupWithLock(t)
|
||||
params.SetupTestConfigCleanupWithLock(t)
|
||||
bc := params.BeaconConfig().Copy()
|
||||
bc.ShardCommitteePeriod = 0 // Required for voluntary exits test in reasonable time.
|
||||
params.OverrideBeaconConfig(bc)
|
||||
@@ -333,62 +332,3 @@ func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
|
||||
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
|
||||
assert.LogsDoNotContain(t, hook, "Entering mode to save hot states in DB")
|
||||
}
|
||||
|
||||
func TestHandleBlockBLSToExecutionChanges(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fc := doublylinkedtree.New()
|
||||
pool := blstoexec.NewPool()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB, fc)),
|
||||
WithForkChoiceStore(fc),
|
||||
WithStateNotifier(&blockchainTesting.MockStateNotifier{}),
|
||||
WithBLSToExecPool(pool),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("pre Capella block", func(t *testing.T) {
|
||||
body := ðpb.BeaconBlockBodyBellatrix{}
|
||||
pbb := ðpb.BeaconBlockBellatrix{
|
||||
Body: body,
|
||||
}
|
||||
blk, err := blocks.NewBeaconBlock(pbb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.handleBlockBLSToExecChanges(blk))
|
||||
})
|
||||
|
||||
t.Run("Post Capella no changes", func(t *testing.T) {
|
||||
body := ðpb.BeaconBlockBodyCapella{}
|
||||
pbb := ðpb.BeaconBlockCapella{
|
||||
Body: body,
|
||||
}
|
||||
blk, err := blocks.NewBeaconBlock(pbb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.handleBlockBLSToExecChanges(blk))
|
||||
})
|
||||
|
||||
t.Run("Post Capella some changes", func(t *testing.T) {
|
||||
idx := types.ValidatorIndex(123)
|
||||
change := ðpb.BLSToExecutionChange{
|
||||
ValidatorIndex: idx,
|
||||
}
|
||||
signedChange := ðpb.SignedBLSToExecutionChange{
|
||||
Message: change,
|
||||
}
|
||||
body := ðpb.BeaconBlockBodyCapella{
|
||||
BlsToExecutionChanges: []*ethpb.SignedBLSToExecutionChange{signedChange},
|
||||
}
|
||||
pbb := ðpb.BeaconBlockCapella{
|
||||
Body: body,
|
||||
}
|
||||
blk, err := blocks.NewBeaconBlock(pbb)
|
||||
require.NoError(t, err)
|
||||
|
||||
pool.InsertBLSToExecChange(signedChange)
|
||||
require.Equal(t, true, pool.ValidatorExists(idx))
|
||||
require.NoError(t, service.handleBlockBLSToExecChanges(blk))
|
||||
require.Equal(t, false, pool.ValidatorExists(idx))
|
||||
})
|
||||
}
|
||||
|
||||
@@ -311,7 +311,10 @@ func (s *Service) initializeHeadFromDB(ctx context.Context) error {
|
||||
if err := s.setHead(finalizedRoot, finalizedBlock, finalizedState); err != nil {
|
||||
return errors.Wrap(err, "could not set head")
|
||||
}
|
||||
|
||||
_, err = s.notifyForkchoiceUpdate(s.ctx, ¬ifyForkchoiceUpdateArg{headState: finalizedState, headRoot: finalizedRoot, headBlock: finalizedBlock.Block()})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error calling FCU with finalized state at startup")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -446,6 +449,10 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
|
||||
if err := s.setHead(genesisBlkRoot, genesisBlk, genesisState); err != nil {
|
||||
log.WithError(err).Fatal("Could not set head")
|
||||
}
|
||||
_, err = s.notifyForkchoiceUpdate(s.ctx, ¬ifyForkchoiceUpdateArg{headState: genesisState, headRoot: genesisBlkRoot, headBlock: genesisBlk.Block()})
|
||||
if err != nil {
|
||||
log.WithError(err).Fatal("Could not call FCU with genesis")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
beacon-chain/cache/checkpoint_state.go (vendored): 7 changes
@@ -1,6 +1,8 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
@@ -30,6 +32,7 @@ var (
|
||||
// CheckpointStateCache is a struct with 1 queue for looking up state by checkpoint.
|
||||
type CheckpointStateCache struct {
|
||||
cache *lru.Cache
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// NewCheckpointStateCache creates a new checkpoint state cache for storing/accessing processed state.
|
||||
@@ -42,6 +45,8 @@ func NewCheckpointStateCache() *CheckpointStateCache {
|
||||
// StateByCheckpoint fetches state by checkpoint. Returns a reference to the
// cached CheckpointState info if it exists. Otherwise returns nil, nil.
|
||||
func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (state.BeaconState, error) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
h, err := hash.HashProto(cp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -62,6 +67,8 @@ func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (state.Be
|
||||
// AddCheckpointState adds a CheckpointState object to the cache. This method also trims the least
// recently added CheckpointState object if the cache size has reached the max cache size limit.
|
||||
func (c *CheckpointStateCache) AddCheckpointState(cp *ethpb.Checkpoint, s state.ReadOnlyBeaconState) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
h, err := hash.HashProto(cp)
|
||||
if err != nil {
|
||||
return err
|
||||
beacon-chain/cache/payload_id.go (vendored): 2 changes
@@ -57,7 +57,7 @@ func (f *ProposerPayloadIDsCache) SetProposerAndPayloadIDs(slot types.Slot, vId
|
||||
ids, ok := f.slotToProposerAndPayloadIDs[k]
|
||||
// Ok to overwrite if the slot is already set but the cached payload ID is not set.
|
||||
// This combats the re-org case where payload assignment could change at the start of the epoch.
|
||||
var byte8 [vIdLength]byte
|
||||
byte8 := [vIdLength]byte{}
|
||||
if !ok || (ok && bytes.Equal(ids[vIdLength:], byte8[:])) {
|
||||
f.slotToProposerAndPayloadIDs[k] = bs
|
||||
}
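The guard above only overwrites an existing cache entry when the stored payload ID portion is still zero, so a payload ID obtained earlier in the slot survives a proposer re-assignment after a re-org. A minimal sketch of that check; `slotToIDs`, `slot`, and `newEntry` are illustrative names, while `vIdLength` and the zero comparison come from the hunk:

```go
// Sketch: only overwrite when no payload ID has been cached for this slot yet.
var zeroID [vIdLength]byte // zero value used to detect an unset payload ID
cached, ok := slotToIDs[slot]
if !ok || bytes.Equal(cached[vIdLength:], zeroID[:]) {
	// Either the slot has no entry, or the entry has no payload ID yet.
	slotToIDs[slot] = newEntry
}
```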
beacon-chain/cache/payload_id_test.go (vendored): 2 changes
@@ -9,7 +9,7 @@ import (
|
||||
|
||||
func TestValidatorPayloadIDsCache_GetAndSaveValidatorPayloadIDs(t *testing.T) {
|
||||
cache := NewProposerPayloadIDsCache()
|
||||
var r [32]byte
|
||||
r := [32]byte{}
|
||||
i, p, ok := cache.GetProposerPayloadIDs(0, r)
|
||||
require.Equal(t, false, ok)
|
||||
require.Equal(t, types.ValidatorIndex(0), i)
|
||||
|
||||
@@ -52,8 +52,9 @@ func (c *SyncCommitteeHeadStateCache) Get(slot types.Slot) (state.BeaconState, e
|
||||
if !ok {
|
||||
return nil, ErrIncorrectType
|
||||
}
|
||||
// Sync committee is not supported in phase 0.
|
||||
if st.Version() == version.Phase0 {
|
||||
switch st.Version() {
|
||||
case version.Altair, version.Bellatrix, version.Capella:
|
||||
default:
|
||||
return nil, ErrIncorrectType
|
||||
}
|
||||
return st, nil
|
||||
|
||||
@@ -33,13 +33,6 @@ func TestSyncCommitteeHeadState(t *testing.T) {
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
capellaState, err := state_native.InitializeFromProtoCapella(ðpb.BeaconStateCapella{
|
||||
Fork: ðpb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
type put struct {
|
||||
slot types.Slot
|
||||
state state.BeaconState
|
||||
@@ -113,15 +106,6 @@ func TestSyncCommitteeHeadState(t *testing.T) {
|
||||
},
|
||||
want: bellatrixState,
|
||||
},
|
||||
{
|
||||
name: "found with key (capella state)",
|
||||
key: types.Slot(200),
|
||||
put: &put{
|
||||
slot: types.Slot(200),
|
||||
state: capellaState,
|
||||
},
|
||||
want: capellaState,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
||||
@@ -14,7 +14,14 @@ go_library(
|
||||
"upgrade.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/altair",
|
||||
visibility = ["//visibility:public"],
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//cmd/prysmctl/testnet:__pkg__",
|
||||
"//testing/endtoend/evaluators:__subpackages__",
|
||||
"//testing/spectest:__subpackages__",
|
||||
"//testing/util:__pkg__",
|
||||
"//validator/client:__pkg__",
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/epoch:go_default_library",
|
||||
|
||||
@@ -256,7 +256,7 @@ func TestProcessAttestationNoVerify_SourceTargetHead(t *testing.T) {
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
}
|
||||
var zeroSig [96]byte
|
||||
zeroSig := [96]byte{}
|
||||
att.Signature = zeroSig[:]
|
||||
|
||||
ckp := beaconState.CurrentJustifiedCheckpoint()
|
||||
|
||||
@@ -3,6 +3,8 @@ package altair
|
||||
import (
|
||||
"context"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/epoch/precompute"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
|
||||
@@ -29,6 +31,7 @@ func InitializePrecomputeValidators(ctx context.Context, beaconState state.Beaco
|
||||
// This shouldn't happen with a correct beacon state,
// but it's safer to defend against index out of bound panics.
|
||||
if beaconState.NumValidators() != len(inactivityScores) {
|
||||
log.Errorf("NumValidators=%d inactivityScores=%d", beaconState.NumValidators(), len(inactivityScores))
|
||||
return nil, nil, errors.New("num of validators is different than num of inactivity scores")
|
||||
}
|
||||
if err := beaconState.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
|
||||
|
||||
@@ -93,7 +93,7 @@ func ProcessEpoch(ctx context.Context, state state.BeaconState) (state.BeaconSta
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
state, err = e.ProcessHistoricalDataUpdate(state)
|
||||
state, err = e.ProcessHistoricalRootsUpdate(state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -67,10 +67,6 @@ func UpgradeToAltair(ctx context.Context, state state.BeaconState) (state.Beacon
|
||||
epoch := time.CurrentEpoch(state)
|
||||
|
||||
numValidators := state.NumValidators()
|
||||
hrs, err := state.HistoricalRoots()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s := ðpb.BeaconStateAltair{
|
||||
GenesisTime: state.GenesisTime(),
|
||||
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
|
||||
@@ -83,7 +79,7 @@ func UpgradeToAltair(ctx context.Context, state state.BeaconState) (state.Beacon
|
||||
LatestBlockHeader: state.LatestBlockHeader(),
|
||||
BlockRoots: state.BlockRoots(),
|
||||
StateRoots: state.StateRoots(),
|
||||
HistoricalRoots: hrs,
|
||||
HistoricalRoots: state.HistoricalRoots(),
|
||||
Eth1Data: state.Eth1Data(),
|
||||
Eth1DataVotes: state.Eth1DataVotes(),
|
||||
Eth1DepositIndex: state.Eth1DepositIndex(),
|
||||
|
||||
@@ -82,11 +82,7 @@ func TestUpgradeToAltair(t *testing.T) {
|
||||
require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), aState.LatestBlockHeader())
|
||||
require.DeepSSZEqual(t, preForkState.BlockRoots(), aState.BlockRoots())
|
||||
require.DeepSSZEqual(t, preForkState.StateRoots(), aState.StateRoots())
|
||||
r1, err := preForkState.HistoricalRoots()
|
||||
require.NoError(t, err)
|
||||
r2, err := aState.HistoricalRoots()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, r1, r2)
|
||||
require.DeepSSZEqual(t, preForkState.HistoricalRoots(), aState.HistoricalRoots())
|
||||
require.DeepSSZEqual(t, preForkState.Eth1Data(), aState.Eth1Data())
|
||||
require.DeepSSZEqual(t, preForkState.Eth1DataVotes(), aState.Eth1DataVotes())
|
||||
require.DeepSSZEqual(t, preForkState.Eth1DepositIndex(), aState.Eth1DepositIndex())
|
||||
|
||||
@@ -99,7 +99,6 @@ go_test(
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/migration:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation/aggregation:go_default_library",
|
||||
|
||||
@@ -64,7 +64,7 @@ func TestVerifyAttestationNoVerifySignature_IncorrectSourceEpoch(t *testing.T) {
|
||||
AggregationBits: aggBits,
|
||||
}
|
||||
|
||||
var zeroSig [96]byte
|
||||
zeroSig := [96]byte{}
|
||||
att.Signature = zeroSig[:]
|
||||
|
||||
err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
|
||||
|
||||
@@ -113,7 +113,7 @@ func TestProcessAttestationsNoVerify_OK(t *testing.T) {
|
||||
AggregationBits: aggBits,
|
||||
}
|
||||
|
||||
var zeroSig [fieldparams.BLSSignatureLength]byte
|
||||
zeroSig := [fieldparams.BLSSignatureLength]byte{}
|
||||
att.Signature = zeroSig[:]
|
||||
|
||||
err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
|
||||
@@ -144,7 +144,7 @@ func TestVerifyAttestationNoVerifySignature_OK(t *testing.T) {
|
||||
AggregationBits: aggBits,
|
||||
}
|
||||
|
||||
var zeroSig [fieldparams.BLSSignatureLength]byte
|
||||
zeroSig := [fieldparams.BLSSignatureLength]byte{}
|
||||
att.Signature = zeroSig[:]
|
||||
|
||||
err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
|
||||
@@ -172,7 +172,7 @@ func TestVerifyAttestationNoVerifySignature_BadAttIdx(t *testing.T) {
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
}
|
||||
var zeroSig [fieldparams.BLSSignatureLength]byte
|
||||
zeroSig := [fieldparams.BLSSignatureLength]byte{}
|
||||
att.Signature = zeroSig[:]
|
||||
require.NoError(t, beaconState.SetSlot(beaconState.Slot()+params.BeaconConfig().MinAttestationInclusionDelay))
|
||||
ckp := beaconState.CurrentJustifiedCheckpoint()
|
||||
|
||||
@@ -84,7 +84,7 @@ func ProcessAttesterSlashing(
|
||||
slashingQuotient = cfg.MinSlashingPenaltyQuotient
|
||||
case beaconState.Version() == version.Altair:
|
||||
slashingQuotient = cfg.MinSlashingPenaltyQuotientAltair
|
||||
case beaconState.Version() >= version.Bellatrix:
|
||||
case beaconState.Version() == version.Bellatrix, beaconState.Version() == version.Capella:
|
||||
slashingQuotient = cfg.MinSlashingPenaltyQuotientBellatrix
|
||||
default:
|
||||
return nil, errors.New("unknown state version")
|
||||
|
||||
@@ -55,9 +55,9 @@ func TestFuzzProcessBlockHeader_10000(t *testing.T) {
|
||||
func TestFuzzverifyDepositDataSigningRoot_10000(_ *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
var ba []byte
|
||||
var pubkey [fieldparams.BLSPubkeyLength]byte
|
||||
var sig [96]byte
|
||||
var domain [4]byte
|
||||
pubkey := [fieldparams.BLSPubkeyLength]byte{}
|
||||
sig := [96]byte{}
|
||||
domain := [4]byte{}
|
||||
var p []byte
|
||||
var s []byte
|
||||
var d []byte
|
||||
|
||||
@@ -131,118 +131,3 @@ func TestProcessVoluntaryExits_AppliesCorrectStatus(t *testing.T) {
|
||||
helpers.ActivationExitEpoch(types.Epoch(state.Slot()/params.BeaconConfig().SlotsPerEpoch)), newRegistry[0].ExitEpoch)
|
||||
}
|
||||
}
|
||||
|
||||
func TestVerifyExitAndSignature(t *testing.T) {
|
||||
type args struct {
|
||||
currentSlot types.Slot
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
setup func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, *ethpb.Fork, []byte, error)
|
||||
wantErr string
|
||||
}{
|
||||
{
|
||||
name: "Empty Exit",
|
||||
args: args{
|
||||
currentSlot: 0,
|
||||
},
|
||||
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, *ethpb.Fork, []byte, error) {
|
||||
fork := ðpb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
Epoch: 0,
|
||||
}
|
||||
genesisRoot := [32]byte{'a'}
|
||||
return ðpb.Validator{}, ðpb.SignedVoluntaryExit{}, fork, genesisRoot[:], nil
|
||||
},
|
||||
wantErr: "nil exit",
|
||||
},
|
||||
{
|
||||
name: "Happy Path",
|
||||
args: args{
|
||||
currentSlot: (params.BeaconConfig().SlotsPerEpoch * 2) + 1,
|
||||
},
|
||||
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, *ethpb.Fork, []byte, error) {
|
||||
fork := ðpb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
Epoch: 0,
|
||||
}
|
||||
signedExit := ðpb.SignedVoluntaryExit{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
Epoch: 2,
|
||||
ValidatorIndex: 0,
|
||||
},
|
||||
}
|
||||
bs, keys := util.DeterministicGenesisState(t, 1)
|
||||
validator := bs.Validators()[0]
|
||||
validator.ActivationEpoch = 1
|
||||
err := bs.UpdateValidatorAtIndex(0, validator)
|
||||
require.NoError(t, err)
|
||||
sb, err := signing.ComputeDomainAndSign(bs, signedExit.Exit.Epoch, signedExit.Exit, params.BeaconConfig().DomainVoluntaryExit, keys[0])
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
signedExit.Signature = sig.Marshal()
|
||||
return validator, signedExit, fork, bs.GenesisValidatorsRoot(), nil
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "bad signature",
|
||||
args: args{
|
||||
currentSlot: (params.BeaconConfig().SlotsPerEpoch * 2) + 1,
|
||||
},
|
||||
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, *ethpb.Fork, []byte, error) {
|
||||
fork := ðpb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
Epoch: 0,
|
||||
}
|
||||
signedExit := ðpb.SignedVoluntaryExit{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
Epoch: 2,
|
||||
ValidatorIndex: 0,
|
||||
},
|
||||
}
|
||||
bs, keys := util.DeterministicGenesisState(t, 1)
|
||||
validator := bs.Validators()[0]
|
||||
validator.ActivationEpoch = 1
|
||||
|
||||
sb, err := signing.ComputeDomainAndSign(bs, signedExit.Exit.Epoch, signedExit.Exit, params.BeaconConfig().DomainVoluntaryExit, keys[0])
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
signedExit.Signature = sig.Marshal()
|
||||
genesisRoot := [32]byte{'a'}
|
||||
// use wrong genesis root and don't update validator
|
||||
return validator, signedExit, fork, genesisRoot[:], nil
|
||||
},
|
||||
wantErr: "signature did not verify",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
c := params.BeaconConfig().ShardCommitteePeriod
|
||||
params.BeaconConfig().ShardCommitteePeriod = 0
|
||||
validator, signedExit, fork, genesisRoot, err := tt.setup()
|
||||
require.NoError(t, err)
|
||||
rvalidator, err := state_native.NewValidator(validator)
|
||||
require.NoError(t, err)
|
||||
err = blocks.VerifyExitAndSignature(
|
||||
rvalidator,
|
||||
tt.args.currentSlot,
|
||||
fork,
|
||||
signedExit,
|
||||
genesisRoot,
|
||||
)
|
||||
if tt.wantErr == "" {
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
require.ErrorContains(t, tt.wantErr, err)
|
||||
}
|
||||
params.BeaconConfig().ShardCommitteePeriod = c // prevent contamination
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,8 +3,6 @@
|
||||
package blocks
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
|
||||
@@ -37,13 +35,9 @@ func NewGenesisBlock(stateRoot []byte) *ethpb.SignedBeaconBlock {
|
||||
return block
|
||||
}
|
||||
|
||||
var ErrUnrecognizedState = errors.New("unknown underlying type for state.BeaconState value")
|
||||
var ErrUnrecognizedState = errors.New("uknonwn underlying type for state.BeaconState value")
|
||||
|
||||
func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfaces.SignedBeaconBlock, error) {
|
||||
root, err := st.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
func NewGenesisBlockForState(root [32]byte, st state.BeaconState) (interfaces.SignedBeaconBlock, error) {
|
||||
ps := st.ToProto()
|
||||
switch ps.(type) {
|
||||
case *ethpb.BeaconState:
|
||||
@@ -62,26 +56,6 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
|
||||
},
|
||||
Signature: params.BeaconConfig().EmptySignature[:],
|
||||
})
|
||||
case *ethpb.BeaconStateAltair:
|
||||
return blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockAltair{
|
||||
Block: ðpb.BeaconBlockAltair{
|
||||
ParentRoot: params.BeaconConfig().ZeroHash[:],
|
||||
StateRoot: root[:],
|
||||
Body: ðpb.BeaconBlockBodyAltair{
|
||||
RandaoReveal: make([]byte, fieldparams.BLSSignatureLength),
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
DepositRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
},
|
||||
Graffiti: make([]byte, 32),
|
||||
SyncAggregate: ðpb.SyncAggregate{
|
||||
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
|
||||
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
},
|
||||
},
|
||||
Signature: params.BeaconConfig().EmptySignature[:],
|
||||
})
|
||||
case *ethpb.BeaconStateBellatrix:
|
||||
return blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockBellatrix{
|
||||
Block: ðpb.BeaconBlockBellatrix{
|
||||
@@ -113,38 +87,6 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
|
||||
},
|
||||
Signature: params.BeaconConfig().EmptySignature[:],
|
||||
})
|
||||
case *ethpb.BeaconStateCapella:
|
||||
return blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockCapella{
|
||||
Block: ðpb.BeaconBlockCapella{
|
||||
ParentRoot: params.BeaconConfig().ZeroHash[:],
|
||||
StateRoot: root[:],
|
||||
Body: ðpb.BeaconBlockBodyCapella{
|
||||
RandaoReveal: make([]byte, 96),
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
DepositRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
},
|
||||
Graffiti: make([]byte, 32),
|
||||
SyncAggregate: ðpb.SyncAggregate{
|
||||
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
|
||||
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
ExecutionPayload: &enginev1.ExecutionPayloadCapella{
|
||||
ParentHash: make([]byte, 32),
|
||||
FeeRecipient: make([]byte, 20),
|
||||
StateRoot: make([]byte, 32),
|
||||
ReceiptsRoot: make([]byte, 32),
|
||||
LogsBloom: make([]byte, 256),
|
||||
PrevRandao: make([]byte, 32),
|
||||
BaseFeePerGas: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
Transactions: make([][]byte, 0),
|
||||
Withdrawals: make([]*enginev1.Withdrawal, 0),
|
||||
},
|
||||
},
|
||||
},
|
||||
Signature: params.BeaconConfig().EmptySignature[:],
|
||||
})
|
||||
default:
|
||||
return nil, ErrUnrecognizedState
|
||||
}
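One side of this hunk has the genesis-block helper hash the state itself from a context, while the other expects the caller to pass a precomputed root. A minimal sketch of calling the root-taking form, under the assumption that the caller holds the genesis `state.BeaconState` (the wrapper function name is illustrative):

```go
// Sketch: hash the genesis state first, then build its genesis block.
func makeGenesisBlock(ctx context.Context, st state.BeaconState) (interfaces.SignedBeaconBlock, error) {
	root, err := st.HashTreeRoot(ctx) // state root the genesis block will commit to
	if err != nil {
		return nil, err
	}
	return NewGenesisBlockForState(root, st)
}
```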
|
||||
|
||||
@@ -133,6 +133,7 @@ func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload interfaces.
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(payload.ParentHash(), header.BlockHash()) {
|
||||
log.Errorf("parent Hash %#x, header Hash: %#x", bytesutil.Trunc(payload.ParentHash()), bytesutil.Trunc(header.BlockHash()))
|
||||
return ErrInvalidPayloadBlockHash
|
||||
}
|
||||
return nil
|
||||
@@ -281,7 +282,7 @@ func ProcessPayloadHeader(st state.BeaconState, header interfaces.ExecutionData)
|
||||
|
||||
// GetBlockPayloadHash returns the hash of the execution payload of the block
|
||||
func GetBlockPayloadHash(blk interfaces.BeaconBlock) ([32]byte, error) {
|
||||
var payloadHash [32]byte
|
||||
payloadHash := [32]byte{}
|
||||
if IsPreBellatrixVersion(blk.Version()) {
|
||||
return payloadHash, nil
|
||||
}
|
||||
|
||||
@@ -7,12 +7,14 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/ssz"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v3/time/slots"
|
||||
@@ -348,6 +350,16 @@ func Test_IsExecutionEnabled(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func Test_IsExecutionEnabledCapella(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateCapella(t, 1)
|
||||
blk := util.NewBeaconBlockCapella()
|
||||
body, err := consensusblocks.NewBeaconBlockBody(blk.Block.Body)
|
||||
require.NoError(t, err)
|
||||
got, err := blocks.IsExecutionEnabled(st, body)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, got)
|
||||
}
|
||||
|
||||
func Test_IsExecutionEnabledUsingHeader(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -601,14 +613,13 @@ func Test_ProcessPayload(t *testing.T) {
|
||||
}
|
||||
|
||||
func Test_ProcessPayloadCapella(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateCapella(t, 1)
|
||||
spb := ðpb.BeaconStateCapella{}
|
||||
st, err := state_native.InitializeFromProtoCapella(spb)
|
||||
require.NoError(t, err)
|
||||
header, err := emptyPayloadHeaderCapella()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(header))
|
||||
payload := emptyPayloadCapella()
|
||||
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
|
||||
require.NoError(t, err)
|
||||
payload.PrevRandao = random
|
||||
wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload)
|
||||
require.NoError(t, err)
|
||||
_, err = blocks.ProcessPayload(st, wrapped)
|
||||
|
||||
@@ -82,7 +82,7 @@ func ProcessProposerSlashing(
|
||||
slashingQuotient = cfg.MinSlashingPenaltyQuotient
|
||||
case beaconState.Version() == version.Altair:
|
||||
slashingQuotient = cfg.MinSlashingPenaltyQuotientAltair
|
||||
case beaconState.Version() >= version.Bellatrix:
|
||||
case beaconState.Version() == version.Bellatrix, beaconState.Version() == version.Capella:
|
||||
slashingQuotient = cfg.MinSlashingPenaltyQuotientBellatrix
|
||||
default:
|
||||
return nil, errors.New("unknown state version")
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
)
|
||||
|
||||
// retrieves the signature batch from the raw data, public key, signature and domain provided.
|
||||
func signatureBatch(signedData, pub, signature, domain []byte, desc string) (*bls.SignatureBatch, error) {
|
||||
func signatureBatch(signedData, pub, signature, domain []byte) (*bls.SignatureBatch, error) {
|
||||
publicKey, err := bls.PublicKeyFromBytes(pub)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not convert bytes to public key")
|
||||
@@ -33,16 +33,15 @@ func signatureBatch(signedData, pub, signature, domain []byte, desc string) (*bl
|
||||
return nil, errors.Wrap(err, "could not hash container")
|
||||
}
|
||||
return &bls.SignatureBatch{
|
||||
Signatures: [][]byte{signature},
|
||||
PublicKeys: []bls.PublicKey{publicKey},
|
||||
Messages: [][32]byte{root},
|
||||
Descriptions: []string{desc},
|
||||
Signatures: [][]byte{signature},
|
||||
PublicKeys: []bls.PublicKey{publicKey},
|
||||
Messages: [][32]byte{root},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// verifies the signature from the raw data, public key and domain provided.
|
||||
func verifySignature(signedData, pub, signature, domain []byte) error {
|
||||
set, err := signatureBatch(signedData, pub, signature, domain, signing.UnknownSignature)
|
||||
set, err := signatureBatch(signedData, pub, signature, domain)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -147,7 +146,7 @@ func RandaoSignatureBatch(
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
set, err := signatureBatch(buf, proposerPub, reveal, domain, signing.RandaoSignature)
|
||||
set, err := signatureBatch(buf, proposerPub, reveal, domain)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -187,7 +186,6 @@ func createAttestationSignatureBatch(
|
||||
sigs := make([][]byte, len(atts))
|
||||
pks := make([]bls.PublicKey, len(atts))
|
||||
msgs := make([][32]byte, len(atts))
|
||||
descs := make([]string, len(atts))
|
||||
for i, a := range atts {
|
||||
sigs[i] = a.Signature
|
||||
c, err := helpers.BeaconCommitteeFromState(ctx, beaconState, a.Data.Slot, a.Data.CommitteeIndex)
|
||||
@@ -218,14 +216,11 @@ func createAttestationSignatureBatch(
|
||||
return nil, errors.Wrap(err, "could not get signing root of object")
|
||||
}
|
||||
msgs[i] = root
|
||||
|
||||
descs[i] = signing.AttestationSignature
|
||||
}
|
||||
return &bls.SignatureBatch{
|
||||
Signatures: sigs,
|
||||
PublicKeys: pks,
|
||||
Messages: msgs,
|
||||
Descriptions: descs,
|
||||
Signatures: sigs,
|
||||
PublicKeys: pks,
|
||||
Messages: msgs,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -17,10 +17,15 @@ import (
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v3/proto/eth/v2"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v3/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v3/time/slots"
|
||||
)
|
||||
|
||||
const executionToBLSPadding = 12
|
||||
|
||||
var ErrInvalidBLSChangeBeforeCapella = errors.New("cannot process BLSToExecutionChange before Capella fork")
|
||||
|
||||
// ProcessBLSToExecutionChanges processes all signed BLS_TO_EXECUTION_MESSAGE objects found in the block.
// It does not verify their signatures.
|
||||
func ProcessBLSToExecutionChanges(
|
||||
st state.BeaconState,
|
||||
signed interfaces.SignedBeaconBlock) (state.BeaconState, error) {
|
||||
@@ -83,6 +88,9 @@ func processBLSToExecutionChange(st state.BeaconState, signed *ethpb.SignedBLSTo
|
||||
// ValidateBLSToExecutionChange validates the execution change message against the state and returns the
|
||||
// validator referenced by the message.
|
||||
func ValidateBLSToExecutionChange(st state.ReadOnlyBeaconState, signed *ethpb.SignedBLSToExecutionChange) (*ethpb.Validator, error) {
|
||||
if st.Version() < version.Capella {
|
||||
return nil, ErrInvalidBLSChangeBeforeCapella
|
||||
}
|
||||
if signed == nil {
|
||||
return nil, errNilSignedWithdrawalMessage
|
||||
}
|
||||
@@ -123,6 +131,7 @@ func ProcessWithdrawals(st state.BeaconState, withdrawals []*enginev1.Withdrawal
|
||||
return nil, errInvalidWithdrawalIndex
|
||||
}
|
||||
if withdrawal.ValidatorIndex != expected[i].ValidatorIndex {
|
||||
log.Errorf("Expected %v, got %v", expected[i], withdrawal)
|
||||
return nil, errInvalidValidatorIndex
|
||||
}
|
||||
if !bytes.Equal(withdrawal.Address, expected[i].Address) {
|
||||
@@ -140,23 +149,13 @@ func ProcessWithdrawals(st state.BeaconState, withdrawals []*enginev1.Withdrawal
|
||||
if err := st.SetNextWithdrawalIndex(withdrawals[len(withdrawals)-1].Index + 1); err != nil {
|
||||
return nil, errors.Wrap(err, "could not set next withdrawal index")
|
||||
}
|
||||
}
|
||||
var nextValidatorIndex types.ValidatorIndex
|
||||
if uint64(len(withdrawals)) < params.BeaconConfig().MaxWithdrawalsPerPayload {
|
||||
nextValidatorIndex, err = st.NextWithdrawalValidatorIndex()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get next withdrawal validator index")
|
||||
}
|
||||
nextValidatorIndex += types.ValidatorIndex(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
|
||||
nextValidatorIndex = nextValidatorIndex % types.ValidatorIndex(st.NumValidators())
|
||||
} else {
|
||||
nextValidatorIndex = withdrawals[len(withdrawals)-1].ValidatorIndex + 1
|
||||
nextValidatorIndex := withdrawals[len(withdrawals)-1].ValidatorIndex + 1
|
||||
if nextValidatorIndex == types.ValidatorIndex(st.NumValidators()) {
|
||||
nextValidatorIndex = 0
|
||||
}
|
||||
}
|
||||
if err := st.SetNextWithdrawalValidatorIndex(nextValidatorIndex); err != nil {
|
||||
return nil, errors.Wrap(err, "could not set next withdrawal validator index")
|
||||
if err := st.SetNextWithdrawalValidatorIndex(nextValidatorIndex); err != nil {
|
||||
return nil, errors.Wrap(err, "could not set next withdrawal validator index")
|
||||
}
|
||||
}
|
||||
return st, nil
|
||||
}
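The two sides of this hunk advance the withdrawal sweep cursor differently: one always continues right after the last withdrawn validator, the other jumps ahead by a bounded sweep when the payload was not full. A minimal sketch of the bounded-sweep branch, shaped as an excerpt of a ProcessWithdrawals-style body and using only config fields and state accessors that appear in the hunk:

```go
// Sketch: pick where the next withdrawal sweep should start.
var next types.ValidatorIndex
if uint64(len(withdrawals)) < params.BeaconConfig().MaxWithdrawalsPerPayload {
	// Payload was not full: the sweep exhausted its bound, so jump ahead by
	// the bound and wrap around the validator registry.
	start, err := st.NextWithdrawalValidatorIndex()
	if err != nil {
		return nil, errors.Wrap(err, "could not get next withdrawal validator index")
	}
	next = (start + types.ValidatorIndex(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)) %
		types.ValidatorIndex(st.NumValidators())
} else {
	// Payload was full: continue right after the last withdrawn validator, wrapping to 0.
	next = withdrawals[len(withdrawals)-1].ValidatorIndex + 1
	if next == types.ValidatorIndex(st.NumValidators()) {
		next = 0
	}
}
if err := st.SetNextWithdrawalValidatorIndex(next); err != nil {
	return nil, errors.Wrap(err, "could not set next withdrawal validator index")
}
```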
|
||||
@@ -170,15 +169,14 @@ func BLSChangesSignatureBatch(
|
||||
return bls.NewSet(), nil
|
||||
}
|
||||
batch := &bls.SignatureBatch{
|
||||
Signatures: make([][]byte, len(changes)),
|
||||
PublicKeys: make([]bls.PublicKey, len(changes)),
|
||||
Messages: make([][32]byte, len(changes)),
|
||||
Descriptions: make([]string, len(changes)),
|
||||
Signatures: make([][]byte, len(changes)),
|
||||
PublicKeys: make([]bls.PublicKey, len(changes)),
|
||||
Messages: make([][32]byte, len(changes)),
|
||||
}
|
||||
c := params.BeaconConfig()
|
||||
domain, err := signing.ComputeDomain(c.DomainBLSToExecutionChange, c.GenesisForkVersion, st.GenesisValidatorsRoot())
|
||||
epoch := slots.ToEpoch(st.Slot())
|
||||
domain, err := signing.Domain(st.Fork(), epoch, params.BeaconConfig().DomainBLSToExecutionChange, st.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute signing domain")
|
||||
return nil, err
|
||||
}
|
||||
for i, change := range changes {
|
||||
batch.Signatures[i] = change.Signature
|
||||
@@ -192,20 +190,17 @@ func BLSChangesSignatureBatch(
|
||||
return nil, errors.Wrap(err, "could not compute BLSToExecutionChange signing data")
|
||||
}
|
||||
batch.Messages[i] = htr
|
||||
batch.Descriptions[i] = signing.BlsChangeSignature
|
||||
}
|
||||
return batch, nil
|
||||
}
|
||||
|
||||
// VerifyBLSChangeSignature checks the signature in the SignedBLSToExecutionChange message.
|
||||
// It validates the signature with the Capella fork version if the passed state
|
||||
// is from a previous fork.
|
||||
func VerifyBLSChangeSignature(
|
||||
st state.BeaconState,
|
||||
change *ethpbv2.SignedBLSToExecutionChange,
|
||||
) error {
|
||||
c := params.BeaconConfig()
|
||||
domain, err := signing.ComputeDomain(c.DomainBLSToExecutionChange, c.GenesisForkVersion, st.GenesisValidatorsRoot())
|
||||
epoch := slots.ToEpoch(st.Slot())
|
||||
domain, err := signing.Domain(st.Fork(), epoch, params.BeaconConfig().DomainBLSToExecutionChange, st.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not compute signing domain")
|
||||
}
|
||||
|
||||
@@ -17,7 +17,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v3/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/ssz"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/v3/proto/migration"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v3/time/slots"
|
||||
@@ -245,7 +244,6 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
numValidators = 128
|
||||
notWithdrawableIndex = 127
|
||||
notPartiallyWithdrawable = 126
|
||||
maxSweep = uint64(80)
|
||||
)
|
||||
maxEffectiveBalance := params.BeaconConfig().MaxEffectiveBalance
|
||||
|
||||
@@ -277,18 +275,18 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
}
|
||||
fullWithdrawal := func(i types.ValidatorIndex, idx uint64) *enginev1.Withdrawal {
|
||||
return &enginev1.Withdrawal{
|
||||
Index: idx,
|
||||
ValidatorIndex: i,
|
||||
Address: executionAddress(i),
|
||||
Amount: withdrawalAmount(i),
|
||||
WithdrawalIndex: idx,
|
||||
ValidatorIndex: i,
|
||||
ExecutionAddress: executionAddress(i),
|
||||
Amount: withdrawalAmount(i),
|
||||
}
|
||||
}
|
||||
partialWithdrawal := func(i types.ValidatorIndex, idx uint64) *enginev1.Withdrawal {
|
||||
return &enginev1.Withdrawal{
|
||||
Index: idx,
|
||||
ValidatorIndex: i,
|
||||
Address: executionAddress(i),
|
||||
Amount: withdrawalAmount(i) - maxEffectiveBalance,
|
||||
WithdrawalIndex: idx,
|
||||
ValidatorIndex: i,
|
||||
ExecutionAddress: executionAddress(i),
|
||||
Amount: withdrawalAmount(i) - maxEffectiveBalance,
|
||||
}
|
||||
}
|
||||
tests := []Test{
|
||||
@@ -299,7 +297,7 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
NextWithdrawalIndex: 3,
|
||||
},
|
||||
Control: control{
|
||||
NextWithdrawalValidatorIndex: 90,
|
||||
NextWithdrawalValidatorIndex: 10,
|
||||
NextWithdrawalIndex: 3,
|
||||
},
|
||||
},
|
||||
@@ -308,29 +306,29 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
Name: "success one full withdrawal",
|
||||
NextWithdrawalIndex: 3,
|
||||
NextWithdrawalValidatorIndex: 5,
|
||||
FullWithdrawalIndices: []types.ValidatorIndex{70},
|
||||
FullWithdrawalIndices: []types.ValidatorIndex{1},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
fullWithdrawal(70, 3),
|
||||
fullWithdrawal(1, 3),
|
||||
},
|
||||
},
|
||||
Control: control{
|
||||
NextWithdrawalValidatorIndex: 85,
|
||||
NextWithdrawalValidatorIndex: 2,
|
||||
NextWithdrawalIndex: 4,
|
||||
Balances: map[uint64]uint64{70: 0},
|
||||
Balances: map[uint64]uint64{1: 0},
|
||||
},
|
||||
},
|
||||
{
|
||||
Args: args{
|
||||
Name: "success one partial withdrawal",
|
||||
NextWithdrawalIndex: 21,
|
||||
NextWithdrawalValidatorIndex: 120,
|
||||
NextWithdrawalValidatorIndex: 37,
|
||||
PartialWithdrawalIndices: []types.ValidatorIndex{7},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
partialWithdrawal(7, 21),
|
||||
},
|
||||
},
|
||||
Control: control{
|
||||
NextWithdrawalValidatorIndex: 72,
|
||||
NextWithdrawalValidatorIndex: 8,
|
||||
NextWithdrawalIndex: 22,
|
||||
Balances: map[uint64]uint64{7: maxEffectiveBalance},
|
||||
},
|
||||
@@ -343,45 +341,13 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
FullWithdrawalIndices: []types.ValidatorIndex{7, 19, 28, 1},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
fullWithdrawal(7, 22), fullWithdrawal(19, 23), fullWithdrawal(28, 24),
|
||||
fullWithdrawal(1, 25),
|
||||
},
|
||||
},
|
||||
Control: control{
|
||||
NextWithdrawalValidatorIndex: 84,
|
||||
NextWithdrawalIndex: 25,
|
||||
Balances: map[uint64]uint64{7: 0, 19: 0, 28: 0},
|
||||
},
|
||||
},
|
||||
{
|
||||
Args: args{
|
||||
Name: "Less than max sweep at end",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 4,
|
||||
FullWithdrawalIndices: []types.ValidatorIndex{80, 81, 82, 83},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
fullWithdrawal(80, 22), fullWithdrawal(81, 23), fullWithdrawal(82, 24),
|
||||
fullWithdrawal(83, 25),
|
||||
},
|
||||
},
|
||||
Control: control{
|
||||
NextWithdrawalValidatorIndex: 84,
|
||||
NextWithdrawalValidatorIndex: 2,
|
||||
NextWithdrawalIndex: 26,
|
||||
Balances: map[uint64]uint64{80: 0, 81: 0, 82: 0, 83: 0},
|
||||
},
|
||||
},
|
||||
{
|
||||
Args: args{
|
||||
Name: "Less than max sweep and beginning",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 4,
|
||||
FullWithdrawalIndices: []types.ValidatorIndex{4, 5, 6},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
fullWithdrawal(4, 22), fullWithdrawal(5, 23), fullWithdrawal(6, 24),
|
||||
},
|
||||
},
|
||||
Control: control{
|
||||
NextWithdrawalValidatorIndex: 84,
|
||||
NextWithdrawalIndex: 25,
|
||||
Balances: map[uint64]uint64{4: 0, 5: 0, 6: 0},
|
||||
Balances: map[uint64]uint64{7: 0, 19: 0, 28: 0, 1: 0},
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -389,18 +355,20 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
Name: "success many partial withdrawals",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 4,
|
||||
PartialWithdrawalIndices: []types.ValidatorIndex{7, 19, 28},
|
||||
PartialWithdrawalIndices: []types.ValidatorIndex{7, 19, 28, 1},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
partialWithdrawal(7, 22), partialWithdrawal(19, 23), partialWithdrawal(28, 24),
|
||||
partialWithdrawal(1, 25),
|
||||
},
|
||||
},
|
||||
Control: control{
|
||||
NextWithdrawalValidatorIndex: 84,
|
||||
NextWithdrawalIndex: 25,
|
||||
NextWithdrawalValidatorIndex: 2,
|
||||
NextWithdrawalIndex: 26,
|
||||
Balances: map[uint64]uint64{
|
||||
7: maxEffectiveBalance,
|
||||
19: maxEffectiveBalance,
|
||||
28: maxEffectiveBalance,
|
||||
1: maxEffectiveBalance,
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -408,17 +376,17 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
Args: args{
|
||||
Name: "success many withdrawals",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 88,
|
||||
NextWithdrawalValidatorIndex: 12,
|
||||
FullWithdrawalIndices: []types.ValidatorIndex{7, 19, 28},
|
||||
PartialWithdrawalIndices: []types.ValidatorIndex{2, 1, 89, 15},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
partialWithdrawal(89, 22), partialWithdrawal(1, 23), partialWithdrawal(2, 24),
|
||||
fullWithdrawal(7, 25), partialWithdrawal(15, 26), fullWithdrawal(19, 27),
|
||||
fullWithdrawal(28, 28),
|
||||
partialWithdrawal(15, 22), fullWithdrawal(19, 23), fullWithdrawal(28, 24),
|
||||
partialWithdrawal(89, 25), partialWithdrawal(1, 26), partialWithdrawal(2, 27),
|
||||
fullWithdrawal(7, 28),
|
||||
},
|
||||
},
|
||||
Control: control{
|
||||
NextWithdrawalValidatorIndex: 40,
|
||||
NextWithdrawalValidatorIndex: 8,
|
||||
NextWithdrawalIndex: 29,
|
||||
Balances: map[uint64]uint64{
|
||||
7: 0, 19: 0, 28: 0,
|
||||
@@ -621,8 +589,6 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.Args.Name, func(t *testing.T) {
|
||||
saved := params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep
|
||||
params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep = maxSweep
|
||||
if test.Args.Withdrawals == nil {
|
||||
test.Args.Withdrawals = make([]*enginev1.Withdrawal, 0)
|
||||
}
|
||||
@@ -648,7 +614,6 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
checkPostState(t, test.Control, post)
|
||||
}
|
||||
params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep = saved
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -785,162 +750,4 @@ func TestBLSChangesSignatureBatch(t *testing.T) {
|
||||
verify, err := batch.Verify()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, verify)
|
||||
|
||||
// Verify a single change
|
||||
change := migration.V1Alpha1SignedBLSToExecChangeToV2(signedChanges[0])
|
||||
require.NoError(t, blocks.VerifyBLSChangeSignature(st, change))
|
||||
}
|
||||
|
||||
func TestBLSChangesSignatureBatchWrongFork(t *testing.T) {
|
||||
spb := ðpb.BeaconStateCapella{
|
||||
Fork: ðpb.Fork{
|
||||
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().BellatrixForkVersion,
|
||||
Epoch: params.BeaconConfig().CapellaForkEpoch,
|
||||
},
|
||||
}
|
||||
numValidators := 10
|
||||
validators := make([]*ethpb.Validator, numValidators)
|
||||
blsChanges := make([]*ethpb.BLSToExecutionChange, numValidators)
|
||||
spb.Balances = make([]uint64, numValidators)
|
||||
privKeys := make([]common.SecretKey, numValidators)
|
||||
maxEffectiveBalance := params.BeaconConfig().MaxEffectiveBalance
|
||||
executionAddress := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13}
|
||||
|
||||
for i := range validators {
|
||||
v := ðpb.Validator{}
|
||||
v.EffectiveBalance = maxEffectiveBalance
|
||||
v.WithdrawableEpoch = params.BeaconConfig().FarFutureEpoch
|
||||
v.WithdrawalCredentials = make([]byte, 32)
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
privKeys[i] = priv
|
||||
pubkey := priv.PublicKey().Marshal()
|
||||
|
||||
message := ðpb.BLSToExecutionChange{
|
||||
ToExecutionAddress: executionAddress,
|
||||
ValidatorIndex: types.ValidatorIndex(i),
|
||||
FromBlsPubkey: pubkey,
|
||||
}
|
||||
|
||||
hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
|
||||
digest := hashFn.Hash(pubkey)
|
||||
digest[0] = params.BeaconConfig().BLSWithdrawalPrefixByte
|
||||
copy(v.WithdrawalCredentials, digest[:])
|
||||
validators[i] = v
|
||||
blsChanges[i] = message
|
||||
}
|
||||
spb.Validators = validators
|
||||
st, err := state_native.InitializeFromProtoCapella(spb)
|
||||
require.NoError(t, err)
|
||||
|
||||
signedChanges := make([]*ethpb.SignedBLSToExecutionChange, numValidators)
|
||||
for i, message := range blsChanges {
|
||||
signature, err := signing.ComputeDomainAndSign(st, time.CurrentEpoch(st), message, params.BeaconConfig().DomainBLSToExecutionChange, privKeys[i])
|
||||
require.NoError(t, err)
|
||||
|
||||
signed := ðpb.SignedBLSToExecutionChange{
|
||||
Message: message,
|
||||
Signature: signature,
|
||||
}
|
||||
signedChanges[i] = signed
|
||||
}
|
||||
batch, err := blocks.BLSChangesSignatureBatch(st, signedChanges)
|
||||
require.NoError(t, err)
|
||||
verify, err := batch.Verify()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, verify)
|
||||
|
||||
// Verify a single change
|
||||
change := migration.V1Alpha1SignedBLSToExecChangeToV2(signedChanges[0])
|
||||
require.ErrorIs(t, signing.ErrSigFailedToVerify, blocks.VerifyBLSChangeSignature(st, change))
|
||||
}
|
||||
|
||||
func TestBLSChangesSignatureBatchFromBellatrix(t *testing.T) {
|
||||
cfg := params.BeaconConfig()
|
||||
savedConfig := cfg.Copy()
|
||||
cfg.CapellaForkEpoch = cfg.BellatrixForkEpoch.AddEpoch(2)
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
spb := ðpb.BeaconStateBellatrix{
|
||||
Fork: ðpb.Fork{
|
||||
CurrentVersion: params.BeaconConfig().BellatrixForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().AltairForkVersion,
|
||||
Epoch: params.BeaconConfig().BellatrixForkEpoch,
|
||||
},
|
||||
}
|
||||
numValidators := 10
|
||||
validators := make([]*ethpb.Validator, numValidators)
|
||||
blsChanges := make([]*ethpb.BLSToExecutionChange, numValidators)
|
||||
spb.Balances = make([]uint64, numValidators)
|
||||
slot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
|
||||
require.NoError(t, err)
|
||||
spb.Slot = slot
|
||||
|
||||
privKeys := make([]common.SecretKey, numValidators)
|
||||
maxEffectiveBalance := params.BeaconConfig().MaxEffectiveBalance
|
||||
executionAddress := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13}
|
||||
|
||||
for i := range validators {
|
||||
v := ðpb.Validator{}
|
||||
v.EffectiveBalance = maxEffectiveBalance
|
||||
v.WithdrawableEpoch = params.BeaconConfig().FarFutureEpoch
|
||||
v.WithdrawalCredentials = make([]byte, 32)
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
privKeys[i] = priv
|
||||
pubkey := priv.PublicKey().Marshal()
|
||||
|
||||
message := ðpb.BLSToExecutionChange{
|
||||
ToExecutionAddress: executionAddress,
|
||||
ValidatorIndex: types.ValidatorIndex(i),
|
||||
FromBlsPubkey: pubkey,
|
||||
}
|
||||
|
||||
hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
|
||||
digest := hashFn.Hash(pubkey)
|
||||
digest[0] = params.BeaconConfig().BLSWithdrawalPrefixByte
|
||||
copy(v.WithdrawalCredentials, digest[:])
|
||||
validators[i] = v
|
||||
blsChanges[i] = message
|
||||
}
|
||||
spb.Validators = validators
|
||||
st, err := state_native.InitializeFromProtoBellatrix(spb)
|
||||
require.NoError(t, err)
|
||||
|
||||
signedChanges := make([]*ethpb.SignedBLSToExecutionChange, numValidators)
|
||||
spc := ðpb.BeaconStateCapella{
|
||||
Fork: ðpb.Fork{
|
||||
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
Epoch: params.BeaconConfig().CapellaForkEpoch,
|
||||
},
|
||||
}
|
||||
slot, err = slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
|
||||
require.NoError(t, err)
|
||||
spc.Slot = slot
|
||||
|
||||
stc, err := state_native.InitializeFromProtoCapella(spc)
|
||||
require.NoError(t, err)
|
||||
|
||||
for i, message := range blsChanges {
|
||||
signature, err := signing.ComputeDomainAndSign(stc, 0, message, params.BeaconConfig().DomainBLSToExecutionChange, privKeys[i])
|
||||
require.NoError(t, err)
|
||||
|
||||
signed := ðpb.SignedBLSToExecutionChange{
|
||||
Message: message,
|
||||
Signature: signature,
|
||||
}
|
||||
signedChanges[i] = signed
|
||||
}
|
||||
batch, err := blocks.BLSChangesSignatureBatch(st, signedChanges)
|
||||
require.NoError(t, err)
|
||||
verify, err := batch.Verify()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, verify)
|
||||
|
||||
// Verify a single change
|
||||
change := migration.V1Alpha1SignedBLSToExecChangeToV2(signedChanges[0])
|
||||
require.NoError(t, blocks.VerifyBLSChangeSignature(st, change))
|
||||
params.OverrideBeaconConfig(savedConfig)
|
||||
}
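For readers skimming the two tests above, the signing round-trip they exercise is short. Below is a minimal sketch, assuming a Capella beacon state `st`, a BLS secret key `priv`, and a 20-byte `executionAddress` are already in scope and that the surrounding function returns an error; identifiers mirror the test code in this diff rather than defining anything new.

message := &ethpb.BLSToExecutionChange{
	ValidatorIndex:     types.ValidatorIndex(0),
	FromBlsPubkey:      priv.PublicKey().Marshal(),
	ToExecutionAddress: executionAddress,
}
// Sign over DomainBLSToExecutionChange for the state's current epoch.
sig, err := signing.ComputeDomainAndSign(st, time.CurrentEpoch(st), message, params.BeaconConfig().DomainBLSToExecutionChange, priv)
if err != nil {
	return err
}
signed := &ethpb.SignedBLSToExecutionChange{Message: message, Signature: sig}
// Batch verification, as in TestBLSChangesSignatureBatch. Signing against the
// wrong fork makes Verify return false, which is what the WrongFork test asserts.
batch, err := blocks.BLSChangesSignatureBatch(st, []*ethpb.SignedBLSToExecutionChange{signed})
if err != nil {
	return err
}
verified, err := batch.Verify()
if err != nil {
	return err
}
_ = verified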
@@ -42,10 +42,6 @@ func UpgradeToCapella(state state.BeaconState) (state.BeaconState, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hrs, err := state.HistoricalRoots()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s := ðpb.BeaconStateCapella{
|
||||
GenesisTime: state.GenesisTime(),
|
||||
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
|
||||
@@ -58,7 +54,7 @@ func UpgradeToCapella(state state.BeaconState) (state.BeaconState, error) {
|
||||
LatestBlockHeader: state.LatestBlockHeader(),
|
||||
BlockRoots: state.BlockRoots(),
|
||||
StateRoots: state.StateRoots(),
|
||||
HistoricalRoots: hrs,
|
||||
HistoricalRoots: state.HistoricalRoots(),
|
||||
Eth1Data: state.Eth1Data(),
|
||||
Eth1DataVotes: state.Eth1DataVotes(),
|
||||
Eth1DepositIndex: state.Eth1DepositIndex(),
|
||||
@@ -94,7 +90,6 @@ func UpgradeToCapella(state state.BeaconState) (state.BeaconState, error) {
|
||||
},
|
||||
NextWithdrawalIndex: 0,
|
||||
NextWithdrawalValidatorIndex: 0,
|
||||
HistoricalSummaries: make([]*ethpb.HistoricalSummary, 0),
|
||||
}
|
||||
|
||||
return state_native.InitializeFromProtoUnsafeCapella(s)
|
||||
|
||||
@@ -25,6 +25,7 @@ func TestUpgradeToCapella(t *testing.T) {
|
||||
require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), mSt.LatestBlockHeader())
|
||||
require.DeepSSZEqual(t, preForkState.BlockRoots(), mSt.BlockRoots())
|
||||
require.DeepSSZEqual(t, preForkState.StateRoots(), mSt.StateRoots())
|
||||
require.DeepSSZEqual(t, preForkState.HistoricalRoots(), mSt.HistoricalRoots())
|
||||
require.DeepSSZEqual(t, preForkState.Eth1Data(), mSt.Eth1Data())
|
||||
require.DeepSSZEqual(t, preForkState.Eth1DataVotes(), mSt.Eth1DataVotes())
|
||||
require.DeepSSZEqual(t, preForkState.Eth1DepositIndex(), mSt.Eth1DepositIndex())
|
||||
@@ -97,8 +98,4 @@ func TestUpgradeToCapella(t *testing.T) {
|
||||
lwvi, err := mSt.NextWithdrawalValidatorIndex()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, types.ValidatorIndex(0), lwvi)
|
||||
|
||||
summaries, err := mSt.HistoricalSummaries()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(summaries))
|
||||
}
|
||||
|
||||
@@ -13,14 +13,11 @@ go_library(
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/validators:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stateutil:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -36,10 +33,8 @@ go_test(
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//beacon-chain/state/stateutil:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
|
||||
@@ -14,14 +14,11 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/validators"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stateutil"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v3/math"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/v3/runtime/version"
|
||||
)
|
||||
|
||||
// sortableIndices implements the Sort interface to sort newly activated validator indices
|
||||
@@ -352,39 +349,33 @@ func ProcessRandaoMixesReset(state state.BeaconState) (state.BeaconState, error)
return state, nil
}

// ProcessHistoricalDataUpdate processes the updates to historical data during epoch processing.
// From Capella onward, per spec,state's historical summaries are updated instead of historical roots.
func ProcessHistoricalDataUpdate(state state.BeaconState) (state.BeaconState, error) {
// ProcessHistoricalRootsUpdate processes the updates to historical root accumulator during epoch processing.
//
// Spec pseudocode definition:
//
// def process_historical_roots_update(state: BeaconState) -> None:
// # Set historical root accumulator
// next_epoch = Epoch(get_current_epoch(state) + 1)
// if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0:
// historical_batch = HistoricalBatch(block_roots=state.block_roots, state_roots=state.state_roots)
// state.historical_roots.append(hash_tree_root(historical_batch))
func ProcessHistoricalRootsUpdate(state state.BeaconState) (state.BeaconState, error) {
currentEpoch := time.CurrentEpoch(state)
nextEpoch := currentEpoch + 1

// Set historical root accumulator.
epochsPerHistoricalRoot := params.BeaconConfig().SlotsPerHistoricalRoot.DivSlot(params.BeaconConfig().SlotsPerEpoch)
if nextEpoch.Mod(uint64(epochsPerHistoricalRoot)) == 0 {
if state.Version() >= version.Capella {
br, err := stateutil.ArraysRoot(state.BlockRoots(), fieldparams.BlockRootsLength)
if err != nil {
return nil, err
}
sr, err := stateutil.ArraysRoot(state.StateRoots(), fieldparams.StateRootsLength)
if err != nil {
return nil, err
}
if err := state.AppendHistoricalSummaries(&ethpb.HistoricalSummary{BlockSummaryRoot: br[:], StateSummaryRoot: sr[:]}); err != nil {
return nil, err
}
} else {
historicalBatch := &ethpb.HistoricalBatch{
BlockRoots: state.BlockRoots(),
StateRoots: state.StateRoots(),
}
batchRoot, err := historicalBatch.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "could not hash historical batch")
}
if err := state.AppendHistoricalRoots(batchRoot); err != nil {
return nil, err
}
historicalBatch := &ethpb.HistoricalBatch{
BlockRoots: state.BlockRoots(),
StateRoots: state.StateRoots(),
}
batchRoot, err := historicalBatch.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "could not hash historical batch")
}
if err := state.AppendHistoricalRoots(batchRoot); err != nil {
return nil, err
}
}

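One detail worth spelling out from the hunk above: the accumulator only grows at a fixed epoch cadence. A rough back-of-the-envelope in Go, using mainnet parameters (SLOTS_PER_HISTORICAL_ROOT = 8192, SLOTS_PER_EPOCH = 32, 12-second slots), which are assumptions about the active config rather than values taken from this diff:

// One entry is appended every SLOTS_PER_HISTORICAL_ROOT / SLOTS_PER_EPOCH epochs.
const slotsPerHistoricalRoot = 8192
const slotsPerEpoch = 32
epochsPerEntry := uint64(slotsPerHistoricalRoot / slotsPerEpoch) // 256 epochs, roughly 27.3 hours on mainnet
currentEpoch := uint64(255)
nextEpoch := currentEpoch + 1
if nextEpoch%epochsPerEntry == 0 {
	// pre-Capella: append hash_tree_root(HistoricalBatch{block_roots, state_roots})
	// Capella onward: append a HistoricalSummary keeping the block and state roots separate
}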
@@ -435,7 +426,7 @@ func ProcessFinalUpdates(state state.BeaconState) (state.BeaconState, error) {
|
||||
}
|
||||
|
||||
// Set historical root accumulator.
|
||||
state, err = ProcessHistoricalDataUpdate(state)
|
||||
state, err = ProcessHistoricalRootsUpdate(state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -10,10 +10,8 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/epoch"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stateutil"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
@@ -276,9 +274,7 @@ func TestProcessFinalUpdates_CanProcess(t *testing.T) {
|
||||
assert.DeepNotEqual(t, params.BeaconConfig().ZeroHash[:], mix, "latest RANDAO still zero hashes")
|
||||
|
||||
// Verify historical root accumulator was appended.
|
||||
roots, err := newS.HistoricalRoots()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, len(roots), "Unexpected slashed balance")
|
||||
assert.Equal(t, 1, len(newS.HistoricalRoots()), "Unexpected slashed balance")
|
||||
currAtt, err := newS.CurrentEpochAttestations()
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, currAtt, "Nil value stored in current epoch attestations instead of empty slice")
|
||||
@@ -459,83 +455,3 @@ func TestProcessSlashings_BadValue(t *testing.T) {
|
||||
_, err = epoch.ProcessSlashings(s, params.BeaconConfig().ProportionalSlashingMultiplier)
|
||||
require.ErrorContains(t, "addition overflows", err)
|
||||
}
|
||||
|
||||
func TestProcessHistoricalDataUpdate(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
st func() state.BeaconState
|
||||
verifier func(state.BeaconState)
|
||||
}{
|
||||
{
|
||||
name: "no change",
|
||||
st: func() state.BeaconState {
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
return st
|
||||
},
|
||||
verifier: func(st state.BeaconState) {
|
||||
roots, err := st.HistoricalRoots()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(roots))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "before capella can process and get historical root",
|
||||
st: func() state.BeaconState {
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
st, err := transition.ProcessSlots(context.Background(), st, params.BeaconConfig().SlotsPerHistoricalRoot-1)
|
||||
require.NoError(t, err)
|
||||
return st
|
||||
},
|
||||
verifier: func(st state.BeaconState) {
|
||||
roots, err := st.HistoricalRoots()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(roots))
|
||||
|
||||
b := ðpb.HistoricalBatch{
|
||||
BlockRoots: st.BlockRoots(),
|
||||
StateRoots: st.StateRoots(),
|
||||
}
|
||||
r, err := b.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, r[:], roots[0])
|
||||
|
||||
_, err = st.HistoricalSummaries()
|
||||
require.ErrorContains(t, "HistoricalSummaries is not supported for phase0", err)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "after capella can process and get historical summary",
|
||||
st: func() state.BeaconState {
|
||||
st, _ := util.DeterministicGenesisStateCapella(t, 1)
|
||||
st, err := transition.ProcessSlots(context.Background(), st, params.BeaconConfig().SlotsPerHistoricalRoot-1)
|
||||
require.NoError(t, err)
|
||||
return st
|
||||
},
|
||||
verifier: func(st state.BeaconState) {
|
||||
summaries, err := st.HistoricalSummaries()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(summaries))
|
||||
|
||||
br, err := stateutil.ArraysRoot(st.BlockRoots(), fieldparams.BlockRootsLength)
|
||||
require.NoError(t, err)
|
||||
sr, err := stateutil.ArraysRoot(st.StateRoots(), fieldparams.StateRootsLength)
|
||||
require.NoError(t, err)
|
||||
b := ðpb.HistoricalSummary{
|
||||
BlockSummaryRoot: br[:],
|
||||
StateSummaryRoot: sr[:],
|
||||
}
|
||||
require.DeepEqual(t, b, summaries[0])
|
||||
hrs, err := st.HistoricalRoots()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, hrs, [][]byte{})
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := epoch.ProcessHistoricalDataUpdate(tt.st())
|
||||
require.NoError(t, err)
|
||||
tt.verifier(got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -35,10 +35,6 @@ func UpgradeToBellatrix(state state.BeaconState) (state.BeaconState, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hrs, err := state.HistoricalRoots()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s := ðpb.BeaconStateBellatrix{
|
||||
GenesisTime: state.GenesisTime(),
|
||||
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
|
||||
@@ -51,7 +47,7 @@ func UpgradeToBellatrix(state state.BeaconState) (state.BeaconState, error) {
|
||||
LatestBlockHeader: state.LatestBlockHeader(),
|
||||
BlockRoots: state.BlockRoots(),
|
||||
StateRoots: state.StateRoots(),
|
||||
HistoricalRoots: hrs,
|
||||
HistoricalRoots: state.HistoricalRoots(),
|
||||
Eth1Data: state.Eth1Data(),
|
||||
Eth1DataVotes: state.Eth1DataVotes(),
|
||||
Eth1DepositIndex: state.Eth1DepositIndex(),
|
||||
|
||||
@@ -24,11 +24,7 @@ func TestUpgradeToBellatrix(t *testing.T) {
|
||||
require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), mSt.LatestBlockHeader())
|
||||
require.DeepSSZEqual(t, preForkState.BlockRoots(), mSt.BlockRoots())
|
||||
require.DeepSSZEqual(t, preForkState.StateRoots(), mSt.StateRoots())
|
||||
r1, err := preForkState.HistoricalRoots()
|
||||
require.NoError(t, err)
|
||||
r2, err := mSt.HistoricalRoots()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, r1, r2)
|
||||
require.DeepSSZEqual(t, preForkState.HistoricalRoots(), mSt.HistoricalRoots())
|
||||
require.DeepSSZEqual(t, preForkState.Eth1Data(), mSt.Eth1Data())
|
||||
require.DeepSSZEqual(t, preForkState.Eth1DataVotes(), mSt.Eth1DataVotes())
|
||||
require.DeepSSZEqual(t, preForkState.Eth1DepositIndex(), mSt.Eth1DepositIndex())
|
||||
|
||||
@@ -19,9 +19,6 @@ const (
|
||||
|
||||
// SyncCommitteeContributionReceived is sent after a sync committee contribution object has been received.
|
||||
SyncCommitteeContributionReceived
|
||||
|
||||
// BLSToExecutionChangeReceived is sent after a BLS to execution change object has been received from gossip or rpc.
|
||||
BLSToExecutionChangeReceived
|
||||
)
|
||||
|
||||
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
|
||||
@@ -47,8 +44,3 @@ type SyncCommitteeContributionReceivedData struct {
|
||||
// Contribution is the sync committee contribution object.
|
||||
Contribution *ethpb.SignedContributionAndProof
|
||||
}
|
||||
|
||||
// BLSToExecutionChangeReceivedData is the data sent with BLSToExecutionChangeReceived events.
|
||||
type BLSToExecutionChangeReceivedData struct {
|
||||
Change *ethpb.SignedBLSToExecutionChange
|
||||
}
|
||||
|
||||
@@ -21,32 +21,6 @@ const DomainByteLength = 4
|
||||
// failed to verify.
|
||||
var ErrSigFailedToVerify = errors.New("signature did not verify")
|
||||
|
||||
// List of descriptions for different kinds of signatures
|
||||
const (
|
||||
// UnknownSignature represents all signatures other than below types
|
||||
UnknownSignature string = "unknown signature"
|
||||
// BlockSignature represents the block signature from block proposer
|
||||
BlockSignature = "block signature"
|
||||
// RandaoSignature represents randao specific signature
|
||||
RandaoSignature = "randao signature"
|
||||
// SelectionProof represents selection proof
|
||||
SelectionProof = "selection proof"
|
||||
// AggregatorSignature represents aggregator's signature
|
||||
AggregatorSignature = "aggregator signature"
|
||||
// AttestationSignature represents aggregated attestation signature
|
||||
AttestationSignature = "attestation signature"
|
||||
// BlsChangeSignature represents signature to BLSToExecutionChange
|
||||
BlsChangeSignature = "blschange signature"
|
||||
// SyncCommitteeSignature represents sync committee signature
|
||||
SyncCommitteeSignature = "sync committee signature"
|
||||
// SyncSelectionProof represents sync committee selection proof
|
||||
SyncSelectionProof = "sync selection proof"
|
||||
// ContributionSignature represents sync committee contributor's signature
|
||||
ContributionSignature = "sync committee contribution signature"
|
||||
// SyncAggregateSignature represents sync committee aggregator's signature
|
||||
SyncAggregateSignature = "sync committee aggregator signature"
|
||||
)
|
||||
|
||||
// ComputeDomainAndSign computes the domain and signing root and sign it using the passed in private key.
|
||||
func ComputeDomainAndSign(st state.ReadOnlyBeaconState, epoch types.Epoch, obj fssz.HashRoot, domain [4]byte, key bls.SecretKey) ([]byte, error) {
|
||||
d, err := Domain(st.Fork(), epoch, domain, st.GenesisValidatorsRoot())
|
||||
@@ -176,12 +150,10 @@ func BlockSignatureBatch(pub, signature, domain []byte, rootFunc func() ([32]byt
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute signing root")
|
||||
}
|
||||
desc := BlockSignature
|
||||
return &bls.SignatureBatch{
|
||||
Signatures: [][]byte{signature},
|
||||
PublicKeys: []bls.PublicKey{publicKey},
|
||||
Messages: [][32]byte{root},
|
||||
Descriptions: []string{desc},
|
||||
Signatures: [][]byte{signature},
|
||||
PublicKeys: []bls.PublicKey{publicKey},
|
||||
Messages: [][32]byte{root},
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -206,7 +178,7 @@ func ComputeDomain(domainType [DomainByteLength]byte, forkVersion, genesisValida
|
||||
if genesisValidatorsRoot == nil {
|
||||
genesisValidatorsRoot = params.BeaconConfig().ZeroHash[:]
|
||||
}
|
||||
var forkBytes [ForkVersionByteLength]byte
|
||||
forkBytes := [ForkVersionByteLength]byte{}
|
||||
copy(forkBytes[:], forkVersion)
|
||||
|
||||
forkDataRoot, err := computeForkDataRoot(forkBytes[:], genesisValidatorsRoot)
|
||||
|
||||
@@ -114,9 +114,9 @@ func TestSigningRoot_ComputeForkDigest(t *testing.T) {
|
||||
func TestFuzzverifySigningRoot_10000(_ *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
st := ðpb.BeaconState{}
|
||||
var pubkey [fieldparams.BLSPubkeyLength]byte
|
||||
var sig [96]byte
|
||||
var domain [4]byte
|
||||
pubkey := [fieldparams.BLSPubkeyLength]byte{}
|
||||
sig := [96]byte{}
|
||||
domain := [4]byte{}
|
||||
var p []byte
|
||||
var s []byte
|
||||
var d []byte
|
||||
|
||||
@@ -12,7 +12,16 @@ go_library(
|
||||
"transition_no_verify_sig.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition",
|
||||
visibility = ["//visibility:public"],
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//runtime/interop:__pkg__",
|
||||
"//testing/endtoend:__pkg__",
|
||||
"//testing/spectest:__subpackages__",
|
||||
"//testing/util:__pkg__",
|
||||
"//tools/benchmark-files-gen:__pkg__",
|
||||
"//tools/genesis-state-gen:__pkg__",
|
||||
"//tools/pcli:__pkg__",
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
@@ -28,13 +37,11 @@ go_library(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//beacon-chain/state/stateutil:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
|
||||
@@ -3,17 +3,21 @@ package transition
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/altair"
|
||||
b "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/altair"
|
||||
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
|
||||
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
b "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stateutil"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
state_native "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stateutil"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v3/container/trie"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -76,43 +75,6 @@ func GenesisBeaconState(ctx context.Context, deposits []*ethpb.Deposit, genesisT
|
||||
return OptimizedGenesisBeaconState(genesisTime, st, st.Eth1Data())
|
||||
}
|
||||
|
||||
// PreminedGenesisBeaconState works almost exactly like GenesisBeaconState, except that it assumes that genesis deposits
|
||||
// are not represented in the deposit contract and are only found in the genesis state validator registry. In order
|
||||
// to ensure the deposit root and count match the empty deposit contract deployed in a testnet genesis block, the root
|
||||
// of an empty deposit trie is computed and used as Eth1Data.deposit_root, and the deposit count is set to 0.
|
||||
func PreminedGenesisBeaconState(ctx context.Context, deposits []*ethpb.Deposit, genesisTime uint64, eth1Data *ethpb.Eth1Data) (state.BeaconState, error) {
|
||||
st, err := EmptyGenesisState()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Process initial deposits.
|
||||
st, err = helpers.UpdateGenesisEth1Data(st, deposits, eth1Data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
st, err = b.ProcessPreGenesisDeposits(ctx, st, deposits)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process validator deposits")
|
||||
}
|
||||
|
||||
t, err := trie.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dr, err := t.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := st.SetEth1Data(ðpb.Eth1Data{DepositRoot: dr[:], BlockHash: eth1Data.BlockHash}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := st.SetEth1DepositIndex(0); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return OptimizedGenesisBeaconState(genesisTime, st, st.Eth1Data())
|
||||
}
|
||||
|
||||
// OptimizedGenesisBeaconState is used to create a state that has already processed deposits. This is to efficiently
|
||||
// create a mainnet state at chainstart.
|
||||
func OptimizedGenesisBeaconState(genesisTime uint64, preState state.BeaconState, eth1Data *ethpb.Eth1Data) (state.BeaconState, error) {
|
||||
|
||||
@@ -17,7 +17,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/execution"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
|
||||
@@ -67,13 +66,7 @@ func ExecuteStateTransition(
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not execute state transition")
|
||||
}
|
||||
|
||||
var valid bool
|
||||
if features.Get().EnableVerboseSigVerification {
|
||||
valid, err = set.VerifyVerbosely()
|
||||
} else {
|
||||
valid, err = set.Verify()
|
||||
}
|
||||
valid, err := set.Verify()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not batch verify signature")
|
||||
}
|
||||
@@ -258,19 +251,20 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
return nil, errors.Wrap(err, "could not process slot")
}
if time.CanProcessEpoch(state) {
if state.Version() == version.Phase0 {
switch state.Version() {
case version.Phase0:
state, err = ProcessEpochPrecompute(ctx, state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, errors.Wrap(err, "could not process epoch with optimizations")
}
} else if state.Version() >= version.Altair {
case version.Altair, version.Bellatrix, version.Capella:
state, err = altair.ProcessEpoch(ctx, state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, errors.Wrap(err, "could not process epoch")
}
} else {
default:
return nil, errors.New("beacon state should have a version")
}
}

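The reworked dispatch above only runs at an epoch boundary, so the usual way to reach it is through ProcessSlots, exactly as the tests earlier in this diff do. A minimal sketch, assuming `st` is a valid state.BeaconState and the surrounding function returns an error:

// Advance a full epoch's worth of slots; CanProcessEpoch fires at the
// boundary and the switch above picks the per-fork epoch processing.
target := st.Slot() + params.BeaconConfig().SlotsPerEpoch
st, err := transition.ProcessSlots(context.Background(), st, target)
if err != nil {
	return err
}
// Phase0 states go through ProcessEpochPrecompute; Altair, Bellatrix and
// Capella states go through altair.ProcessEpoch. Any new fork has to be
// added to the case list explicitly, or the default branch errors out.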
@@ -53,7 +53,7 @@ func ExecuteStateTransitionNoVerifyAnySig(
|
||||
return nil, nil, errors.New("nil block")
|
||||
}
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "core.state.ExecuteStateTransitionNoVerifyAnySig")
|
||||
ctx, span := trace.StartSpan(ctx, "core.state.ExecuteStateTransitionNoVerifyAttSigs")
|
||||
defer span.End()
|
||||
var err error
|
||||
|
||||
|
||||
@@ -151,18 +151,6 @@ func TestProcessBlockNoVerifyAnySigAltair_OK(t *testing.T) {
|
||||
require.Equal(t, true, verified, "Could not verify signature set")
|
||||
}
|
||||
|
||||
func TestProcessBlockNoVerify_SigSetContainsDescriptions(t *testing.T) {
|
||||
beaconState, block, _, _, _ := createFullBlockWithOperations(t)
|
||||
wsb, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
set, _, err := transition.ProcessBlockNoVerifyAnySig(context.Background(), beaconState, wsb)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, len(set.Signatures), len(set.Descriptions), "Signatures and descriptions do not match up")
|
||||
assert.Equal(t, "block signature", set.Descriptions[0])
|
||||
assert.Equal(t, "randao signature", set.Descriptions[1])
|
||||
assert.Equal(t, "attestation signature", set.Descriptions[2])
|
||||
}
|
||||
|
||||
func TestProcessOperationsNoVerifyAttsSigs_OK(t *testing.T) {
|
||||
beaconState, block := createFullAltairBlockWithOperations(t)
|
||||
wsb, err := blocks.NewSignedBeaconBlock(block)
|
||||
|
||||
@@ -212,7 +212,7 @@ func createFullBlockWithOperations(t *testing.T) (state.BeaconState,
|
||||
err = beaconState.SetSlashings(make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector))
|
||||
require.NoError(t, err)
|
||||
cp := beaconState.CurrentJustifiedCheckpoint()
|
||||
var mockRoot [32]byte
|
||||
mockRoot := [32]byte{}
|
||||
copy(mockRoot[:], "hello-world")
|
||||
cp.Root = mockRoot[:]
|
||||
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cp))
|
||||
|
||||
@@ -328,8 +328,8 @@ func (s *Store) SaveBlocks(ctx context.Context, blks []interfaces.SignedBeaconBl
|
||||
func (s *Store) SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveHeadBlockRoot")
|
||||
defer span.End()
|
||||
hasStateSummary := s.HasStateSummary(ctx, blockRoot)
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
hasStateSummary := s.hasStateSummaryBytes(tx, blockRoot)
|
||||
hasStateInDB := tx.Bucket(stateBucket).Get(blockRoot[:]) != nil
|
||||
if !(hasStateInDB || hasStateSummary) {
|
||||
return errors.New("no state or state summary found with head block root")
|
||||
|
||||
@@ -98,7 +98,7 @@ func TestStore_SaveBackfillBlockRoot(t *testing.T) {
|
||||
_, err := db.BackfillBlockRoot(ctx)
|
||||
require.ErrorIs(t, err, ErrNotFoundBackfillBlockRoot)
|
||||
|
||||
var expected [32]byte
|
||||
expected := [32]byte{}
|
||||
copy(expected[:], []byte{0x23})
|
||||
err = db.SaveBackfillBlockRoot(ctx, expected)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -59,9 +59,9 @@ func (s *Store) SaveJustifiedCheckpoint(ctx context.Context, checkpoint *ethpb.C
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hasStateSummary := s.HasStateSummary(ctx, bytesutil.ToBytes32(checkpoint.Root))
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(checkpointBucket)
|
||||
hasStateSummary := s.hasStateSummaryBytes(tx, bytesutil.ToBytes32(checkpoint.Root))
|
||||
hasStateInDB := tx.Bucket(stateBucket).Get(checkpoint.Root) != nil
|
||||
if !(hasStateInDB || hasStateSummary) {
|
||||
log.Warnf("Recovering state summary for justified root: %#x", bytesutil.Trunc(checkpoint.Root))
|
||||
@@ -82,9 +82,9 @@ func (s *Store) SaveFinalizedCheckpoint(ctx context.Context, checkpoint *ethpb.C
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hasStateSummary := s.HasStateSummary(ctx, bytesutil.ToBytes32(checkpoint.Root))
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(checkpointBucket)
|
||||
hasStateSummary := s.hasStateSummaryBytes(tx, bytesutil.ToBytes32(checkpoint.Root))
|
||||
hasStateInDB := tx.Bucket(stateBucket).Get(checkpoint.Root) != nil
|
||||
if !(hasStateInDB || hasStateSummary) {
|
||||
log.Warnf("Recovering state summary for finalized root: %#x", bytesutil.Trunc(checkpoint.Root))
|
||||
|
||||
@@ -3,24 +3,38 @@ package kv
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/ssz/detect"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
|
||||
dbIface "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/iface"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/ssz/detect"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// SaveGenesisData bootstraps the beaconDB with a given genesis state.
|
||||
func (s *Store) SaveGenesisData(ctx context.Context, genesisState state.BeaconState) error {
|
||||
wsb, err := blocks.NewGenesisBlockForState(ctx, genesisState)
|
||||
stateRoot, err := genesisState.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get genesis block root")
|
||||
return errors.Wrap(err, "HashTreeRoot")
|
||||
}
|
||||
wsb, err := blocks.NewGenesisBlockForState(stateRoot, genesisState)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "NewGenesisBlockForState")
|
||||
}
|
||||
genesisBlkRoot, err := wsb.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get genesis block root")
|
||||
}
|
||||
/*
|
||||
lbhr, err := genesisState.LatestBlockHeader().HashTreeRoot()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to compute HTR of latest block header from genesis state")
|
||||
}
|
||||
if genesisBlkRoot != lbhr {
|
||||
return fmt.Errorf("state.latest_block_header=%#x does not match derived genessis block root=%#x", genesisBlkRoot, lbhr)
|
||||
}
|
||||
*/
|
||||
if err := s.SaveBlock(ctx, wsb); err != nil {
|
||||
return errors.Wrap(err, "could not save genesis block")
|
||||
}
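The reordering in the SaveGenesisData hunk above is easier to see without the error plumbing. A condensed sketch of the new flow, with names taken from the diff and error checks elided for brevity (each call returns an error in the real code):

stateRoot, _ := genesisState.HashTreeRoot(ctx)                    // 1. root of the genesis state
wsb, _ := blocks.NewGenesisBlockForState(stateRoot, genesisState) // 2. genesis block committing to that root
genesisBlkRoot, _ := wsb.Block().HashTreeRoot()                   // 3. block root used for genesis bookkeeping
_ = s.SaveBlock(ctx, wsb)                                         // 4. persist the genesis block
_ = genesisBlkRoot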
@@ -2,14 +2,12 @@ package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/bazelbuild/rules_go/go/tools/bazel"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db/iface"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/util"
|
||||
@@ -50,37 +48,6 @@ func testGenesisDataSaved(t *testing.T, db iface.Database) {
|
||||
require.Equal(t, gbHTR, headHTR, "head block does not match genesis block")
|
||||
}
|
||||
|
||||
func TestLoadCapellaFromFile(t *testing.T) {
|
||||
cfg, err := params.ByName(params.MainnetName)
|
||||
require.NoError(t, err)
|
||||
// This state fixture is from a hive testnet, `0a` is the suffix they are using in their fork versions.
|
||||
suffix, err := hex.DecodeString("0a")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(suffix))
|
||||
reversioned := cfg.Copy()
|
||||
params.FillTestVersions(reversioned, suffix[0])
|
||||
reversioned.CapellaForkEpoch = 0
|
||||
require.Equal(t, [4]byte{3, 0, 0, 10}, bytesutil.ToBytes4(reversioned.CapellaForkVersion))
|
||||
reversioned.ConfigName = "capella-genesis-test"
|
||||
undo, err := params.SetActiveWithUndo(reversioned)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
require.NoError(t, undo())
|
||||
}()
|
||||
|
||||
fp := "testdata/capella_genesis.ssz"
|
||||
rfp, err := bazel.Runfile(fp)
|
||||
if err == nil {
|
||||
fp = rfp
|
||||
}
|
||||
sb, err := os.ReadFile(fp)
|
||||
require.NoError(t, err)
|
||||
|
||||
db := setupDB(t)
|
||||
require.NoError(t, db.LoadGenesis(context.Background(), sb))
|
||||
testGenesisDataSaved(t, db)
|
||||
}
|
||||
|
||||
func TestLoadGenesisFromFile(t *testing.T) {
|
||||
// for this test to work, we need the active config to have these properties:
|
||||
// - fork version schedule that matches mainnnet.genesis.ssz
|
||||
@@ -90,7 +57,7 @@ func TestLoadGenesisFromFile(t *testing.T) {
|
||||
// uses the mainnet fork schedule. construct the differently named mainnet config and set it active.
|
||||
// finally, revert all this at the end of the test.
|
||||
|
||||
// first get the real mainnet out of the way by overwriting its schedule.
|
||||
// first get the real mainnet out of the way by overwriting it schedule.
|
||||
cfg, err := params.ByName(params.MainnetName)
|
||||
require.NoError(t, err)
|
||||
cfg = cfg.Copy()
|
||||
@@ -139,7 +106,7 @@ func TestLoadGenesisFromFile_mismatchedForkVersion(t *testing.T) {
|
||||
|
||||
// Loading a genesis with the wrong fork version as beacon config should throw an error.
|
||||
db := setupDB(t)
|
||||
assert.ErrorContains(t, "not found in any known fork choice schedule", db.LoadGenesis(context.Background(), sb))
|
||||
assert.ErrorContains(t, "does not match config genesis fork version", db.LoadGenesis(context.Background(), sb))
|
||||
}
|
||||
|
||||
func TestEnsureEmbeddedGenesis(t *testing.T) {
|
||||
|
||||
@@ -67,14 +67,9 @@ func (s *Store) HasStateSummary(ctx context.Context, blockRoot [32]byte) bool {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HasStateSummary")
defer span.End()

if s.stateSummaryCache.has(blockRoot) {
return true
}

var hasSummary bool
if err := s.db.View(func(tx *bolt.Tx) error {
enc := tx.Bucket(stateSummaryBucket).Get(blockRoot[:])
hasSummary = len(enc) > 0
hasSummary = s.hasStateSummaryBytes(tx, blockRoot)
return nil
}); err != nil {
return false
@@ -82,6 +77,14 @@ func (s *Store) HasStateSummary(ctx context.Context, blockRoot [32]byte) bool {
return hasSummary
}

func (s *Store) hasStateSummaryBytes(tx *bolt.Tx, blockRoot [32]byte) bool {
if s.stateSummaryCache.has(blockRoot) {
return true
}
enc := tx.Bucket(stateSummaryBucket).Get(blockRoot[:])
return len(enc) > 0
}

// This saves all cached state summary objects to DB, and clears up the cache.
func (s *Store) saveCachedStateSummariesDB(ctx context.Context) error {
summaries := s.stateSummaryCache.getAll()

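The helper added above is what lets the various Save* writers fold their existence check into the same bolt transaction as the write, instead of opening a separate read-only transaction first. A minimal sketch of that pattern, mirroring SaveHeadBlockRoot from this diff; the final Put using `blocksBucket`/`headBlockRootKey` is illustrative rather than copied from the source:

return s.db.Update(func(tx *bolt.Tx) error {
	// Same-transaction check: cache first, then the stateSummaryBucket bytes.
	hasStateSummary := s.hasStateSummaryBytes(tx, blockRoot)
	hasStateInDB := tx.Bucket(stateBucket).Get(blockRoot[:]) != nil
	if !(hasStateInDB || hasStateSummary) {
		return errors.New("no state or state summary found with head block root")
	}
	return tx.Bucket(blocksBucket).Put(headBlockRootKey, blockRoot[:]) // illustrative write
})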
BIN beacon-chain/db/kv/testdata/capella_genesis.ssz (vendored): Binary file not shown.
@@ -38,9 +38,9 @@ func (s *Store) SaveLastValidatedCheckpoint(ctx context.Context, checkpoint *eth
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hasStateSummary := s.HasStateSummary(ctx, bytesutil.ToBytes32(checkpoint.Root))
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(checkpointBucket)
|
||||
hasStateSummary := s.hasStateSummaryBytes(tx, bytesutil.ToBytes32(checkpoint.Root))
|
||||
hasStateInDB := tx.Bucket(stateBucket).Get(checkpoint.Root) != nil
|
||||
if !(hasStateInDB || hasStateSummary) {
|
||||
log.Warnf("Recovering state summary for last validated root: %#x", bytesutil.Trunc(checkpoint.Root))
|
||||
|
||||
@@ -51,26 +51,6 @@ func TestStore_LastValidatedCheckpoint_Recover(t *testing.T) {
|
||||
assert.Equal(t, true, proto.Equal(cp, retrieved), "Wanted %v, received %v", cp, retrieved)
|
||||
}
|
||||
|
||||
func BenchmarkStore_SaveLastValidatedCheckpoint(b *testing.B) {
|
||||
db := setupDB(b)
|
||||
ctx := context.Background()
|
||||
root := bytesutil.ToBytes32([]byte{'A'})
|
||||
cp := ðpb.Checkpoint{
|
||||
Epoch: 10,
|
||||
Root: root[:],
|
||||
}
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, st.SetSlot(1))
|
||||
require.NoError(b, db.SaveState(ctx, st, root))
|
||||
db.stateSummaryCache.clear()
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
require.NoError(b, db.SaveLastValidatedCheckpoint(ctx, cp))
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_LastValidatedCheckpoint_DefaultIsFinalized(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
@@ -106,7 +106,11 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
|
||||
latestBlkTime := s.latestEth1Data.BlockTime
|
||||
s.latestEth1DataLock.RUnlock()
|
||||
|
||||
log.WithField("time (req val)", time).WithField("latestBlockTime", latestBlkTime).WithField("latestBlockHeight", s.latestEth1Data.BlockHeight).Info("BlockByTimestamp")
|
||||
if time > latestBlkTime {
|
||||
if latestBlkHeight < params.BeaconConfig().Eth1FollowDistance {
|
||||
|
||||
}
|
||||
return nil, errors.Wrap(errBlockTimeTooLate, fmt.Sprintf("(%d > %d)", time, latestBlkTime))
|
||||
}
|
||||
// Initialize a pointer to eth1 chain's history to start our search from.
|
||||
|
||||
@@ -163,7 +163,7 @@ func TestService_logTtdStatus(t *testing.T) {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
|
||||
resp := &pb.ExecutionBlock{
|
||||
resp := &pb.ExecutionBlockBellatrix{
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.Hash{},
|
||||
UncleHash: common.Hash{},
|
||||
@@ -219,7 +219,7 @@ func TestService_logTtdStatus_NotSyncedClient(t *testing.T) {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
|
||||
resp := (*pb.ExecutionBlock)(nil) // Nil response when a client is not synced
|
||||
resp := (*pb.ExecutionBlockBellatrix)(nil) // Nil response when a client is not synced
|
||||
respJSON := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
|
||||
@@ -20,7 +20,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
|
||||
payloadattribute "github.com/prysmaticlabs/prysm/v3/consensus-types/payload-attribute"
|
||||
prysmType "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
types2 "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
|
||||
pb "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/v3/runtime/version"
|
||||
@@ -32,15 +32,15 @@ import (
|
||||
const (
|
||||
// NewPayloadMethod v1 request string for JSON-RPC.
|
||||
NewPayloadMethod = "engine_newPayloadV1"
|
||||
// NewPayloadMethodV2 v2 request string for JSON-RPC.
|
||||
// NewPayloadMethod v2 request string for JSON-RPC.
|
||||
NewPayloadMethodV2 = "engine_newPayloadV2"
|
||||
// ForkchoiceUpdatedMethod v1 request string for JSON-RPC.
|
||||
ForkchoiceUpdatedMethod = "engine_forkchoiceUpdatedV1"
|
||||
// ForkchoiceUpdatedMethodV2 v2 request string for JSON-RPC.
|
||||
// ForkchoiceUpdatedMethod v2 request string for JSON-RPC.
|
||||
ForkchoiceUpdatedMethodV2 = "engine_forkchoiceUpdatedV2"
|
||||
// GetPayloadMethod v1 request string for JSON-RPC.
|
||||
GetPayloadMethod = "engine_getPayloadV1"
|
||||
// GetPayloadMethodV2 v2 request string for JSON-RPC.
|
||||
// GetPayloadMethod v2 request string for JSON-RPC.
|
||||
GetPayloadMethodV2 = "engine_getPayloadV2"
|
||||
// ExchangeTransitionConfigurationMethod v1 request string for JSON-RPC.
|
||||
ExchangeTransitionConfigurationMethod = "engine_exchangeTransitionConfigurationV1"
|
||||
@@ -78,17 +78,15 @@ type EngineCaller interface {
|
||||
ForkchoiceUpdated(
|
||||
ctx context.Context, state *pb.ForkchoiceState, attrs payloadattribute.Attributer,
|
||||
) (*pb.PayloadIDBytes, []byte, error)
|
||||
GetPayload(ctx context.Context, payloadId [8]byte, slot prysmType.Slot) (interfaces.ExecutionData, error)
|
||||
GetPayload(ctx context.Context, payloadId [8]byte, slot types2.Slot) (interfaces.ExecutionData, error)
|
||||
ExchangeTransitionConfiguration(
|
||||
ctx context.Context, cfg *pb.TransitionConfiguration,
|
||||
) error
|
||||
ExecutionBlockByHash(ctx context.Context, hash common.Hash, withTxs bool) (*pb.ExecutionBlock, error)
|
||||
ExecutionBlockByHash(ctx context.Context, forkVersion int, hash common.Hash, withTxs bool) (interface{}, error)
|
||||
GetTerminalBlockHash(ctx context.Context, transitionTime uint64) ([]byte, bool, error)
|
||||
}
|
||||
|
||||
var EmptyBlockHash = errors.New("Block hash is empty 0x0000...")
|
||||
|
||||
// NewPayload calls the engine_newPayloadVX method via JSON-RPC.
|
||||
// NewPayload calls the engine_newPayloadV1 method via JSON-RPC.
|
||||
func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionData) ([]byte, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.NewPayload")
|
||||
defer span.End()
|
||||
@@ -101,9 +99,8 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa
|
||||
ctx, cancel := context.WithDeadline(ctx, d)
|
||||
defer cancel()
|
||||
result := &pb.PayloadStatus{}
|
||||
|
||||
switch payload.Proto().(type) {
|
||||
case *pb.ExecutionPayload:
|
||||
payloadPb, ok := payload.Proto().(*pb.ExecutionPayloadCapella)
|
||||
if !ok {
|
||||
payloadPb, ok := payload.Proto().(*pb.ExecutionPayload)
|
||||
if !ok {
|
||||
return nil, errors.New("execution data must be a Bellatrix or Capella execution payload")
|
||||
@@ -112,17 +109,11 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa
|
||||
if err != nil {
|
||||
return nil, handleRPCError(err)
|
||||
}
|
||||
case *pb.ExecutionPayloadCapella:
|
||||
payloadPb, ok := payload.Proto().(*pb.ExecutionPayloadCapella)
|
||||
if !ok {
|
||||
return nil, errors.New("execution data must be a Capella execution payload")
|
||||
}
|
||||
} else {
|
||||
err := s.rpcClient.CallContext(ctx, result, NewPayloadMethodV2, payloadPb)
|
||||
if err != nil {
|
||||
return nil, handleRPCError(err)
|
||||
}
|
||||
default:
|
||||
return nil, errors.New("unknown execution data type")
|
||||
}
|
||||
|
||||
switch result.Status {
|
||||
@@ -156,7 +147,7 @@ func (s *Service) ForkchoiceUpdated(
|
||||
result := &ForkchoiceUpdatedResponse{}
|
||||
|
||||
if attrs == nil {
|
||||
return nil, nil, errors.New("nil payload attributer")
|
||||
return nil, nil, errors.New("nil payload attribute")
|
||||
}
|
||||
switch attrs.Version() {
|
||||
case version.Bellatrix:
|
||||
@@ -177,8 +168,6 @@ func (s *Service) ForkchoiceUpdated(
|
||||
if err != nil {
|
||||
return nil, nil, handleRPCError(err)
|
||||
}
|
||||
default:
|
||||
return nil, nil, fmt.Errorf("unknown payload attribute version: %v", attrs.Version())
|
||||
}
|
||||
|
||||
if result.Status == nil {
|
||||
@@ -197,8 +186,8 @@ func (s *Service) ForkchoiceUpdated(
|
||||
}
|
||||
}
|
||||
|
||||
// GetPayload calls the engine_getPayloadVX method via JSON-RPC.
|
||||
func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot prysmType.Slot) (interfaces.ExecutionData, error) {
|
||||
// GetPayload calls the engine_getPayloadV1 method via JSON-RPC.
|
||||
func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot types2.Slot) (interfaces.ExecutionData, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetPayload")
|
||||
defer span.End()
|
||||
start := time.Now()
|
||||
@@ -315,16 +304,20 @@ func (s *Service) GetTerminalBlockHash(ctx context.Context, transitionTime uint6
|
||||
if parentHash == params.BeaconConfig().ZeroHash {
|
||||
return nil, false, nil
|
||||
}
|
||||
parentBlk, err := s.ExecutionBlockByHash(ctx, parentHash, false /* no txs */)
|
||||
parentBlk, err := s.ExecutionBlockByHash(ctx, version.Bellatrix, parentHash, false /* no txs */)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not get parent execution block")
|
||||
}
|
||||
if parentBlk == nil {
|
||||
return nil, false, errors.New("parent execution block is nil")
|
||||
}
|
||||
bellatrixParentBlk, ok := parentBlk.(*pb.ExecutionBlockBellatrix)
|
||||
if !ok {
|
||||
return nil, false, fmt.Errorf("wrong execution block type %T", parentBlk)
|
||||
}
|
||||
|
||||
if blockReachedTTD {
|
||||
parentTotalDifficulty, err := tDStringToUint256(parentBlk.TotalDifficulty)
|
||||
parentTotalDifficulty, err := tDStringToUint256(bellatrixParentBlk.TotalDifficulty)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
|
||||
}
|
||||
@@ -343,7 +336,7 @@ func (s *Service) GetTerminalBlockHash(ctx context.Context, transitionTime uint6
|
||||
"number": blk.Number,
|
||||
"hash": fmt.Sprintf("%#x", bytesutil.Trunc(blk.Hash[:])),
|
||||
"td": blk.TotalDifficulty,
|
||||
"parentTd": parentBlk.TotalDifficulty,
|
||||
"parentTd": bellatrixParentBlk.TotalDifficulty,
|
||||
"ttd": terminalTotalDifficulty,
|
||||
}).Info("Retrieved terminal block hash")
|
||||
return blk.Hash[:], true, nil
|
||||
@@ -351,17 +344,17 @@ func (s *Service) GetTerminalBlockHash(ctx context.Context, transitionTime uint6
|
||||
} else {
|
||||
return nil, false, nil
|
||||
}
|
||||
blk = parentBlk
|
||||
blk = bellatrixParentBlk
|
||||
}
|
||||
}
|
||||
|
||||
// LatestExecutionBlock fetches the latest execution engine block by calling
|
||||
// eth_blockByNumber via JSON-RPC.
|
||||
func (s *Service) LatestExecutionBlock(ctx context.Context) (*pb.ExecutionBlock, error) {
|
||||
func (s *Service) LatestExecutionBlock(ctx context.Context) (*pb.ExecutionBlockBellatrix, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.LatestExecutionBlock")
|
||||
defer span.End()
|
||||
|
||||
result := &pb.ExecutionBlock{}
|
||||
result := &pb.ExecutionBlockBellatrix{}
|
||||
err := s.rpcClient.CallContext(
|
||||
ctx,
|
||||
result,
|
||||
@@ -373,29 +366,38 @@ func (s *Service) LatestExecutionBlock(ctx context.Context) (*pb.ExecutionBlock,
}

// ExecutionBlockByHash fetches an execution engine block by hash by calling
// eth_blockByHash via JSON-RPC.
func (s *Service) ExecutionBlockByHash(ctx context.Context, hash common.Hash, withTxs bool) (*pb.ExecutionBlock, error) {
// eth_blockByHash via JSON-RPC. The forkVersion parameter determines which fork's
// execution block will be returned.
func (s *Service) ExecutionBlockByHash(ctx context.Context, forkVersion int, hash common.Hash, withTxs bool) (interface{}, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.ExecutionBlockByHash")
defer span.End()
result := &pb.ExecutionBlock{}
var result interface{}
switch forkVersion {
case version.Capella:
result = &pb.ExecutionBlockCapella{}
case version.Bellatrix:
result = &pb.ExecutionBlockBellatrix{}
default:
return nil, fmt.Errorf("unsupported execution block version %s", version.String(forkVersion))
}
err := s.rpcClient.CallContext(ctx, result, ExecutionBlockByHashMethod, hash, withTxs)
return result, handleRPCError(err)
}

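Callers of the reworked signature now have to say which fork's block shape they expect and unwrap the empty interface themselves; ReconstructFullBlock further down in this diff does exactly that. A minimal sketch, assuming `s`, `ctx` and `hash` are in scope and the surrounding function returns an error:

blkIface, err := s.ExecutionBlockByHash(ctx, version.Capella, hash, true /* with txs */)
if err != nil {
	return err
}
// Both *pb.ExecutionBlockBellatrix and *pb.ExecutionBlockCapella are expected to
// satisfy the pb.ExecutionBlock interface consumed by fullPayloadFromExecutionBlock.
executionBlock, ok := blkIface.(pb.ExecutionBlock)
if !ok {
	return fmt.Errorf("block type %T does not implement ExecutionBlock", blkIface)
}
_ = executionBlock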
// ExecutionBlocksByHashes fetches a batch of execution engine blocks by hash by calling
// eth_blockByHash via JSON-RPC.
func (s *Service) ExecutionBlocksByHashes(ctx context.Context, hashes []common.Hash, withTxs bool) ([]*pb.ExecutionBlock, error) {
func (s *Service) ExecutionBlocksByHashes(ctx context.Context, hashes []common.Hash, withTxs bool) ([]*pb.ExecutionBlockBellatrix, error) {
_, span := trace.StartSpan(ctx, "powchain.engine-api-client.ExecutionBlocksByHashes")
defer span.End()
numOfHashes := len(hashes)
elems := make([]gethRPC.BatchElem, 0, numOfHashes)
execBlks := make([]*pb.ExecutionBlock, 0, numOfHashes)
execBlks := make([]*pb.ExecutionBlockBellatrix, 0, numOfHashes)
errs := make([]error, 0, numOfHashes)
if numOfHashes == 0 {
return execBlks, nil
}
for _, h := range hashes {
blk := &pb.ExecutionBlock{}
blk := &pb.ExecutionBlockBellatrix{}
err := error(nil)
newH := h
elems = append(elems, gethRPC.BatchElem{
@@ -466,18 +468,22 @@ func (s *Service) ReconstructFullBlock(
}

executionBlockHash := common.BytesToHash(header.BlockHash())
executionBlock, err := s.ExecutionBlockByHash(ctx, executionBlockHash, true /* with txs */)
var executionBlockIface interface{}
if blindedBlock.Version() == version.Bellatrix {
executionBlockIface, err = s.ExecutionBlockByHash(ctx, version.Bellatrix, executionBlockHash, true /* with txs */)
} else {
executionBlockIface, err = s.ExecutionBlockByHash(ctx, version.Capella, executionBlockHash, true /* with txs */)
}
if err != nil {
return nil, fmt.Errorf("could not fetch execution block with txs by hash %#x: %v", executionBlockHash, err)
}
executionBlock, ok := executionBlockIface.(pb.ExecutionBlock)
if !ok {
return nil, fmt.Errorf("block type %T does not implement ExecutionBlock", executionBlockIface)
}
if executionBlock == nil {
return nil, fmt.Errorf("received nil execution block for request by hash %#x", executionBlockHash)
}
if bytes.Equal(executionBlock.Hash.Bytes(), []byte{}) {
return nil, EmptyBlockHash
}

executionBlock.Version = blindedBlock.Version()
payload, err := fullPayloadFromExecutionBlock(header, executionBlock)
if err != nil {
return nil, err
@@ -566,12 +572,12 @@ func (s *Service) ReconstructFullBellatrixBlockBatch(
}

func fullPayloadFromExecutionBlock(
header interfaces.ExecutionData, block *pb.ExecutionBlock,
header interfaces.ExecutionData, block pb.ExecutionBlock,
) (interfaces.ExecutionData, error) {
if header.IsNil() || block == nil {
return nil, errors.New("execution block and header cannot be nil")
}
blockHash := block.Hash
blockHash := block.GetHash()
if !bytes.Equal(header.BlockHash(), blockHash[:]) {
return nil, fmt.Errorf(
"block hash field in execution header %#x does not match execution block hash %#x",
@@ -579,7 +585,7 @@ func fullPayloadFromExecutionBlock(
blockHash,
)
}
blockTransactions := block.Transactions
blockTransactions := block.GetTransactions()
txs := make([][]byte, len(blockTransactions))
for i, tx := range blockTransactions {
txBin, err := tx.MarshalBinary()
@@ -589,7 +595,7 @@ func fullPayloadFromExecutionBlock(
txs[i] = txBin
}

if block.Version == version.Bellatrix {
if block.Version() == version.Bellatrix {
return blocks.WrappedExecutionPayload(&pb.ExecutionPayload{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
@@ -607,6 +613,10 @@ func fullPayloadFromExecutionBlock(
Transactions: txs,
})
}
withdrawals, err := block.GetWithdrawals()
if err != nil {
return nil, err
}
return blocks.WrappedExecutionPayloadCapella(&pb.ExecutionPayloadCapella{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
@@ -622,7 +632,7 @@ func fullPayloadFromExecutionBlock(
BaseFeePerGas: header.BaseFeePerGas(),
BlockHash: blockHash[:],
Transactions: txs,
Withdrawals: block.Withdrawals,
Withdrawals: withdrawals,
})
}


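For orientation only: judging from the call sites in fullPayloadFromExecutionBlock after this change, the pb.ExecutionBlock argument now behaves like an interface with roughly the method set below. This is an inference from the diff, not the actual generated type in proto/engine/v1.

// Inferred sketch, assuming go-ethereum transaction and Prysm withdrawal types;
// the real interface may declare more methods or different signatures.
type executionBlockLike interface {
	Version() int                              // used to pick Bellatrix vs. Capella payloads
	GetHash() common.Hash                      // compared against the blinded header's block hash
	GetTransactions() []*gethtypes.Transaction // each entry is marshaled with MarshalBinary()
	GetWithdrawals() ([]*pb.Withdrawal, error) // only consulted on the Capella path
}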
@@ -178,7 +178,7 @@ func FuzzExecutionBlock(f *testing.F) {
S: big.NewInt(math.MaxInt),
}
tx := types.NewTx(innerData)
execBlock := &pb.ExecutionBlock{
execBlock := &pb.ExecutionBlockBellatrix{
Header: types.Header{
ParentHash: common.Hash([32]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}),
Root: common.Hash([32]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}),
@@ -203,7 +203,7 @@ func FuzzExecutionBlock(f *testing.F) {

f.Fuzz(func(t *testing.T, jsonBlob []byte) {
gethResp := make(map[string]interface{})
prysmResp := &pb.ExecutionBlock{}
prysmResp := &pb.ExecutionBlockBellatrix{}
gethErr := json.Unmarshal(jsonBlob, &gethResp)
prysmErr := json.Unmarshal(jsonBlob, prysmResp)
// Nothing to marshal if we have an error.
@@ -237,7 +237,7 @@ func FuzzExecutionBlock(f *testing.F) {
})
}

func isBogusTransactionHash(blk *pb.ExecutionBlock, jsonMap map[string]interface{}) bool {
func isBogusTransactionHash(blk *pb.ExecutionBlockBellatrix, jsonMap map[string]interface{}) bool {
if blk.Transactions == nil {
return false
}
@@ -260,7 +260,7 @@ func isBogusTransactionHash(blk *pb.ExecutionBlock, jsonMap map[string]interface

func compareHeaders(t *testing.T, jsonBlob []byte) {
gethResp := &types.Header{}
prysmResp := &pb.ExecutionBlock{}
prysmResp := &pb.ExecutionBlockBellatrix{}
gethErr := json.Unmarshal(jsonBlob, gethResp)
prysmErr := json.Unmarshal(jsonBlob, prysmResp)
assert.Equal(t, gethErr != nil, prysmErr != nil, fmt.Sprintf("geth and prysm unmarshaller return inconsistent errors. %v and %v", gethErr, prysmErr))
@@ -282,7 +282,7 @@ func compareHeaders(t *testing.T, jsonBlob []byte) {
assert.DeepEqual(t, newGethResp, newGethResp2)
}

func validateBlockConsistency(execBlock *pb.ExecutionBlock, jsonMap map[string]interface{}) error {
func validateBlockConsistency(execBlock *pb.ExecutionBlockBellatrix, jsonMap map[string]interface{}) error {
blockVal := reflect.ValueOf(execBlock).Elem()
bType := reflect.TypeOf(execBlock).Elem()

@@ -316,7 +316,7 @@ func validateBlockConsistency(execBlock *pb.ExecutionBlock, jsonMap map[string]i
return nil
}

func jsonFieldsAreValid(execBlock *pb.ExecutionBlock, jsonMap map[string]interface{}) (bool, error) {
func jsonFieldsAreValid(execBlock *pb.ExecutionBlockBellatrix, jsonMap map[string]interface{}) (bool, error) {
bType := reflect.TypeOf(execBlock).Elem()

fieldnum := bType.NumField()

@@ -54,7 +54,6 @@ func (RPCClientBad) CallContext(context.Context, interface{}, string, ...interfa
|
||||
}
|
||||
|
||||
func TestClient_IPC(t *testing.T) {
|
||||
t.Skip("Skipping IPC test to support Capella devnet-3")
|
||||
server := newTestIPCServer(t)
|
||||
defer server.Stop()
|
||||
rpcClient := rpc.DialInProc(server)
|
||||
@@ -64,47 +63,20 @@ func TestClient_IPC(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fix := fixtures()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.CapellaForkEpoch = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
t.Run(GetPayloadMethod, func(t *testing.T) {
|
||||
want, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
|
||||
require.Equal(t, true, ok)
|
||||
payloadId := [8]byte{1}
|
||||
resp, err := srv.GetPayload(ctx, payloadId, 1)
|
||||
resp, err := srv.GetPayload(ctx, payloadId)
|
||||
require.NoError(t, err)
|
||||
resPb, err := resp.PbBellatrix()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want, resPb)
|
||||
})
|
||||
t.Run(GetPayloadMethodV2, func(t *testing.T) {
|
||||
want, ok := fix["ExecutionPayloadCapella"].(*pb.ExecutionPayloadCapella)
|
||||
require.Equal(t, true, ok)
|
||||
payloadId := [8]byte{1}
|
||||
resp, err := srv.GetPayload(ctx, payloadId, params.BeaconConfig().SlotsPerEpoch)
|
||||
require.NoError(t, err)
|
||||
resPb, err := resp.PbCapella()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want, resPb)
|
||||
require.DeepEqual(t, want, resp)
|
||||
})
|
||||
t.Run(ForkchoiceUpdatedMethod, func(t *testing.T) {
|
||||
want, ok := fix["ForkchoiceUpdatedResponse"].(*ForkchoiceUpdatedResponse)
|
||||
require.Equal(t, true, ok)
|
||||
p, err := payloadattribute.New(&pb.PayloadAttributes{})
|
||||
attr, err := payloadattribute.New(&pb.PayloadAttributes{})
|
||||
require.NoError(t, err)
|
||||
payloadID, validHash, err := srv.ForkchoiceUpdated(ctx, &pb.ForkchoiceState{}, p)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want.Status.LatestValidHash, validHash)
|
||||
require.DeepEqual(t, want.PayloadId, payloadID)
|
||||
})
|
||||
t.Run(ForkchoiceUpdatedMethodV2, func(t *testing.T) {
|
||||
want, ok := fix["ForkchoiceUpdatedResponse"].(*ForkchoiceUpdatedResponse)
|
||||
require.Equal(t, true, ok)
|
||||
p, err := payloadattribute.New(&pb.PayloadAttributesV2{})
|
||||
require.NoError(t, err)
|
||||
payloadID, validHash, err := srv.ForkchoiceUpdated(ctx, &pb.ForkchoiceState{}, p)
|
||||
payloadID, validHash, err := srv.ForkchoiceUpdated(ctx, &pb.ForkchoiceState{}, attr)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want.Status.LatestValidHash, validHash)
|
||||
require.DeepEqual(t, want.PayloadId, payloadID)
|
||||
@@ -120,17 +92,6 @@ func TestClient_IPC(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, bytesutil.ToBytes32(want.LatestValidHash), bytesutil.ToBytes32(latestValidHash))
|
||||
})
|
||||
t.Run(NewPayloadMethodV2, func(t *testing.T) {
|
||||
want, ok := fix["ValidPayloadStatus"].(*pb.PayloadStatus)
|
||||
require.Equal(t, true, ok)
|
||||
req, ok := fix["ExecutionPayloadCapella"].(*pb.ExecutionPayloadCapella)
|
||||
require.Equal(t, true, ok)
|
||||
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(req)
|
||||
require.NoError(t, err)
|
||||
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, bytesutil.ToBytes32(want.LatestValidHash), bytesutil.ToBytes32(latestValidHash))
|
||||
})
|
||||
t.Run(ExchangeTransitionConfigurationMethod, func(t *testing.T) {
|
||||
want, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
|
||||
require.Equal(t, true, ok)
|
||||
@@ -138,33 +99,26 @@ func TestClient_IPC(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
})
|
||||
t.Run(ExecutionBlockByNumberMethod, func(t *testing.T) {
|
||||
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
|
||||
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlockBellatrix)
|
||||
require.Equal(t, true, ok)
|
||||
resp, err := srv.LatestExecutionBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want, resp)
|
||||
})
|
||||
t.Run(ExecutionBlockByHashMethod, func(t *testing.T) {
|
||||
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
|
||||
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlockBellatrix)
|
||||
require.Equal(t, true, ok)
|
||||
arg := common.BytesToHash([]byte("foo"))
|
||||
resp, err := srv.ExecutionBlockByHash(ctx, arg, true /* with txs */)
|
||||
resp, err := srv.ExecutionBlockByHash(ctx, version.Bellatrix, arg, true /* with txs */)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want, resp)
|
||||
})
|
||||
}
|
||||
|
||||
func TestClient_HTTP(t *testing.T) {
|
||||
t.Skip("Skipping HTTP test to support Capella devnet-3")
|
||||
|
||||
ctx := context.Background()
|
||||
fix := fixtures()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.CapellaForkEpoch = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
t.Run(GetPayloadMethod, func(t *testing.T) {
|
||||
payloadId := [8]byte{1}
|
||||
want, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
|
||||
@@ -203,55 +157,9 @@ func TestClient_HTTP(t *testing.T) {
|
||||
client.rpcClient = rpcClient
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
resp, err := client.GetPayload(ctx, payloadId, 1)
|
||||
resp, err := client.GetPayload(ctx, payloadId)
|
||||
require.NoError(t, err)
|
||||
pb, err := resp.PbBellatrix()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want, pb)
|
||||
})
|
||||
t.Run(GetPayloadMethodV2, func(t *testing.T) {
|
||||
payloadId := [8]byte{1}
|
||||
want, ok := fix["ExecutionPayloadCapella"].(*pb.ExecutionPayloadCapella)
|
||||
require.Equal(t, true, ok)
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
enc, err := io.ReadAll(r.Body)
|
||||
require.NoError(t, err)
|
||||
jsonRequestString := string(enc)
|
||||
|
||||
reqArg, err := json.Marshal(pb.PayloadIDBytes(payloadId))
|
||||
require.NoError(t, err)
|
||||
|
||||
// We expect the JSON string RPC request contains the right arguments.
|
||||
require.Equal(t, true, strings.Contains(
|
||||
jsonRequestString, string(reqArg),
|
||||
))
|
||||
resp := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": want,
|
||||
}
|
||||
err = json.NewEncoder(w).Encode(resp)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
defer rpcClient.Close()
|
||||
|
||||
client := &Service{}
|
||||
client.rpcClient = rpcClient
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
resp, err := client.GetPayload(ctx, payloadId, params.BeaconConfig().SlotsPerEpoch)
|
||||
require.NoError(t, err)
|
||||
pb, err := resp.PbCapella()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want, pb)
|
||||
require.DeepEqual(t, want, resp)
|
||||
})
|
||||
t.Run(ForkchoiceUpdatedMethod+" VALID status", func(t *testing.T) {
|
||||
forkChoiceState := &pb.ForkchoiceState{
|
||||
@@ -264,38 +172,14 @@ func TestClient_HTTP(t *testing.T) {
|
||||
PrevRandao: []byte("random"),
|
||||
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
|
||||
}
|
||||
p, err := payloadattribute.New(payloadAttributes)
|
||||
require.NoError(t, err)
|
||||
want, ok := fix["ForkchoiceUpdatedResponse"].(*ForkchoiceUpdatedResponse)
|
||||
require.Equal(t, true, ok)
|
||||
srv := forkchoiceUpdateSetup(t, forkChoiceState, payloadAttributes, want)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
payloadID, validHash, err := srv.ForkchoiceUpdated(ctx, forkChoiceState, p)
|
||||
attr, err := payloadattribute.New(payloadAttributes)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want.Status.LatestValidHash, validHash)
|
||||
require.DeepEqual(t, want.PayloadId, payloadID)
|
||||
})
|
||||
t.Run(ForkchoiceUpdatedMethodV2+" VALID status", func(t *testing.T) {
|
||||
forkChoiceState := &pb.ForkchoiceState{
|
||||
HeadBlockHash: []byte("head"),
|
||||
SafeBlockHash: []byte("safe"),
|
||||
FinalizedBlockHash: []byte("finalized"),
|
||||
}
|
||||
payloadAttributes := &pb.PayloadAttributesV2{
|
||||
Timestamp: 1,
|
||||
PrevRandao: []byte("random"),
|
||||
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
|
||||
Withdrawals: []*pb.Withdrawal{{ValidatorIndex: 1, Amount: 1}},
|
||||
}
|
||||
p, err := payloadattribute.New(payloadAttributes)
|
||||
require.NoError(t, err)
|
||||
want, ok := fix["ForkchoiceUpdatedResponse"].(*ForkchoiceUpdatedResponse)
|
||||
require.Equal(t, true, ok)
|
||||
srv := forkchoiceUpdateSetupV2(t, forkChoiceState, payloadAttributes, want)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
payloadID, validHash, err := srv.ForkchoiceUpdated(ctx, forkChoiceState, p)
|
||||
payloadID, validHash, err := srv.ForkchoiceUpdated(ctx, forkChoiceState, attr)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want.Status.LatestValidHash, validHash)
|
||||
require.DeepEqual(t, want.PayloadId, payloadID)
|
||||
@@ -311,38 +195,14 @@ func TestClient_HTTP(t *testing.T) {
|
||||
PrevRandao: []byte("random"),
|
||||
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
|
||||
}
|
||||
p, err := payloadattribute.New(payloadAttributes)
|
||||
require.NoError(t, err)
|
||||
want, ok := fix["ForkchoiceUpdatedSyncingResponse"].(*ForkchoiceUpdatedResponse)
|
||||
require.Equal(t, true, ok)
|
||||
client := forkchoiceUpdateSetup(t, forkChoiceState, payloadAttributes, want)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, forkChoiceState, p)
|
||||
require.ErrorIs(t, err, ErrAcceptedSyncingPayloadStatus)
|
||||
require.DeepEqual(t, (*pb.PayloadIDBytes)(nil), payloadID)
|
||||
require.DeepEqual(t, []byte(nil), validHash)
|
||||
})
|
||||
t.Run(ForkchoiceUpdatedMethodV2+" SYNCING status", func(t *testing.T) {
|
||||
forkChoiceState := &pb.ForkchoiceState{
|
||||
HeadBlockHash: []byte("head"),
|
||||
SafeBlockHash: []byte("safe"),
|
||||
FinalizedBlockHash: []byte("finalized"),
|
||||
}
|
||||
payloadAttributes := &pb.PayloadAttributesV2{
|
||||
Timestamp: 1,
|
||||
PrevRandao: []byte("random"),
|
||||
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
|
||||
Withdrawals: []*pb.Withdrawal{{ValidatorIndex: 1, Amount: 1}},
|
||||
}
|
||||
p, err := payloadattribute.New(payloadAttributes)
|
||||
attr, err := payloadattribute.New(&payloadAttributes)
|
||||
require.NoError(t, err)
|
||||
want, ok := fix["ForkchoiceUpdatedSyncingResponse"].(*ForkchoiceUpdatedResponse)
|
||||
require.Equal(t, true, ok)
|
||||
srv := forkchoiceUpdateSetupV2(t, forkChoiceState, payloadAttributes, want)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
payloadID, validHash, err := srv.ForkchoiceUpdated(ctx, forkChoiceState, p)
|
||||
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, forkChoiceState, attr)
|
||||
require.ErrorIs(t, err, ErrAcceptedSyncingPayloadStatus)
|
||||
require.DeepEqual(t, (*pb.PayloadIDBytes)(nil), payloadID)
|
||||
require.DeepEqual(t, []byte(nil), validHash)
|
||||
@@ -358,14 +218,14 @@ func TestClient_HTTP(t *testing.T) {
|
||||
PrevRandao: []byte("random"),
|
||||
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
|
||||
}
|
||||
p, err := payloadattribute.New(payloadAttributes)
|
||||
require.NoError(t, err)
|
||||
want, ok := fix["ForkchoiceUpdatedInvalidResponse"].(*ForkchoiceUpdatedResponse)
|
||||
require.Equal(t, true, ok)
|
||||
client := forkchoiceUpdateSetup(t, forkChoiceState, payloadAttributes, want)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, forkChoiceState, p)
|
||||
attr, err := payloadattribute.New(&payloadAttributes)
|
||||
require.NoError(t, err)
|
||||
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, forkChoiceState, attr)
|
||||
require.ErrorIs(t, err, ErrInvalidPayloadStatus)
|
||||
require.DeepEqual(t, (*pb.PayloadIDBytes)(nil), payloadID)
|
||||
require.DeepEqual(t, want.Status.LatestValidHash, validHash)
|
||||
@@ -381,14 +241,14 @@ func TestClient_HTTP(t *testing.T) {
|
||||
PrevRandao: []byte("random"),
|
||||
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
|
||||
}
|
||||
p, err := payloadattribute.New(payloadAttributes)
|
||||
require.NoError(t, err)
|
||||
want, ok := fix["ForkchoiceUpdatedAcceptedResponse"].(*ForkchoiceUpdatedResponse)
|
||||
require.Equal(t, true, ok)
|
||||
client := forkchoiceUpdateSetup(t, forkChoiceState, payloadAttributes, want)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, forkChoiceState, p)
|
||||
attr, err := payloadattribute.New(&payloadAttributes)
|
||||
require.NoError(t, err)
|
||||
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, forkChoiceState, attr)
|
||||
require.ErrorIs(t, err, ErrUnknownPayloadStatus)
|
||||
require.DeepEqual(t, (*pb.PayloadIDBytes)(nil), payloadID)
|
||||
require.DeepEqual(t, []byte(nil), validHash)
|
||||
@@ -407,20 +267,6 @@ func TestClient_HTTP(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want.LatestValidHash, resp)
|
||||
})
|
||||
t.Run(NewPayloadMethodV2+" VALID status", func(t *testing.T) {
|
||||
execPayload, ok := fix["ExecutionPayloadCapella"].(*pb.ExecutionPayloadCapella)
|
||||
require.Equal(t, true, ok)
|
||||
want, ok := fix["ValidPayloadStatus"].(*pb.PayloadStatus)
|
||||
require.Equal(t, true, ok)
|
||||
client := newPayloadV2Setup(t, want, execPayload)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload)
|
||||
require.NoError(t, err)
|
||||
resp, err := client.NewPayload(ctx, wrappedPayload)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want.LatestValidHash, resp)
|
||||
})
|
||||
t.Run(NewPayloadMethod+" SYNCING status", func(t *testing.T) {
|
||||
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
|
||||
require.Equal(t, true, ok)
|
||||
@@ -435,20 +281,6 @@ func TestClient_HTTP(t *testing.T) {
|
||||
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
|
||||
require.DeepEqual(t, []uint8(nil), resp)
|
||||
})
|
||||
t.Run(NewPayloadMethodV2+" SYNCING status", func(t *testing.T) {
|
||||
execPayload, ok := fix["ExecutionPayloadCapella"].(*pb.ExecutionPayloadCapella)
|
||||
require.Equal(t, true, ok)
|
||||
want, ok := fix["SyncingStatus"].(*pb.PayloadStatus)
|
||||
require.Equal(t, true, ok)
|
||||
client := newPayloadV2Setup(t, want, execPayload)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload)
|
||||
require.NoError(t, err)
|
||||
resp, err := client.NewPayload(ctx, wrappedPayload)
|
||||
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
|
||||
require.DeepEqual(t, []uint8(nil), resp)
|
||||
})
|
||||
t.Run(NewPayloadMethod+" INVALID_BLOCK_HASH status", func(t *testing.T) {
|
||||
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
|
||||
require.Equal(t, true, ok)
|
||||
@@ -463,20 +295,6 @@ func TestClient_HTTP(t *testing.T) {
|
||||
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
|
||||
require.DeepEqual(t, []uint8(nil), resp)
|
||||
})
|
||||
t.Run(NewPayloadMethodV2+" INVALID_BLOCK_HASH status", func(t *testing.T) {
|
||||
execPayload, ok := fix["ExecutionPayloadCapella"].(*pb.ExecutionPayloadCapella)
|
||||
require.Equal(t, true, ok)
|
||||
want, ok := fix["InvalidBlockHashStatus"].(*pb.PayloadStatus)
|
||||
require.Equal(t, true, ok)
|
||||
client := newPayloadV2Setup(t, want, execPayload)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload)
|
||||
require.NoError(t, err)
|
||||
resp, err := client.NewPayload(ctx, wrappedPayload)
|
||||
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
|
||||
require.DeepEqual(t, []uint8(nil), resp)
|
||||
})
|
||||
t.Run(NewPayloadMethod+" INVALID status", func(t *testing.T) {
|
||||
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
|
||||
require.Equal(t, true, ok)
|
||||
@@ -491,20 +309,6 @@ func TestClient_HTTP(t *testing.T) {
|
||||
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
|
||||
require.DeepEqual(t, want.LatestValidHash, resp)
|
||||
})
|
||||
t.Run(NewPayloadMethodV2+" INVALID status", func(t *testing.T) {
|
||||
execPayload, ok := fix["ExecutionPayloadCapella"].(*pb.ExecutionPayloadCapella)
|
||||
require.Equal(t, true, ok)
|
||||
want, ok := fix["InvalidStatus"].(*pb.PayloadStatus)
|
||||
require.Equal(t, true, ok)
|
||||
client := newPayloadV2Setup(t, want, execPayload)
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload)
|
||||
require.NoError(t, err)
|
||||
resp, err := client.NewPayload(ctx, wrappedPayload)
|
||||
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
|
||||
require.DeepEqual(t, want.LatestValidHash, resp)
|
||||
})
|
||||
t.Run(NewPayloadMethod+" UNKNOWN status", func(t *testing.T) {
|
||||
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
|
||||
require.Equal(t, true, ok)
|
||||
@@ -520,7 +324,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
require.DeepEqual(t, []uint8(nil), resp)
|
||||
})
|
||||
t.Run(ExecutionBlockByNumberMethod, func(t *testing.T) {
|
||||
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
|
||||
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlockBellatrix)
|
||||
require.Equal(t, true, ok)
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
@@ -589,7 +393,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
})
|
||||
t.Run(ExecutionBlockByHashMethod, func(t *testing.T) {
|
||||
arg := common.BytesToHash([]byte("foo"))
|
||||
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
|
||||
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlockBellatrix)
|
||||
require.Equal(t, true, ok)
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
@@ -621,7 +425,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
service.rpcClient = rpcClient
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
resp, err := service.ExecutionBlockByHash(ctx, arg, true /* with txs */)
|
||||
resp, err := service.ExecutionBlockByHash(ctx, version.Bellatrix, arg, true /* with txs */)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want, resp)
|
||||
})
|
||||
@@ -885,8 +689,8 @@ func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
paramsTd string
|
||||
currentPowBlock *pb.ExecutionBlock
|
||||
parentPowBlock *pb.ExecutionBlock
|
||||
currentPowBlock *pb.ExecutionBlockBellatrix
|
||||
parentPowBlock *pb.ExecutionBlockBellatrix
|
||||
errLatestExecutionBlk error
|
||||
wantTerminalBlockHash []byte
|
||||
wantExists bool
|
||||
@@ -911,7 +715,7 @@ func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
|
||||
{
|
||||
name: "current execution block invalid TD",
|
||||
paramsTd: "1",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
currentPowBlock: &pb.ExecutionBlockBellatrix{
|
||||
Hash: common.BytesToHash([]byte("a")),
|
||||
TotalDifficulty: "1115792089237316195423570985008687907853269984665640564039457584007913129638912",
|
||||
},
|
||||
@@ -920,7 +724,7 @@ func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
|
||||
{
|
||||
name: "current execution block has zero hash parent",
|
||||
paramsTd: "2",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
currentPowBlock: &pb.ExecutionBlockBellatrix{
|
||||
Hash: common.BytesToHash([]byte("a")),
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash(params.BeaconConfig().ZeroHash[:]),
|
||||
@@ -931,7 +735,7 @@ func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
|
||||
{
|
||||
name: "could not get parent block",
|
||||
paramsTd: "2",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
currentPowBlock: &pb.ExecutionBlockBellatrix{
|
||||
Hash: common.BytesToHash([]byte("a")),
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash([]byte("b")),
|
||||
@@ -943,14 +747,14 @@ func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
|
||||
{
|
||||
name: "parent execution block invalid TD",
|
||||
paramsTd: "2",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
currentPowBlock: &pb.ExecutionBlockBellatrix{
|
||||
Hash: common.BytesToHash([]byte("a")),
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash([]byte("b")),
|
||||
},
|
||||
TotalDifficulty: "0x3",
|
||||
},
|
||||
parentPowBlock: &pb.ExecutionBlock{
|
||||
parentPowBlock: &pb.ExecutionBlockBellatrix{
|
||||
Hash: common.BytesToHash([]byte("b")),
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash([]byte("c")),
|
||||
@@ -962,14 +766,14 @@ func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
|
||||
{
|
||||
name: "happy case",
|
||||
paramsTd: "2",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
currentPowBlock: &pb.ExecutionBlockBellatrix{
|
||||
Hash: common.BytesToHash([]byte("a")),
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash([]byte("b")),
|
||||
},
|
||||
TotalDifficulty: "0x3",
|
||||
},
|
||||
parentPowBlock: &pb.ExecutionBlock{
|
||||
parentPowBlock: &pb.ExecutionBlockBellatrix{
|
||||
Hash: common.BytesToHash([]byte("b")),
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash([]byte("c")),
|
||||
@@ -982,7 +786,7 @@ func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
|
||||
{
|
||||
name: "happy case, but invalid timestamp",
|
||||
paramsTd: "2",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
currentPowBlock: &pb.ExecutionBlockBellatrix{
|
||||
Hash: common.BytesToHash([]byte("a")),
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash([]byte("b")),
|
||||
@@ -990,7 +794,7 @@ func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
|
||||
},
|
||||
TotalDifficulty: "0x3",
|
||||
},
|
||||
parentPowBlock: &pb.ExecutionBlock{
|
||||
parentPowBlock: &pb.ExecutionBlockBellatrix{
|
||||
Hash: common.BytesToHash([]byte("b")),
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash([]byte("c")),
|
||||
@@ -1001,14 +805,14 @@ func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
|
||||
{
|
||||
name: "ttd not reached",
|
||||
paramsTd: "3",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
currentPowBlock: &pb.ExecutionBlockBellatrix{
|
||||
Hash: common.BytesToHash([]byte("a")),
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash([]byte("b")),
|
||||
},
|
||||
TotalDifficulty: "0x2",
|
||||
},
|
||||
parentPowBlock: &pb.ExecutionBlock{
|
||||
parentPowBlock: &pb.ExecutionBlockBellatrix{
|
||||
Hash: common.BytesToHash([]byte("b")),
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash([]byte("c")),
|
||||
@@ -1022,9 +826,9 @@ func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.TerminalTotalDifficulty = tt.paramsTd
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
var m map[[32]byte]*pb.ExecutionBlock
|
||||
var m map[[32]byte]*pb.ExecutionBlockBellatrix
|
||||
if tt.parentPowBlock != nil {
|
||||
m = map[[32]byte]*pb.ExecutionBlock{
|
||||
m = map[[32]byte]*pb.ExecutionBlockBellatrix{
|
||||
tt.parentPowBlock.Hash: tt.parentPowBlock,
|
||||
}
|
||||
}
|
||||
@@ -1280,23 +1084,6 @@ func fixtures() map[string]interface{} {
|
||||
BlockHash: foo[:],
|
||||
Transactions: [][]byte{foo[:]},
|
||||
}
|
||||
executionPayloadFixtureCapella := &pb.ExecutionPayloadCapella{
|
||||
ParentHash: foo[:],
|
||||
FeeRecipient: bar,
|
||||
StateRoot: foo[:],
|
||||
ReceiptsRoot: foo[:],
|
||||
LogsBloom: baz,
|
||||
PrevRandao: foo[:],
|
||||
BlockNumber: 1,
|
||||
GasLimit: 1,
|
||||
GasUsed: 1,
|
||||
Timestamp: 1,
|
||||
ExtraData: foo[:],
|
||||
BaseFeePerGas: bytesutil.PadTo(baseFeePerGas.Bytes(), fieldparams.RootLength),
|
||||
BlockHash: foo[:],
|
||||
Transactions: [][]byte{foo[:]},
|
||||
Withdrawals: []*pb.Withdrawal{},
|
||||
}
|
||||
parent := bytesutil.PadTo([]byte("parentHash"), fieldparams.RootLength)
|
||||
sha3Uncles := bytesutil.PadTo([]byte("sha3Uncles"), fieldparams.RootLength)
|
||||
miner := bytesutil.PadTo([]byte("miner"), fieldparams.FeeRecipientLength)
|
||||
@@ -1304,8 +1091,7 @@ func fixtures() map[string]interface{} {
|
||||
transactionsRoot := bytesutil.PadTo([]byte("transactionsRoot"), fieldparams.RootLength)
|
||||
receiptsRoot := bytesutil.PadTo([]byte("receiptsRoot"), fieldparams.RootLength)
|
||||
logsBloom := bytesutil.PadTo([]byte("logs"), fieldparams.LogsBloomLength)
|
||||
executionBlock := &pb.ExecutionBlock{
|
||||
Version: version.Bellatrix,
|
||||
executionBlock := &pb.ExecutionBlockBellatrix{
|
||||
Header: gethtypes.Header{
|
||||
ParentHash: common.BytesToHash(parent),
|
||||
UncleHash: common.BytesToHash(sha3Uncles),
|
||||
@@ -1391,7 +1177,6 @@ func fixtures() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"ExecutionBlock": executionBlock,
|
||||
"ExecutionPayload": executionPayloadFixture,
|
||||
"ExecutionPayloadCapella": executionPayloadFixtureCapella,
|
||||
"ValidPayloadStatus": validStatus,
|
||||
"InvalidBlockHashStatus": inValidBlockHashStatus,
|
||||
"AcceptedStatus": acceptedStatus,
|
||||
@@ -1409,7 +1194,7 @@ func fixtures() map[string]interface{} {
|
||||
func Test_fullPayloadFromExecutionBlock(t *testing.T) {
|
||||
type args struct {
|
||||
header *pb.ExecutionPayloadHeader
|
||||
block *pb.ExecutionBlock
|
||||
block *pb.ExecutionBlockBellatrix
|
||||
}
|
||||
wantedHash := common.BytesToHash([]byte("foo"))
|
||||
tests := []struct {
|
||||
@@ -1424,9 +1209,8 @@ func Test_fullPayloadFromExecutionBlock(t *testing.T) {
|
||||
header: &pb.ExecutionPayloadHeader{
|
||||
BlockHash: []byte("foo"),
|
||||
},
|
||||
block: &pb.ExecutionBlock{
|
||||
Version: version.Bellatrix,
|
||||
Hash: common.BytesToHash([]byte("bar")),
|
||||
block: &pb.ExecutionBlockBellatrix{
|
||||
Hash: common.BytesToHash([]byte("bar")),
|
||||
},
|
||||
},
|
||||
err: "does not match execution block hash",
|
||||
@@ -1437,9 +1221,8 @@ func Test_fullPayloadFromExecutionBlock(t *testing.T) {
|
||||
header: &pb.ExecutionPayloadHeader{
|
||||
BlockHash: wantedHash[:],
|
||||
},
|
||||
block: &pb.ExecutionBlock{
|
||||
Version: version.Bellatrix,
|
||||
Hash: wantedHash,
|
||||
block: &pb.ExecutionBlockBellatrix{
|
||||
Hash: wantedHash,
|
||||
},
|
||||
},
|
||||
want: func() interfaces.ExecutionData {
|
||||
@@ -1544,9 +1327,9 @@ func (*testEngineService) NoArgsRets() {}
|
||||
|
||||
func (*testEngineService) GetBlockByHash(
|
||||
_ context.Context, _ common.Hash, _ bool,
|
||||
) *pb.ExecutionBlock {
|
||||
) *pb.ExecutionBlockBellatrix {
|
||||
fix := fixtures()
|
||||
item, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
|
||||
item, ok := fix["ExecutionBlock"].(*pb.ExecutionBlockBellatrix)
|
||||
if !ok {
|
||||
panic("not found")
|
||||
}
|
||||
@@ -1555,9 +1338,9 @@ func (*testEngineService) GetBlockByHash(
|
||||
|
||||
func (*testEngineService) GetBlockByNumber(
|
||||
_ context.Context, _ string, _ bool,
|
||||
) *pb.ExecutionBlock {
|
||||
) *pb.ExecutionBlockBellatrix {
|
||||
fix := fixtures()
|
||||
item, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
|
||||
item, ok := fix["ExecutionBlock"].(*pb.ExecutionBlockBellatrix)
|
||||
if !ok {
|
||||
panic("not found")
|
||||
}
|
||||
@@ -1575,17 +1358,6 @@ func (*testEngineService) GetPayloadV1(
|
||||
return item
|
||||
}
|
||||
|
||||
func (*testEngineService) GetPayloadV2(
|
||||
_ context.Context, _ pb.PayloadIDBytes,
|
||||
) *pb.ExecutionPayloadCapella {
|
||||
fix := fixtures()
|
||||
item, ok := fix["ExecutionPayloadCapella"].(*pb.ExecutionPayloadCapella)
|
||||
if !ok {
|
||||
panic("not found")
|
||||
}
|
||||
return item
|
||||
}
|
||||
|
||||
func (*testEngineService) ExchangeTransitionConfigurationV1(
|
||||
_ context.Context, _ *pb.TransitionConfiguration,
|
||||
) *pb.TransitionConfiguration {
|
||||
@@ -1609,18 +1381,6 @@ func (*testEngineService) ForkchoiceUpdatedV1(
|
||||
return item
|
||||
}
|
||||
|
||||
func (*testEngineService) ForkchoiceUpdatedV2(
|
||||
_ context.Context, _ *pb.ForkchoiceState, _ *pb.PayloadAttributes,
|
||||
) *ForkchoiceUpdatedResponse {
|
||||
fix := fixtures()
|
||||
item, ok := fix["ForkchoiceUpdatedResponse"].(*ForkchoiceUpdatedResponse)
|
||||
if !ok {
|
||||
panic("not found")
|
||||
}
|
||||
item.Status.Status = pb.PayloadStatus_VALID
|
||||
return item
|
||||
}
|
||||
|
||||
func (*testEngineService) NewPayloadV1(
|
||||
_ context.Context, _ *pb.ExecutionPayload,
|
||||
) *pb.PayloadStatus {
|
||||
@@ -1632,17 +1392,6 @@ func (*testEngineService) NewPayloadV1(
|
||||
return item
|
||||
}
|
||||
|
||||
func (*testEngineService) NewPayloadV2(
|
||||
_ context.Context, _ *pb.ExecutionPayloadCapella,
|
||||
) *pb.PayloadStatus {
|
||||
fix := fixtures()
|
||||
item, ok := fix["ValidPayloadStatus"].(*pb.PayloadStatus)
|
||||
if !ok {
|
||||
panic("not found")
|
||||
}
|
||||
return item
|
||||
}
|
||||
|
||||
func forkchoiceUpdateSetup(t *testing.T, fcs *pb.ForkchoiceState, att *pb.PayloadAttributes, res *ForkchoiceUpdatedResponse) *Service {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
@@ -1682,45 +1431,6 @@ func forkchoiceUpdateSetup(t *testing.T, fcs *pb.ForkchoiceState, att *pb.Payloa
|
||||
return service
|
||||
}
|
||||
|
||||
func forkchoiceUpdateSetupV2(t *testing.T, fcs *pb.ForkchoiceState, att *pb.PayloadAttributesV2, res *ForkchoiceUpdatedResponse) *Service {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
enc, err := io.ReadAll(r.Body)
|
||||
require.NoError(t, err)
|
||||
jsonRequestString := string(enc)
|
||||
|
||||
forkChoiceStateReq, err := json.Marshal(fcs)
|
||||
require.NoError(t, err)
|
||||
payloadAttrsReq, err := json.Marshal(att)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We expect the JSON string RPC request contains the right arguments.
|
||||
require.Equal(t, true, strings.Contains(
|
||||
jsonRequestString, string(forkChoiceStateReq),
|
||||
))
|
||||
require.Equal(t, true, strings.Contains(
|
||||
jsonRequestString, string(payloadAttrsReq),
|
||||
))
|
||||
resp := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": res,
|
||||
}
|
||||
err = json.NewEncoder(w).Encode(resp)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
service := &Service{}
|
||||
service.rpcClient = rpcClient
|
||||
return service
|
||||
}
|
||||
|
||||
func newPayloadSetup(t *testing.T, status *pb.PayloadStatus, payload *pb.ExecutionPayload) *Service {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
@@ -1754,37 +1464,3 @@ func newPayloadSetup(t *testing.T, status *pb.PayloadStatus, payload *pb.Executi
|
||||
service.rpcClient = rpcClient
|
||||
return service
|
||||
}
|
||||
|
||||
func newPayloadV2Setup(t *testing.T, status *pb.PayloadStatus, payload *pb.ExecutionPayloadCapella) *Service {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
enc, err := io.ReadAll(r.Body)
|
||||
require.NoError(t, err)
|
||||
jsonRequestString := string(enc)
|
||||
|
||||
reqArg, err := json.Marshal(payload)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We expect the JSON string RPC request contains the right arguments.
|
||||
require.Equal(t, true, strings.Contains(
|
||||
jsonRequestString, string(reqArg),
|
||||
))
|
||||
resp := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": status,
|
||||
}
|
||||
err = json.NewEncoder(w).Encode(resp)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
service := &Service{}
|
||||
service.rpcClient = rpcClient
|
||||
return service
|
||||
}
|
||||
|
||||
@@ -288,13 +288,13 @@ func (s *Service) processPastLogs(ctx context.Context) error {
headersMap := make(map[uint64]*types.HeaderInfo)
rawLogCount, err := s.depositContractCaller.GetDepositCount(&bind.CallOpts{})
if err != nil {
return err
return errors.Wrap(err, "GetDepositCount")
}
logCount := binary.LittleEndian.Uint64(rawLogCount)

latestFollowHeight, err := s.followedBlockHeight(ctx)
if err != nil {
return err
return errors.Wrap(err, "followedBlockHeight")
}

batchSize := s.cfg.eth1HeaderReqLimit
@@ -303,7 +303,7 @@ func (s *Service) processPastLogs(ctx context.Context) error {
for currentBlockNum < latestFollowHeight {
currentBlockNum, batchSize, err = s.processBlockInBatch(ctx, currentBlockNum, latestFollowHeight, batchSize, additiveFactor, logCount, headersMap)
if err != nil {
return err
return errors.Wrapf(err, "processBlockInBatch, num=%d, height=%d", currentBlockNum, latestFollowHeight)
}
}

@@ -313,7 +313,7 @@ func (s *Service) processPastLogs(ctx context.Context) error {

c, err := s.cfg.beaconDB.FinalizedCheckpoint(ctx)
if err != nil {
return err
return errors.Wrap(err, "FinalizedCheckpoint")
}
fRoot := bytesutil.ToBytes32(c.Root)
// Return if no checkpoint exists yet.
@@ -333,7 +333,7 @@ func (s *Service) processPastLogs(ctx context.Context) error {
if isNil || slots.ToEpoch(fState.Slot()) != c.Epoch {
fState, err = s.cfg.stateGen.StateByRoot(ctx, fRoot)
if err != nil {
return err
return errors.Wrapf(err, "StateByRoot=%#x", fRoot)
}
}
if fState != nil && !fState.IsNil() && fState.Eth1DepositIndex() > 0 {

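The change in this hunk is purely about error context: every early return now goes through github.com/pkg/errors so the caller sees which step failed. A tiny, hypothetical illustration of the resulting message shape:

// Not code from the commit; names are made up for illustration.
func depositCount(fetch func() ([]byte, error)) (uint64, error) {
	raw, err := fetch()
	if err != nil {
		// Produces errors that read "GetDepositCount: <underlying error>".
		return 0, errors.Wrap(err, "GetDepositCount")
	}
	return binary.LittleEndian.Uint64(raw), nil
}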
@@ -45,7 +45,7 @@ func (s *Service) setupExecutionClientConnections(ctx context.Context, currEndpo
}
return errors.Wrap(err, errStr)
}
s.updateConnectedETH1(true)
s.updateConnectedETH1(true, "execution chain connection established")
s.runError = nil
return nil
}
@@ -88,8 +88,9 @@ func (s *Service) pollConnectionStatus(ctx context.Context) {

// Forces to retry an execution client connection.
func (s *Service) retryExecutionClientConnection(ctx context.Context, err error) {
log.Debugf("retryExecutionClientConnection, err=%s", err.Error())
s.runError = errors.Wrap(err, "retryExecutionClientConnection")
s.updateConnectedETH1(false)
s.updateConnectedETH1(false, "retryExecutionClientConnection")
// Back off for a while before redialing.
time.Sleep(backOffPeriod)
currClient := s.rpcClient

@@ -307,7 +307,8 @@ func (s *Service) updateBeaconNodeStats() {
s.cfg.beaconNodeStatsUpdater.Update(bs)
}

func (s *Service) updateConnectedETH1(state bool) {
func (s *Service) updateConnectedETH1(state bool, reason string) {
log.Infof("updateConnectedETH1 - %s", reason)
s.connectedETH1 = state
s.updateBeaconNodeStats()
}
@@ -319,12 +320,11 @@ func (s *Service) followedBlockHeight(ctx context.Context) (uint64, error) {
latestBlockTime := uint64(0)
if s.latestEth1Data.BlockTime > followTime {
latestBlockTime = s.latestEth1Data.BlockTime - followTime
// This should only come into play in testnets - when the chain hasn't advanced past the follow distance,
// we don't want to consider any block before the genesis block.
if s.latestEth1Data.BlockHeight < params.BeaconConfig().Eth1FollowDistance {
latestBlockTime = s.latestEth1Data.BlockTime
}
}
log.WithField("distance", params.BeaconConfig().Eth1FollowDistance).WithField("followTime", followTime).WithField("BlockTime", s.latestEth1Data.BlockTime).WithField("BlockHeight", s.latestEth1Data.BlockHeight).WithField("latestBlockTime", latestBlockTime).Info("followedBlockHeight")
blk, err := s.BlockByTimestamp(ctx, latestBlockTime)
if err != nil {
return 0, errors.Wrapf(err, "BlockByTimestamp=%d", latestBlockTime)
@@ -483,7 +483,8 @@ func (s *Service) handleETH1FollowDistance() {
// logs for the powchain service to process. Also it is a potential
// failure condition as would mean we have not respected the protocol threshold.
if s.latestEth1Data.LastRequestedBlock == s.latestEth1Data.BlockHeight {
log.Error("Beacon node is not respecting the follow distance")
// TODO this is usually an error but temporarily setting it to warn because its noisy when starting from genesis
log.Warn("Beacon node is not respecting the follow distance")
return
}
if err := s.requestBatchedHeadersAndLogs(ctx); err != nil {
@@ -594,7 +595,7 @@ func (s *Service) run(done <-chan struct{}) {
s.isRunning = false
s.runError = nil
s.rpcClient.Close()
s.updateConnectedETH1(false)
s.updateConnectedETH1(false, "context canceled in run()")
log.Debug("Context closed, exiting goroutine")
return
case <-s.eth1HeadTicker.C:
@@ -707,8 +708,6 @@ func (s *Service) determineEarliestVotingBlock(ctx context.Context, followBlock
}
return earliestBlk, nil
}
// This should only come into play in testnets - when the chain hasn't advanced past the follow distance,
// we don't want to consider any block before the genesis block.
if s.latestEth1Data.BlockHeight < params.BeaconConfig().Eth1FollowDistance {
return 0, nil
}
@@ -718,9 +717,13 @@ func (s *Service) determineEarliestVotingBlock(ctx context.Context, followBlock
return 0, errors.Errorf("invalid genesis time provided. %d > %d", followBackDist, votingTime)
}
earliestValidTime := votingTime - followBackDist
if earliestValidTime < genesisTime {
return 0, nil
}
/*
if earliestValidTime > s.latestEth1Data.BlockTime {
return 0, nil
}
log.WithField("earliestValidTime", earliestValidTime).Info("calling BlockByTimestamp")
*/
log.WithField("distance", params.BeaconConfig().Eth1FollowDistance).WithField("earliestValidTime", earliestValidTime).WithField("BlockTime", s.latestEth1Data.BlockTime).WithField("BlockHeight", s.latestEth1Data.BlockHeight).WithField("followBlock", followBlock).Info("determineEarliestVotingBlock")
hdr, err := s.BlockByTimestamp(ctx, earliestValidTime)
if err != nil {
return 0, err

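The guard added to followedBlockHeight above reduces to a small clamp: subtract the follow window from the latest eth1 block time, but fall back to the raw block time while the chain is still shorter than Eth1FollowDistance. A standalone sketch of that arithmetic with simplified names (not the service code):

// followedTime mirrors the clamp in the hunk above using plain uint64 inputs.
func followedTime(blockTime, blockHeight, followTime, followDistance uint64) uint64 {
	latest := uint64(0)
	if blockTime > followTime {
		latest = blockTime - followTime
		if blockHeight < followDistance {
			// The chain has not advanced past the follow distance yet (young testnet),
			// so do not look further back than the latest block itself.
			latest = blockTime
		}
	}
	return latest
}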
@@ -26,7 +26,6 @@ go_library(
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//accounts/abi/bind/backends:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",

@@ -1,7 +1,7 @@
package testing

import (
"fmt"
"encoding/json"
"math"
"math/big"

@@ -9,18 +9,17 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/params"
"github.com/pkg/errors"
clparams "github.com/prysmaticlabs/prysm/v3/config/params"
)

// defaultMinerAddress is used to send deposits and test transactions in the e2e test.
// This account is given a large initial balance in the genesis block in test setups.
const defaultMinerAddress = "0x878705ba3f8bc32fcf7f4caa1a35e72af65cf766"
const defaultTestChainId int64 = 1337
const defaultCoinbase = "0x0000000000000000000000000000000000000000"
const defaultDifficulty = "1"
const defaultMixhash = "0x0000000000000000000000000000000000000000000000000000000000000000"
const defaultParenthash = "0x0000000000000000000000000000000000000000000000000000000000000000"
const defaultMinerBalance = "100000000000000000000000000000"
const DefaultTestChainId int64 = 1337
const DefaultCoinbase = "0x0000000000000000000000000000000000000000"
const DefaultDifficulty = "0x20000"
const DefaultMixhash = "0x0000000000000000000000000000000000000000000000000000000000000000"
const DefaultParenthash = "0x0000000000000000000000000000000000000000000000000000000000000000"
const DefaultMinerAddress = "0x878705ba3f8bc32fcf7f4caa1a35e72af65cf766"
const DefaultMinerBalance = "100000000000000000000000000000"

// DepositContractCode is the compiled deposit contract code, via https://github.com/protolambda/merge-genesis-tools
// This is embedded into genesis so that we can start the chain at a merge block.
@@ -78,13 +77,9 @@ const DefaultCliqueSigner = "0x0000000000000000000000000000000000000000000000000
// DefaultTestnetGenesis creates a genesis.json for eth1 clients with a set of defaults suitable for ephemeral testnets,
// like in an e2e test. The parameters are minimal but the full value is returned unmarshaled so that it can be
// customized as desired.
func GethTestnetGenesis(genesisTime uint64, cfg *clparams.BeaconChainConfig) core.Genesis {
ttd, ok := big.NewInt(0).SetString(clparams.BeaconConfig().TerminalTotalDifficulty, 10)
if !ok {
panic(fmt.Sprintf("unable to parse TerminalTotalDifficulty as an integer = %s", clparams.BeaconConfig().TerminalTotalDifficulty))
}
func GethTestnetGenesis(genesisTime uint64, cfg *clparams.BeaconChainConfig) *core.Genesis {
cc := &params.ChainConfig{
ChainID: big.NewInt(defaultTestChainId),
ChainID: big.NewInt(DefaultTestChainId),
HomesteadBlock: bigz,
DAOForkBlock: bigz,
EIP150Block: bigz,
@@ -100,51 +95,43 @@ func GethTestnetGenesis(genesisTime uint64, cfg *clparams.BeaconChainConfig) cor
ArrowGlacierBlock: bigz,
GrayGlacierBlock: bigz,
MergeNetsplitBlock: bigz,
TerminalTotalDifficulty: ttd,
TerminalTotalDifficulty: bigz,
TerminalTotalDifficultyPassed: false,
Clique: &params.CliqueConfig{
Period: cfg.SecondsPerETH1Block,
Epoch: 20000,
},
}
da := defaultDepositContractAllocation(cfg.DepositContractAddress)
ma := minerAllocation()
extra, err := hexutil.Decode(DefaultCliqueSigner)
if err != nil {
panic(fmt.Sprintf("unable to decode DefaultCliqueSigner, with error %v", err.Error()))
}
return core.Genesis{
da := DefaultDepositContractAllocation(cfg.DepositContractAddress)
ma := MinerAllocation()
return &core.Genesis{
Config: cc,
Nonce: 0, // overridden for authorized signer votes in clique, so we should leave it empty?
Timestamp: genesisTime,
ExtraData: extra,
ExtraData: []byte(DefaultCliqueSigner),
GasLimit: math.MaxUint64 >> 1, // shift 1 back from the max, just in case
Difficulty: common.HexToHash(defaultDifficulty).Big(),
Mixhash: common.HexToHash(defaultMixhash),
Coinbase: common.HexToAddress(defaultCoinbase),
Difficulty: common.HexToHash(DefaultDifficulty).Big(),
Mixhash: common.HexToHash(DefaultMixhash),
Coinbase: common.HexToAddress(DefaultCoinbase),
Alloc: core.GenesisAlloc{
da.Address: da.Account,
ma.Address: ma.Account,
},
ParentHash: common.HexToHash(defaultParenthash),
ParentHash: common.HexToHash(DefaultParenthash),
}
}

type depositAllocation struct {
type DepositAllocation struct {
Address common.Address
Account core.GenesisAccount
}

func minerAllocation() depositAllocation {
return depositAllocation{
Address: common.HexToAddress(defaultMinerAddress),
func MinerAllocation() DepositAllocation {
return DepositAllocation{
Address: common.HexToAddress(DefaultMinerAddress),
Account: core.GenesisAccount{
Balance: minerBalance,
},
}
}

func defaultDepositContractAllocation(contractAddress string) depositAllocation {
func DefaultDepositContractAllocation(contractAddress string) DepositAllocation {
s := make(map[common.Hash]common.Hash)
for k, v := range DefaultDepositContractStorage {
s[common.HexToHash(k)] = common.HexToHash(v)
@@ -153,7 +140,7 @@ func defaultDepositContractAllocation(contractAddress string) depositAllocation
if err != nil {
panic(err)
}
return depositAllocation{
return DepositAllocation{
Address: common.HexToAddress(contractAddress),
Account: core.GenesisAccount{
Code: codeBytes,
@@ -168,8 +155,47 @@ func deterministicNonce(i uint64) uint64 {
return math.MaxUint64/2 + i
}

var ErrMalformedGenesisJson = errors.New("encoded genesis.json doesn't have expected layout")

// TerribleMarshalHack calls json.Marshal and also replaces the contract address string with a '0x' prefixed version,
// because for some reason geth broke their json marshaling.
func TerribleMarshalHack(g *core.Genesis, addresses ...string) ([]byte, error) {
b, err := json.Marshal(g)
if err != nil {
return nil, err
}
or := make(map[string]json.RawMessage)
err = json.Unmarshal(b, &or)
if err != nil {
return nil, err
}
alloc, ok := or["alloc"]
if !ok {
return nil, errors.Wrap(ErrMalformedGenesisJson, "no key in outer struct named 'alloc'")
}
ir := make(map[string]json.RawMessage)
err = json.Unmarshal(alloc, &ir)
if err != nil {
return nil, err
}
for _, addr := range addresses {
contract, ok := ir[addr[2:]]
if !ok {
return nil, errors.Wrapf(ErrMalformedGenesisJson, "no address matching %s", addr)
}
delete(ir, addr[2:])
ir[addr] = contract
}
irb, err := json.Marshal(ir)
if err != nil {
return nil, err
}
or["alloc"] = irb
return json.Marshal(or)
}

func init() {
err := minerBalance.UnmarshalText([]byte(defaultMinerBalance))
err := minerBalance.UnmarshalText([]byte(DefaultMinerBalance))
if err != nil {
panic(err)
}

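A hypothetical use of the helpers exported above, tying GethTestnetGenesis and TerribleMarshalHack together. The output filename, the os.WriteFile call, and the assumption that both alloc keys lose their 0x prefix during marshaling are illustrative, not something this commit specifies.

// Sketch only: build the clique testnet genesis and persist it with fixed-up alloc keys.
// Requires the standard library "os" import in addition to this file's imports.
func writeTestnetGenesis(genesisTime uint64, cfg *clparams.BeaconChainConfig) error {
	g := GethTestnetGenesis(genesisTime, cfg)
	enc, err := TerribleMarshalHack(g, cfg.DepositContractAddress, DefaultMinerAddress)
	if err != nil {
		return err
	}
	return os.WriteFile("genesis.json", enc, 0o600)
}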
@@ -2,6 +2,7 @@ package testing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@@ -15,7 +16,6 @@ import (
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
|
||||
pb "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/v3/time/slots"
|
||||
)
|
||||
|
||||
// EngineClient --
|
||||
@@ -24,8 +24,7 @@ type EngineClient struct {
|
||||
PayloadIDBytes *pb.PayloadIDBytes
|
||||
ForkChoiceUpdatedResp []byte
|
||||
ExecutionPayload *pb.ExecutionPayload
|
||||
ExecutionPayloadCapella *pb.ExecutionPayloadCapella
|
||||
ExecutionBlock *pb.ExecutionBlock
|
||||
ExecutionBlock *pb.ExecutionBlockBellatrix
|
||||
Err error
|
||||
ErrLatestExecBlock error
|
||||
ErrExecBlockByHash error
|
||||
@@ -33,7 +32,7 @@ type EngineClient struct {
|
||||
ErrNewPayload error
|
||||
ErrGetPayload error
|
||||
ExecutionPayloadByBlockHash map[[32]byte]*pb.ExecutionPayload
|
||||
BlockByHashMap map[[32]byte]*pb.ExecutionBlock
|
||||
BlockByHashMap map[[32]byte]*pb.ExecutionBlockBellatrix
|
||||
NumReconstructedPayloads uint64
|
||||
TerminalBlockHash []byte
|
||||
TerminalBlockHashExists bool
|
||||
@@ -56,10 +55,7 @@ func (e *EngineClient) ForkchoiceUpdated(
|
||||
}
|
||||
|
||||
// GetPayload --
|
||||
func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, s types.Slot) (interfaces.ExecutionData, error) {
|
||||
if slots.ToEpoch(s) >= params.BeaconConfig().CapellaForkEpoch {
|
||||
return blocks.WrappedExecutionPayloadCapella(e.ExecutionPayloadCapella)
|
||||
}
|
||||
func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, slot types.Slot) (interfaces.ExecutionData, error) {
|
||||
p, err := blocks.WrappedExecutionPayload(e.ExecutionPayload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -67,18 +63,23 @@ func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, s types.Slot) (i
|
||||
return p, e.ErrGetPayload
|
||||
}
|
||||
|
||||
// GetPayloadV2 --
|
||||
func (e *EngineClient) GetPayloadV2(_ context.Context, _ [8]byte) (*pb.ExecutionPayloadCapella, error) {
|
||||
return nil, nil
|
||||
}
|
||||

// ExchangeTransitionConfiguration --
func (e *EngineClient) ExchangeTransitionConfiguration(_ context.Context, _ *pb.TransitionConfiguration) error {
	return e.Err
}

// LatestExecutionBlock --
func (e *EngineClient) LatestExecutionBlock(_ context.Context) (*pb.ExecutionBlock, error) {
func (e *EngineClient) LatestExecutionBlock(_ context.Context) (*pb.ExecutionBlockBellatrix, error) {
	return e.ExecutionBlock, e.ErrLatestExecBlock
}

// ExecutionBlockByHash --
func (e *EngineClient) ExecutionBlockByHash(_ context.Context, h common.Hash, _ bool) (*pb.ExecutionBlock, error) {
func (e *EngineClient) ExecutionBlockByHash(_ context.Context, _ int, h common.Hash, _ bool) (interface{}, error) {
	b, ok := e.BlockByHashMap[h]
	if !ok {
		return nil, errors.New("block not found")
@@ -148,12 +149,16 @@ func (e *EngineClient) GetTerminalBlockHash(ctx context.Context, transitionTime
		if parentHash == params.BeaconConfig().ZeroHash {
			return nil, false, nil
		}
		parentBlk, err := e.ExecutionBlockByHash(ctx, parentHash, false /* with txs */)
		parentBlk, err := e.ExecutionBlockByHash(ctx, 0, parentHash, false /* with txs */)
		if err != nil {
			return nil, false, errors.Wrap(err, "could not get parent execution block")
		}
		parentBlkBellatrix, ok := parentBlk.(*pb.ExecutionBlockBellatrix)
		if !ok {
			return nil, false, fmt.Errorf("wrong execution block type %T", parentBlk)
		}
		if blockReachedTTD {
			b, err := hexutil.DecodeBig(parentBlk.TotalDifficulty)
			b, err := hexutil.DecodeBig(parentBlkBellatrix.TotalDifficulty)
			if err != nil {
				return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
			}
@@ -168,6 +173,6 @@ func (e *EngineClient) GetTerminalBlockHash(ctx context.Context, transitionTime
		} else {
			return nil, false, nil
		}
		blk = parentBlk
		blk = parentBlkBellatrix
	}
}
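With ExecutionBlockByHash now returning interface{}, the GetTerminalBlockHash hunk above recovers the concrete block through a comma-ok type assertion (parentBlk.(*pb.ExecutionBlockBellatrix)). A self-contained sketch of that idiom, using stand-in types rather than the real engine protos:

package main

import "fmt"

type executionBlockBellatrix struct{ TotalDifficulty string }

// blockByHash stands in for a lookup that returns an untyped block.
func blockByHash() interface{} {
	return &executionBlockBellatrix{TotalDifficulty: "0x1"}
}

func main() {
	blk := blockByHash()
	// Comma-ok assertion: ok is false (and b is nil) if blk holds a different
	// type, letting the caller return a descriptive error instead of panicking.
	b, ok := blk.(*executionBlockBellatrix)
	if !ok {
		fmt.Printf("wrong execution block type %T\n", blk)
		return
	}
	fmt.Println(b.TotalDifficulty)
}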

@@ -129,7 +129,7 @@ func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, ro
		return errNilBlockHeader
	}
	parentRoot := bytesutil.ToBytes32(bh.ParentRoot)
	var payloadHash [32]byte
	payloadHash := [32]byte{}
	if state.Version() >= version.Bellatrix {
		ph, err := state.LatestExecutionPayloadHeader()
		if err != nil {
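This hunk, and several below it (pr, zeroHash, b, root, mockRoot, someRoot, someKey), swaps between the `payloadHash := [32]byte{}` and `var payloadHash [32]byte` forms of the same declaration. Both forms yield an identical zeroed array, so the change is purely stylistic; a tiny sketch confirming the equivalence:

package main

import "fmt"

func main() {
	var a [32]byte      // zero value: all 32 bytes are 0
	b := [32]byte{}     // empty composite literal: the same zeroed array
	fmt.Println(a == b) // true; Go arrays compare element-wise
}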
@@ -669,14 +669,3 @@ func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*v1.ForkChoiceDump, er
func (f *ForkChoice) SetBalancesByRooter(handler forkchoice.BalancesByRooter) {
	f.balancesByRoot = handler
}

// Weight returns the weight of the given root if found on the store
func (f *ForkChoice) Weight(root [32]byte) (uint64, error) {
	f.store.nodesLock.RLock()
	defer f.store.nodesLock.RUnlock()
	n, ok := f.store.nodeByRoot[root]
	if !ok || n == nil {
		return 0, ErrNilNode
	}
	return n.weight, nil
}

@@ -578,7 +578,7 @@ func TestStore_InsertOptimisticChain(t *testing.T) {
	blks := make([]*forkchoicetypes.BlockAndCheckpoints, 0)
	blk := util.NewBeaconBlock()
	blk.Block.Slot = 1
	var pr [32]byte
	pr := [32]byte{}
	blk.Block.ParentRoot = pr[:]
	root, err := blk.Block.HashTreeRoot()
	require.NoError(t, err)
@@ -757,24 +757,3 @@ func TestForkChoice_UpdateCheckpoints(t *testing.T) {
		})
	}
}

func TestWeight(t *testing.T) {
	ctx := context.Background()
	f := setup(0, 0)

	root := [32]byte{'a'}
	st, blkRoot, err := prepareForkchoiceState(ctx, 0, root, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))

	n, ok := f.store.nodeByRoot[root]
	require.Equal(t, true, ok)
	n.weight = 10
	w, err := f.Weight(root)
	require.NoError(t, err)
	require.Equal(t, uint64(10), w)

	w, err = f.Weight([32]byte{'b'})
	require.ErrorIs(t, err, ErrNilNode)
	require.Equal(t, uint64(0), w)
}

@@ -414,7 +414,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
func TestForkChoice_BoostProposerRoot(t *testing.T) {
	ctx := context.Background()
	root := [32]byte{'A'}
	var zeroHash [32]byte
	zeroHash := [32]byte{}

	t.Run("does not boost block from different slot", func(t *testing.T) {
		f := setup(0, 0)

@@ -360,7 +360,7 @@ func TestForkChoice_HighestReceivedBlockSlotRoot(t *testing.T) {
func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
	f := setup(1, 1)
	s := f.store
	var b [32]byte
	b := [32]byte{}

	// Make sure it doesn't underflow
	s.genesisTime = uint64(time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())

@@ -120,16 +120,16 @@ func TestVotes_CanFindHead(t *testing.T) {
	// |
	// 3
	// |
	// 4
	// 4 <- head
	// /
	// 5 <- head, justified epoch = 2
	// 5 <- justified epoch = 2
	state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(5), indexToHash(4), params.BeaconConfig().ZeroHash, 2, 2)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))

	r, err = f.Head(context.Background(), balances)
	require.NoError(t, err)
	assert.Equal(t, indexToHash(5), r, "Incorrect head for with justified epoch at 2")
	assert.Equal(t, indexToHash(5), r, "Incorrect head for with justified epoch at 1")

	// Insert block 6 with justified epoch 3: verify it's head
	// 0
@@ -138,15 +138,15 @@ func TestVotes_CanFindHead(t *testing.T) {
	// |
	// 3
	// |
	// 4
	// 4 <- head
	// / \
	// 5 6 <- head, justified epoch = 3
	// 5 6 <- justified epoch = 3
	state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(6), indexToHash(4), params.BeaconConfig().ZeroHash, 3, 2)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
	r, err = f.Head(context.Background(), balances)
	require.NoError(t, err)
	assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 3")
	assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 1")

	// Moved 2 votes to block 5:
	f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(5), 4)
@@ -174,10 +174,10 @@ func TestVotes_CanFindHead(t *testing.T) {
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
	r, err = f.Head(context.Background(), balances)
	require.NoError(t, err)
	assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 3")
	assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 1")

	// Insert block 10 with justified epoch 3, it becomes head
	// Verify 10 is the head:
	// Insert block 9 with justified epoch 3, it becomes head
	// Verify 9 is the head:
	// 0
	// / \
	// 2 1
@@ -289,7 +289,7 @@ func TestVotes_CanFindHead(t *testing.T) {

	r, err = f.Head(context.Background(), balances)
	require.NoError(t, err)
	assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3")
	assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 2")

	// Insert new block 11 and verify head is at 11.
	// 5 6

@@ -69,7 +69,6 @@ type Getter interface {
	HighestReceivedBlockRoot() [32]byte
	ReceivedBlocksLastEpoch() (uint64, error)
	ForkChoiceDump(context.Context) (*v1.ForkChoiceDump, error)
	Weight(root [32]byte) (uint64, error)
	VotedFraction(root [32]byte) (uint64, error)
}

@@ -103,7 +103,7 @@ func TestProcessUnaggregatedAttestationStateCached(t *testing.T) {
	participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
	require.NoError(t, state.SetCurrentParticipationBits(participation))

	var root [32]byte
	root := [32]byte{}
	copy(root[:], "hello-world")

	att := &ethpb.Attestation{
@@ -175,7 +175,7 @@ func TestProcessAggregatedAttestationStateCached(t *testing.T) {
	participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
	require.NoError(t, state.SetCurrentParticipationBits(participation))

	var root [32]byte
	root := [32]byte{}
	copy(root[:], "hello-world")

	att := &ethpb.AggregateAttestationAndProof{

@@ -170,7 +170,7 @@ func TestProcessProposedBlock(t *testing.T) {
	hook := logTest.NewGlobal()
	s := setupService(t)
	beaconState, _ := util.DeterministicGenesisState(t, 256)
	var root [32]byte
	root := [32]byte{}
	copy(root[:], "hello-world")
	wb, err := blocks.NewBeaconBlock(tt.block)
	require.NoError(t, err)

@@ -448,11 +448,6 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
		return nil
	}
	baseDir := cliCtx.String(cmd.DataDirFlag.Name)

	if cliCtx.IsSet(flags.SlasherDirFlag.Name) {
		baseDir = cliCtx.String(flags.SlasherDirFlag.Name)
	}

	dbPath := filepath.Join(baseDir, kv.BeaconNodeDbDirName)
	clearDB := cliCtx.Bool(cmd.ClearDB.Name)
	forceClearDB := cliCtx.Bool(cmd.ForceClearDB.Name)

@@ -95,8 +95,8 @@ func TestNodeStart_Ok_registerDeterministicGenesisService(t *testing.T) {
	genesisState, _, err := interop.GenerateGenesisState(context.Background(), 0, numValidators)
	require.NoError(t, err, "Could not generate genesis beacon state")
	for i := uint64(1); i < 2; i++ {
		var someRoot [32]byte
		var someKey [fieldparams.BLSPubkeyLength]byte
		someRoot := [32]byte{}
		someKey := [fieldparams.BLSPubkeyLength]byte{}
		copy(someRoot[:], strconv.Itoa(int(i)))
		copy(someKey[:], strconv.Itoa(int(i)))
		genesisState.Validators = append(genesisState.Validators, &ethpb.Validator{

@@ -52,8 +52,8 @@ func (c *AttCaches) SaveUnaggregatedAttestations(atts []*ethpb.Attestation) erro

// UnaggregatedAttestations returns all the unaggregated attestations in cache.
func (c *AttCaches) UnaggregatedAttestations() ([]*ethpb.Attestation, error) {
	c.unAggregateAttLock.RLock()
	defer c.unAggregateAttLock.RUnlock()
	c.unAggregateAttLock.Lock()
	defer c.unAggregateAttLock.Unlock()
	unAggregatedAtts := c.unAggregatedAtt
	atts := make([]*ethpb.Attestation, 0, len(unAggregatedAtts))
	for _, att := range unAggregatedAtts {
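The two lock/unlock pairs above are the before and after of the same accessor: one takes the shared read lock of the pool's sync.RWMutex, the other the exclusive write lock. A self-contained sketch of the distinction, using a made-up cache type rather than AttCaches:

package main

import (
	"fmt"
	"sync"
)

// cache stands in for an attestation pool guarded by a sync.RWMutex.
type cache struct {
	mu    sync.RWMutex
	items map[string]int
}

// snapshot copies the map under the shared read lock: many readers may hold
// RLock concurrently, but they block, and are blocked by, writers.
func (c *cache) snapshot() map[string]int {
	c.mu.RLock()
	defer c.mu.RUnlock()
	out := make(map[string]int, len(c.items))
	for k, v := range c.items {
		out[k] = v
	}
	return out
}

// put mutates the map, so it needs the exclusive write lock.
func (c *cache) put(k string, v int) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items[k] = v
}

func main() {
	c := &cache{items: map[string]int{}}
	c.put("a", 1)
	fmt.Println(c.snapshot())
}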

@@ -120,7 +120,7 @@ func TestBatchAttestations_Single(t *testing.T) {
	priv, err := bls.RandKey()
	require.NoError(t, err)
	sig := priv.Sign([]byte("dummy_test_data"))
	var mockRoot [32]byte
	mockRoot := [32]byte{}
	d := &ethpb.AttestationData{
		BeaconBlockRoot: mockRoot[:],
		Source:          &ethpb.Checkpoint{Root: mockRoot[:]},
@@ -162,7 +162,7 @@ func TestAggregateAndSaveForkChoiceAtts_Single(t *testing.T) {
	priv, err := bls.RandKey()
	require.NoError(t, err)
	sig := priv.Sign([]byte("dummy_test_data"))
	var mockRoot [32]byte
	mockRoot := [32]byte{}
	d := &ethpb.AttestationData{
		BeaconBlockRoot: mockRoot[:],
		Source:          &ethpb.Checkpoint{Root: mockRoot[:]},
@@ -186,7 +186,7 @@ func TestAggregateAndSaveForkChoiceAtts_Multiple(t *testing.T) {
	priv, err := bls.RandKey()
	require.NoError(t, err)
	sig := priv.Sign([]byte("dummy_test_data"))
	var mockRoot [32]byte
	mockRoot := [32]byte{}
	d := &ethpb.AttestationData{
		BeaconBlockRoot: mockRoot[:],
		Source:          &ethpb.Checkpoint{Root: mockRoot[:]},

@@ -59,8 +59,8 @@ func (s *Service) pruneExpiredAtts() {
			if err := s.cfg.Pool.DeleteBlockAttestation(att); err != nil {
				log.WithError(err).Error("Could not delete expired block attestation")
			}
			expiredBlockAtts.Inc()
		}
		expiredBlockAtts.Inc()
	}
}
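The two expiredBlockAtts.Inc() lines above are the before/after of the same hunk: the counter bump moves relative to an enclosing block, which changes at what point an expired attestation is counted during pruning. As a reminder of how such a counter is defined and bumped, a minimal sketch with a hypothetical metric name (not Prysm's real metric):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// expiredItems plays the role of expiredBlockAtts; the metric name is made up.
var expiredItems = prometheus.NewCounter(prometheus.CounterOpts{
	Name: "expired_items_total",
	Help: "Number of expired items pruned from the pool.",
})

// prune deletes each item and bumps the counter once per pruned item,
// regardless of whether the deletion itself failed. Where exactly to place
// the Inc call relative to the error branch is the choice the hunk adjusts.
func prune(items []string, del func(string) error) {
	for _, it := range items {
		if err := del(it); err != nil {
			fmt.Println("could not delete:", err)
		}
		expiredItems.Inc()
	}
}

func main() {
	prometheus.MustRegister(expiredItems)
	prune([]string{"a", "b"}, func(string) error { return nil })
}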

@@ -18,6 +18,7 @@ go_library(
        "//container/doubly-linked-list:go_default_library",
        "//crypto/bls/blst:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
)

@@ -1,14 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    testonly = True,
    srcs = ["mock.go"],
    importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/blstoexec/mock",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/state:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
    ],
)
Some files were not shown because too many files have changed in this diff.