Compare commits

...

27 Commits

Author SHA1 Message Date
terence tsao
2d217579f1 Update BUILD.bazel 2022-06-23 22:45:32 -04:00
terence tsao
33ef61ad26 Revert disallow lower checkpoint 2022-06-23 22:38:47 -04:00
Nishant Das
58d10e3ace Deprecate Step Parameter from our Block By Range Requests (#10914)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-06-23 21:48:28 +00:00
terencechain
88becdc114 Register builder service (#10924) 2022-06-23 13:44:27 -07:00
Potuz
20b4c60bcb Apply Proposer Boost from forkchoice (#10893)
* Apply proposer boost at block insertion

* gaz

* fix tests

* revert time change

* conflict

* change genesis time in forkchoice on spectests

* Terence Review

Co-authored-by: terencechain <terence@prysmaticlabs.com>
2022-06-22 13:56:07 +00:00
terencechain
0b50ab7832 Consolidate ExecutionPayloadHeader Protobuf definitions (#10917) 2022-06-22 13:08:06 +02:00
Radosław Kapka
6b55d33ea2 Return error when requesting future state (#10915)
* Return error when requesting future state

* fix genesis test case

* fix test cases
2022-06-21 23:13:19 +00:00
terencechain
7e64a3963d Change submit validator registration to registration(s) (#10907)
* Change submit validator registration to registration(s)

* Fixed a few tests

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-06-21 18:46:28 +00:00
Potuz
55b4af9f92 track the wall clock on forkchoice spectests (#10916) 2022-06-21 18:12:08 +00:00
terencechain
546bb5ed53 Clean up misc warnings (#10900)
* Clean up misc warnings

* Gazelle

* Rm state assignments

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-06-21 15:26:56 +00:00
Radosław Kapka
29a25b3f09 Add spec URLs to API docs (#10912) 2022-06-21 14:29:44 +00:00
Jie Hou
56af079aea Change gocognit complexity threshold to 65 (#10906)
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2022-06-20 03:57:39 +00:00
Potuz
385f101b2b Use forkchoice to verify finalized descendant (#10905)
* Use forkchoice to verify finalized descendant

* fix test
2022-06-20 00:55:17 +00:00
Radosław Kapka
b3c3b207c9 Enable fastssz to use vectorized HTR hashing algo (#10819)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-06-18 12:13:36 +00:00
Jie Hou
e0cd10ed3c Refactor: Reduce cognitive complexity for 5 out of top 10 functions in code base (#10854)
* Refactor waitForActivation

* Finish refactor of runner.go

* Refactor validator/client/metrics.go

* Refactored beacon-chain/sync/pending_attestations_queue.go

* Refactored beacon-chain/powchain/log_processing.go

* Resolve conflicts with develop branch

* Fix Deepsource: Context should be the first param

* Address review comments

* Put headersMap as pass-in function parameter

* Change function signature of processBlockInBatch

* Address nisdas's comment
2022-06-18 10:14:43 +00:00
Potuz
2d0fdf8b4a prune within forkchoice (#10896) 2022-06-17 12:53:17 -03:00
Potuz
3a7117dcbf Do not downcast time in currentslot (#10897)
* Do not downcast time in currentslot

* no magic constants

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-06-16 22:26:55 +00:00
Radosław Kapka
8888fa4bb3 Revert enable-native-state flag (#10898)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-06-16 20:59:46 +00:00
Radosław Kapka
f065209a3e Small API Middleware upgrades (#10895)
* remove useless comments

* add comment

* fix writing bug

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-06-16 19:19:05 +00:00
Potuz
e439f4aff6 Forkchoice duplicate store (#10840)
* forkchoice tests

* blockchain tests

* no fatal errors

* use ZeroHash for genesis

* deal with zerohashes at genesis

* avoid nil best justified checkpoint on new store

* unit tests

* Debug log

Co-authored-by: terencechain <terence@prysmaticlabs.com>

* Terence's review

* update capitalization

Co-authored-by: terencechain <terence@prysmaticlabs.com>

* update capitalization

Co-authored-by: terencechain <terence@prysmaticlabs.com>

* update capitalization

Co-authored-by: terencechain <terence@prysmaticlabs.com>

* update capitalization

Co-authored-by: terencechain <terence@prysmaticlabs.com>

Co-authored-by: terencechain <terence@prysmaticlabs.com>
2022-06-16 18:21:40 +00:00
terencechain
88f8dbecc8 Compute correct domain for validator registration (#10894)
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2022-06-16 16:32:04 +00:00
james-prysm
2dfe291cf9 Keymanager APIs: fee recipient api (#10850)
* initial commit wip

* setting protos for fee recipient

* updating proto based on specs

* updating apimiddleware

* generated APIs

* updating proto to fit spec

* fixing naming of fields

* fixing endpoint_factory and associated structs

* fixing imports

* adding in custom http types to grpc gateway

* adding import options

* changing package option

* still testing protos

* adding to bazel

* testing dependency changes

* more tests

* more tests

* more tests

* more tests

* more tests

* more tests

* testing changing repo dep

* testing deps

* testing deps

* testing deps

* testing deps

* testing deps

* testing deps

* reverting

* testing import

* testing bazel

* bazel test

* testing

* testing

* testing import

* updating generated proto code

* wip set fee recipient by pubkey

* adding list fee recipient logic

* fixing thrown error

* fixing bug with API

* fee recipient delete function

* updating generated proto logic

* fixing deposit api and adding postman tests

* fixing proto imports

* fixing unit tests and checksums

* fixing test

* adding unit tests

* fixing bazel

* Update validator/rpc/standard_api.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* Update validator/rpc/standard_api.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* resolving review comments

* fixing return

* Update config/validator/service/proposer-settings.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/standard_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* updating proto

* updating message name

* fixing imports

* updating based on review comments

* adding middleware unit tests

* capitalizing errors

* using error instead of errorf

* fixing missed unit test variable rename

* fixing format variable

* fixing unit test

* Update validator/rpc/standard_api.go

* Update validator/rpc/standard_api.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-06-16 15:10:23 +00:00
Mike Neuder
a80c15f3a9 Refactor validator accounts import to remove cli context dependency (#10890)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-06-16 14:14:03 +00:00
Nishant Das
4de92bafc4 Improve Field Trie Recomputation (#10884)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-06-16 13:14:29 +00:00
terencechain
69438583e5 Pad Uint256's SSZBytes to length 32 (#10889)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-06-16 04:29:32 +00:00
Raul Jordan
e81f3fed01 Remove Extraneous BoltDB Logs (#10888) 2022-06-16 01:11:07 +00:00
Raul Jordan
1b2a5fb4a5 Update CODEOWNERS (#10887) 2022-06-15 21:51:44 +00:00
198 changed files with 5274 additions and 3586 deletions

.github/CODEOWNERS (10 changed lines)
View File

@@ -9,8 +9,8 @@ deps.bzl @prysmaticlabs/core-team
# Radek and Nishant are responsible for changes that can affect the native state feature.
# See https://www.notion.so/prysmaticlabs/Native-Beacon-State-Redesign-6cc9744b4ec1439bb34fa829b36a35c1
/beacon-chain/state/fieldtrie/ @rkapka @nisdas
/beacon-chain/state/v1/ @rkapka @nisdas
/beacon-chain/state/v2/ @rkapka @nisdas
/beacon-chain/state/v3/ @rkapka @nisdas
/beacon-chain/state/state-native/ @rkapka @nisdas
/beacon-chain/state/fieldtrie/ @rkapka @nisdas @rauljordan
/beacon-chain/state/v1/ @rkapka @nisdas @rauljordan
/beacon-chain/state/v2/ @rkapka @nisdas @rauljordan
/beacon-chain/state/v3/ @rkapka @nisdas @rauljordan
/beacon-chain/state/state-native/ @rkapka @nisdas @rauljordan

View File

@@ -19,7 +19,7 @@ linters:
linters-settings:
gocognit:
# TODO: We should target for < 50
min-complexity: 97
min-complexity: 65
output:
print-issued-lines: true

View File

@@ -14,6 +14,7 @@ import (
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
)
@@ -300,7 +301,7 @@ func testSignedBlindedBeaconBlockBellatrix(t *testing.T) *eth.SignedBlindedBeaco
SyncCommitteeSignature: make([]byte, 48),
SyncCommitteeBits: bitfield.Bitvector512{0x01},
},
ExecutionPayloadHeader: &eth.ExecutionPayloadHeader{
ExecutionPayloadHeader: &v1.ExecutionPayloadHeader{
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),

View File

@@ -63,7 +63,7 @@ func sszBytesToUint256(b []byte) Uint256 {
// SSZBytes creates an ssz-style (little-endian byte slice) representation of the Uint256
func (s Uint256) SSZBytes() []byte {
return bytesutil.ReverseByteOrder(s.Int.Bytes())
return bytesutil.PadTo(bytesutil.ReverseByteOrder(s.Int.Bytes()), 32)
}
var errUnmarshalUint256Failed = errors.New("unable to UnmarshalText into a Uint256 value")
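
A minimal stand-alone sketch of the padded SSZBytes behavior above, using only the standard library (it mirrors the reverse-then-pad logic rather than calling Prysm's bytesutil helpers, so the names here are illustrative):

// big.Int.Bytes() is big-endian with no leading zeros; SSZ wants a
// little-endian, fixed-width 32-byte value, so the bytes are reversed
// and the remainder is left as zero padding.
package main

import (
	"fmt"
	"math/big"
)

func sszUint256Bytes(v *big.Int) []byte {
	be := v.Bytes() // big-endian, minimal length
	out := make([]byte, 32)
	for i, b := range be {
		out[len(be)-1-i] = b // reverse into little-endian order
	}
	return out // trailing bytes stay zero, i.e. padded to length 32
}

func main() {
	v := big.NewInt(4196) // the value used by the middleware tests
	fmt.Println(len(sszUint256Bytes(v)), sszUint256Bytes(v)[:4]) // 32 [100 16 0 0]
}
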
@@ -149,8 +149,8 @@ func (bb *BuilderBid) ToProto() (*eth.BuilderBid, error) {
}, nil
}
func (h *ExecutionPayloadHeader) ToProto() (*eth.ExecutionPayloadHeader, error) {
return &eth.ExecutionPayloadHeader{
func (h *ExecutionPayloadHeader) ToProto() (*v1.ExecutionPayloadHeader, error) {
return &v1.ExecutionPayloadHeader{
ParentHash: h.ParentHash,
FeeRecipient: h.FeeRecipient,
StateRoot: h.StateRoot,
@@ -189,7 +189,7 @@ type ExecutionPayloadHeader struct {
BaseFeePerGas Uint256 `json:"base_fee_per_gas,omitempty"`
BlockHash hexutil.Bytes `json:"block_hash,omitempty"`
TransactionsRoot hexutil.Bytes `json:"transactions_root,omitempty"`
*eth.ExecutionPayloadHeader
*v1.ExecutionPayloadHeader
}
func (h *ExecutionPayloadHeader) MarshalJSON() ([]byte, error) {

View File

@@ -205,7 +205,7 @@ func TestExecutionHeaderResponseToProto(t *testing.T) {
expected := &eth.SignedBuilderBid{
Message: &eth.BuilderBid{
Header: &eth.ExecutionPayloadHeader{
Header: &v1.ExecutionPayloadHeader{
ParentHash: parentHash,
FeeRecipient: feeRecipient,
StateRoot: stateRoot,
@@ -567,9 +567,9 @@ func TestProposerSlashings(t *testing.T) {
require.Equal(t, expected, string(b))
}
func pbExecutionPayloadHeader(t *testing.T) *eth.ExecutionPayloadHeader {
func pbExecutionPayloadHeader(t *testing.T) *v1.ExecutionPayloadHeader {
bfpg := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
return &eth.ExecutionPayloadHeader{
return &v1.ExecutionPayloadHeader{
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
@@ -694,9 +694,10 @@ func TestMarshalBlindedBeaconBlockBodyBellatrix(t *testing.T) {
}
func TestRoundTripUint256(t *testing.T) {
vs := "452312848583266388373324160190187140051835877600158453279131187530910662656"
vs := "4523128485832663883733241601901871400518358776001584532791311875309106626"
u := stringToUint256(vs)
sb := u.SSZBytes()
require.Equal(t, 32, len(sb))
uu := sszBytesToUint256(sb)
require.Equal(t, true, bytes.Equal(u.SSZBytes(), uu.SSZBytes()))
require.Equal(t, vs, uu.String())

View File

@@ -5,6 +5,7 @@ go_library(
srcs = [
"gateway.go",
"log.go",
"modifiers.go",
"options.go",
],
importpath = "github.com/prysmaticlabs/prysm/api/gateway",
@@ -23,6 +24,7 @@ go_library(
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//connectivity:go_default_library",
"@org_golang_google_grpc//credentials:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

View File

@@ -15,6 +15,7 @@ go_library(
deps = [
"//api/grpc:go_default_library",
"//encoding/bytesutil:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
"@com_github_pkg_errors//:go_default_library",

View File

@@ -111,7 +111,7 @@ func (m *ApiProxyMiddleware) WithMiddleware(path string) http.HandlerFunc {
}
}
if req.Method == "DELETE" {
if req.Method == "DELETE" && req.Body != http.NoBody {
if errJson := handleDeleteRequestForEndpoint(endpoint, req); errJson != nil {
WriteError(w, errJson, nil)
return
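
A minimal sketch of the intent of the http.NoBody guard added above, under the assumption that handler, route, and payload shape are hypothetical and not taken from the API middleware: only attempt to deserialize a DELETE body when the client actually sent one.

package main

import (
	"encoding/json"
	"net/http"
)

// deleteHandler decodes an optional JSON body on DELETE requests and
// otherwise treats the request as body-less.
func deleteHandler(w http.ResponseWriter, req *http.Request) {
	if req.Method == http.MethodDelete && req.Body != http.NoBody {
		var payload struct {
			Pubkey string `json:"pubkey"` // hypothetical field
		}
		if err := json.NewDecoder(req.Body).Decode(&payload); err != nil {
			http.Error(w, "malformed JSON body", http.StatusBadRequest)
			return
		}
		// ... act on payload ...
	}
	w.WriteHeader(http.StatusNoContent)
}

func main() {
	http.HandleFunc("/example/delete", deleteHandler) // hypothetical route
	_ = http.ListenAndServe(":8080", nil)
}
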

View File

@@ -9,6 +9,7 @@ import (
"strings"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/wealdtech/go-bytesutil"
@@ -100,6 +101,20 @@ func base64ToHexProcessor(v reflect.Value) error {
return nil
}
func base64ToChecksumAddressProcessor(v reflect.Value) error {
if v.String() == "" {
// Empty hex values are represented as "0x".
v.SetString("0x")
return nil
}
b, err := base64.StdEncoding.DecodeString(v.String())
if err != nil {
return err
}
v.SetString(common.BytesToAddress(b).Hex())
return nil
}
func base64ToUint256Processor(v reflect.Value) error {
if v.String() == "" {
return nil
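
For illustration, a stand-alone sketch of the conversion the new address processor performs, base64 bytes to a 20-byte, EIP-55 checksummed address (the example input matches the middleware test fixture further below):

package main

import (
	"encoding/base64"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// "Zm9v" is the base64 encoding of "foo".
	b, err := base64.StdEncoding.DecodeString("Zm9v")
	if err != nil {
		panic(err)
	}
	// BytesToAddress left-pads short input to 20 bytes (and crops longer
	// input from the left); Hex applies the EIP-55 mixed-case checksum.
	fmt.Println(common.BytesToAddress(b).Hex())
	// Prints 0x0000000000000000000000000000000000666F6f
}
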

View File

@@ -104,6 +104,8 @@ func ReadGrpcResponseBody(r io.Reader) ([]byte, ErrorJson) {
}
// HandleGrpcResponseError acts on an error that resulted from a grpc-gateway's response.
// Whether there was an error is indicated by the bool return value. In case of an error,
// there is no need to write to the response because it's taken care of by the function.
func HandleGrpcResponseError(errJson ErrorJson, resp *http.Response, respBody []byte, w http.ResponseWriter) (bool, ErrorJson) {
responseHasError := false
if err := json.Unmarshal(respBody, errJson); err != nil {
@@ -149,6 +151,10 @@ func ProcessMiddlewareResponseFields(responseContainer interface{}) ErrorJson {
tag: "hex",
f: base64ToHexProcessor,
},
{
tag: "address",
f: base64ToChecksumAddressProcessor,
},
{
tag: "enum",
f: enumToLowercaseProcessor,

View File

@@ -31,21 +31,25 @@ func defaultRequestContainer() *testRequestContainer {
}
type testResponseContainer struct {
TestString string
TestHex string `hex:"true"`
TestEmptyHex string `hex:"true"`
TestUint256 string `uint256:"true"`
TestEnum string `enum:"true"`
TestTime string `time:"true"`
TestString string
TestHex string `hex:"true"`
TestEmptyHex string `hex:"true"`
TestAddress string `address:"true"`
TestEmptyAddress string `address:"true"`
TestUint256 string `uint256:"true"`
TestEnum string `enum:"true"`
TestTime string `time:"true"`
}
func defaultResponseContainer() *testResponseContainer {
return &testResponseContainer{
TestString: "test string",
TestHex: "Zm9v", // base64 encoding of "foo"
TestEmptyHex: "",
TestEnum: "Test Enum",
TestTime: "2006-01-02T15:04:05Z",
TestString: "test string",
TestHex: "Zm9v", // base64 encoding of "foo"
TestEmptyHex: "",
TestAddress: "Zm9v",
TestEmptyAddress: "",
TestEnum: "Test Enum",
TestTime: "2006-01-02T15:04:05Z",
// base64 encoding of 4196 in little-endian
TestUint256: "ZBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=",
@@ -247,6 +251,8 @@ func TestProcessMiddlewareResponseFields(t *testing.T) {
require.Equal(t, true, errJson == nil)
assert.Equal(t, "0x666f6f", container.TestHex)
assert.Equal(t, "0x", container.TestEmptyHex)
assert.Equal(t, "0x0000000000000000000000000000000000666F6f", container.TestAddress)
assert.Equal(t, "0x", container.TestEmptyAddress)
assert.Equal(t, "4196", container.TestUint256)
assert.Equal(t, "test enum", container.TestEnum)
assert.Equal(t, "1136214245", container.TestTime)
@@ -292,7 +298,7 @@ func TestWriteMiddlewareResponseHeadersAndBody(t *testing.T) {
v, ok = writer.Header()["Content-Length"]
require.Equal(t, true, ok, "header not found")
require.Equal(t, 1, len(v), "wrong number of header values")
assert.Equal(t, "181", v[0])
assert.Equal(t, "224", v[0])
assert.Equal(t, 204, writer.Code)
assert.DeepEqual(t, responseJson, writer.Body.Bytes())
})

api/gateway/modifiers.go (new file, 30 lines)
View File

@@ -0,0 +1,30 @@
package gateway
import (
"context"
"net/http"
"strconv"
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"google.golang.org/protobuf/proto"
)
func HttpResponseModifier(ctx context.Context, w http.ResponseWriter, resp proto.Message) error {
md, ok := gwruntime.ServerMetadataFromContext(ctx)
if !ok {
return nil
}
// set http status code
if vals := md.HeaderMD.Get("x-http-code"); len(vals) > 0 {
code, err := strconv.Atoi(vals[0])
if err != nil {
return err
}
// delete the headers to not expose any grpc-metadata in http response
delete(md.HeaderMD, "x-http-code")
delete(w.Header(), "Grpc-Metadata-X-Http-Code")
w.WriteHeader(code)
}
return nil
}
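
A hypothetical wiring sketch for the modifier above (the gateway setup itself is not part of this diff): grpc-gateway's WithForwardResponseOption registers it on the ServeMux, after which a gRPC handler can set the "x-http-code" header metadata to control the HTTP status. The port and handler registration are placeholders.

package main

import (
	"net/http"

	gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"

	"github.com/prysmaticlabs/prysm/api/gateway"
)

func main() {
	// Register the response modifier so "x-http-code" metadata set by a
	// gRPC handler (e.g. via grpc.SetHeader) is turned into the HTTP status.
	mux := gwruntime.NewServeMux(
		gwruntime.WithForwardResponseOption(gateway.HttpResponseModifier),
	)
	// Generated gateway handlers would be registered on mux here.
	_ = http.ListenAndServe(":3500", mux)
}
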

View File

@@ -17,6 +17,7 @@ import (
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
@@ -37,31 +38,23 @@ func prepareForkchoiceState(
blockRoot [32]byte,
parentRoot [32]byte,
payloadHash [32]byte,
justifiedEpoch types.Epoch,
finalizedEpoch types.Epoch,
justified *ethpb.Checkpoint,
finalized *ethpb.Checkpoint,
) (state.BeaconState, [32]byte, error) {
blockHeader := &ethpb.BeaconBlockHeader{
ParentRoot: parentRoot[:],
}
executionHeader := &ethpb.ExecutionPayloadHeader{
executionHeader := &enginev1.ExecutionPayloadHeader{
BlockHash: payloadHash[:],
}
justifiedCheckpoint := &ethpb.Checkpoint{
Epoch: justifiedEpoch,
}
finalizedCheckpoint := &ethpb.Checkpoint{
Epoch: finalizedEpoch,
}
base := &ethpb.BeaconStateBellatrix{
Slot: slot,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
BlockRoots: make([][]byte, 1),
CurrentJustifiedCheckpoint: justifiedCheckpoint,
FinalizedCheckpoint: finalizedCheckpoint,
CurrentJustifiedCheckpoint: justified,
FinalizedCheckpoint: finalized,
LatestExecutionPayloadHeader: executionHeader,
LatestBlockHeader: blockHeader,
}
@@ -328,19 +321,21 @@ func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
func TestService_ChainHeads_ProtoArray(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New()}}
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
@@ -358,22 +353,24 @@ func TestService_ChainHeads_ProtoArray(t *testing.T) {
func TestService_ChainHeads_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
@@ -453,10 +450,12 @@ func TestService_IsOptimistic_ProtoArray(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
@@ -472,11 +471,13 @@ func TestService_IsOptimistic_DoublyLinkedTree(t *testing.T) {
params.OverrideBeaconConfig(cfg)
ctx := context.Background()
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
@@ -496,10 +497,12 @@ func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {
func TestService_IsOptimisticForRoot_ProtoArray(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
@@ -511,10 +514,12 @@ func TestService_IsOptimisticForRoot_ProtoArray(t *testing.T) {
func TestService_IsOptimisticForRoot_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))

View File

@@ -18,7 +18,6 @@ import (
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@@ -90,7 +89,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
return nil, err
}
r, err := s.updateHead(ctx, s.justifiedBalances.balances)
r, err := s.cfg.ForkChoiceStore.Head(ctx, s.justifiedBalances.balances)
if err != nil {
return nil, err
}
@@ -154,7 +153,7 @@ func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, er
// notifyForkchoiceUpdate signals execution engine on a new payload.
// It returns true if the EL has returned VALID for the block
func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
postStateHeader *ethpb.ExecutionPayloadHeader, blk interfaces.SignedBeaconBlock) (bool, error) {
postStateHeader *enginev1.ExecutionPayloadHeader, blk interfaces.SignedBeaconBlock) (bool, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
defer span.End()

View File

@@ -57,13 +57,15 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
state: st,
}
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 1, altairBlkRoot, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 1, altairBlkRoot, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, bellatrixBlkRoot, altairBlkRoot, params.BeaconConfig().ZeroHash, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 2, bellatrixBlkRoot, altairBlkRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
@@ -300,25 +302,27 @@ func Test_NotifyForkchoiceUpdateRecursive(t *testing.T) {
service, err := NewService(ctx, opts...)
service.justifiedBalances.balances = []uint64{50, 100, 200}
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, 1, bra, [32]byte{}, [32]byte{'A'}, 0, 0)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 1, bra, [32]byte{}, [32]byte{'A'}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, brb, bra, [32]byte{'B'}, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 2, brb, bra, [32]byte{'B'}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 3, brc, brb, [32]byte{'C'}, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 3, brc, brb, [32]byte{'C'}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 4, brd, brc, [32]byte{'D'}, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 4, brd, brc, [32]byte{'D'}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 5, bre, brb, [32]byte{'E'}, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 5, bre, brb, [32]byte{'E'}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 6, brf, bre, [32]byte{'F'}, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 6, brf, bre, [32]byte{'F'}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 7, brg, bre, [32]byte{'G'}, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 7, brg, bre, [32]byte{'G'}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
@@ -405,12 +409,16 @@ func Test_NotifyNewPayload(t *testing.T) {
require.NoError(t, err)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
service.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
r, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 1, r, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 1, r, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
@@ -559,7 +567,7 @@ func Test_NotifyNewPayload(t *testing.T) {
}
service.cfg.ExecutionEngineCaller = e
root := [32]byte{'a'}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, root, root, params.BeaconConfig().ZeroHash, 0, 0)
state, blkRoot, err := prepareForkchoiceState(ctx, 0, root, root, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
postVersion, postHeader, err := getStateVersionAndPayload(tt.postState)
@@ -830,7 +838,7 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
stateGen := stategen.New(beaconDB)
fcs := protoarray.New()
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stateGen),
@@ -846,9 +854,15 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
genesisRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
assert.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
state, blkRoot, err := prepareForkchoiceState(ctx, 0, genesisRoot, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
fjc := &forkchoicetypes.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash}
require.NoError(t, fcs.UpdateJustifiedCheckpoint(fjc))
require.NoError(t, fcs.UpdateFinalizedCheckpoint(fjc))
state, blkRoot, err := prepareForkchoiceState(ctx, 0, genesisRoot, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
fcs.SetOriginRoot(genesisRoot)
genesisSummary := &ethpb.StateSummary{
Root: genesisStateRoot[:],
Slot: 0,
@@ -879,7 +893,9 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
Slot: 320,
}
require.NoError(t, beaconDB.SaveStateSummary(ctx, opStateSummary))
state, blkRoot, err = prepareForkchoiceState(ctx, 320, opRoot, genesisRoot, params.BeaconConfig().ZeroHash, 10, 10)
tenjc := &ethpb.Checkpoint{Epoch: 10, Root: genesisRoot[:]}
tenfc := &ethpb.Checkpoint{Epoch: 10, Root: genesisRoot[:]}
state, blkRoot, err = prepareForkchoiceState(ctx, 320, opRoot, genesisRoot, params.BeaconConfig().ZeroHash, tenjc, tenfc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
assert.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, opRoot))
@@ -908,7 +924,9 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
Slot: 640,
}
require.NoError(t, beaconDB.SaveStateSummary(ctx, validSummary))
state, blkRoot, err = prepareForkchoiceState(ctx, 640, validRoot, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 20, 20)
twentyjc := &ethpb.Checkpoint{Epoch: 20, Root: validRoot[:]}
twentyfc := &ethpb.Checkpoint{Epoch: 20, Root: validRoot[:]}
state, blkRoot, err = prepareForkchoiceState(ctx, 640, validRoot, genesisRoot, params.BeaconConfig().ZeroHash, twentyjc, twentyfc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
require.NoError(t, fcs.SetOptimisticToValid(ctx, validRoot))

View File

@@ -10,11 +10,7 @@ import (
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/features"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
@@ -40,7 +36,7 @@ func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", jp.Root)
return errors.Wrap(err, msg)
}
headRoot, err := s.updateHead(ctx, balances)
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx, balances)
if err != nil {
return errors.Wrap(err, "could not update head")
}
@@ -63,58 +59,6 @@ type head struct {
state state.BeaconState // current head state.
}
// Determined the head from the fork choice service and saves its new data
// (head root, head block, and head state) to the local service cache.
func (s *Service) updateHead(ctx context.Context, balances []uint64) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.updateHead")
defer span.End()
// Get head from the fork choice service.
f, err := s.store.FinalizedCheckpt()
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not get finalized checkpoint")
}
j, err := s.store.JustifiedCheckpt()
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not get justified checkpoint")
}
// To get head before the first justified epoch, the fork choice will start with origin root
// instead of zero hashes.
headStartRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(j.Root))
// In order to process head, fork choice store requires justified info.
// If the fork choice store is missing justified block info, a node should
// re-initiate fork choice store using the latest justified info.
// This recovers a fatal condition and should not happen in run time.
if !s.cfg.ForkChoiceStore.HasNode(headStartRoot) {
jb, err := s.getBlock(ctx, headStartRoot)
if err != nil {
return [32]byte{}, err
}
st, err := s.cfg.StateGen.StateByRoot(ctx, s.ensureRootNotZeros(headStartRoot))
if err != nil {
return [32]byte{}, err
}
if features.Get().EnableForkChoiceDoublyLinkedTree {
s.cfg.ForkChoiceStore = doublylinkedtree.New()
} else {
s.cfg.ForkChoiceStore = protoarray.New()
}
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block(), headStartRoot, st, f, j); err != nil {
return [32]byte{}, err
}
}
jc := &forkchoicetypes.Checkpoint{Epoch: j.Epoch, Root: headStartRoot}
fc := &forkchoicetypes.Checkpoint{Epoch: f.Epoch, Root: s.ensureRootNotZeros(bytesutil.ToBytes32(f.Root))}
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(jc); err != nil {
return [32]byte{}, err
}
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fc); err != nil {
return [32]byte{}, err
}
return s.cfg.ForkChoiceStore.Head(ctx, balances)
}
// This saves head info to the local service cache, it also saves the
// new head root to the DB.
func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock interfaces.SignedBeaconBlock, headState state.BeaconState) error {

View File

@@ -52,7 +52,9 @@ func TestSaveHead_Different(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), oldBlock))
oldRoot, err := oldBlock.Block().HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, bytesutil.ToBytes32(oldBlock.Block().ParentRoot()), [32]byte{}, 0, 0)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, bytesutil.ToBytes32(oldBlock.Block().ParentRoot()), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
service.head = &head{
@@ -70,7 +72,7 @@ func TestSaveHead_Different(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, bytesutil.ToBytes32(wsb.Block().ParentRoot()), [32]byte{}, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, bytesutil.ToBytes32(wsb.Block().ParentRoot()), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
headState, err := util.NewBeaconState()
@@ -102,7 +104,9 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), oldBlock))
oldRoot, err := oldBlock.Block().HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, bytesutil.ToBytes32(oldBlock.Block().ParentRoot()), [32]byte{}, 0, 0)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, bytesutil.ToBytes32(oldBlock.Block().ParentRoot()), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
service.head = &head{
@@ -122,7 +126,7 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, bytesutil.ToBytes32(wsb.Block().ParentRoot()), [32]byte{}, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, bytesutil.ToBytes32(wsb.Block().ParentRoot()), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
headState, err := util.NewBeaconState()
@@ -158,26 +162,6 @@ func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
require.DeepEqual(t, balances, state.Balances(), "Incorrect justified balances")
}
func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
b := util.NewBeaconBlock()
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
state, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), state, r))
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{}, [32]byte{'b'})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{})
_, err = service.updateHead(context.Background(), []uint64{})
require.NoError(t, err)
}
func Test_notifyNewHeadEvent(t *testing.T) {
t.Run("genesis_state_root", func(t *testing.T) {
bState, _ := util.DeterministicGenesisState(t, 10)
@@ -288,7 +272,9 @@ func TestSaveOrphanedAtts_NoCommonAncestor(t *testing.T) {
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
b, err := wrapper.WrappedSignedBeaconBlock(blk)
@@ -341,11 +327,13 @@ func TestSaveOrphanedAtts(t *testing.T) {
blk4.Block.ParentRoot = rG[:]
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
b, err := wrapper.WrappedSignedBeaconBlock(blk)
@@ -402,11 +390,13 @@ func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
blk4.Block.ParentRoot = rG[:]
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
b, err := wrapper.WrappedSignedBeaconBlock(blk)
@@ -464,10 +454,12 @@ func TestSaveOrphanedAtts_NoCommonAncestor_DoublyLinkedTrie(t *testing.T) {
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
b, err := wrapper.WrappedSignedBeaconBlock(blk)
@@ -526,10 +518,12 @@ func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
b, err := wrapper.WrappedSignedBeaconBlock(blk)
@@ -592,10 +586,12 @@ func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
b, err := wrapper.WrappedSignedBeaconBlock(blk)
@@ -620,6 +616,10 @@ func TestUpdateHead_noSavedChanges(t *testing.T) {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
ojp := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, ojp, ojp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlockBellatrix())
require.NoError(t, err)
@@ -641,10 +641,10 @@ func TestUpdateHead_noSavedChanges(t *testing.T) {
headRoot := service.headRoot()
require.Equal(t, [32]byte{}, headRoot)
st, blkRoot, err := prepareForkchoiceState(ctx, 0, bellatrixBlkRoot, [32]byte{}, [32]byte{}, 0, 0)
st, blkRoot, err = prepareForkchoiceState(ctx, 0, bellatrixBlkRoot, [32]byte{}, [32]byte{}, fcp, fcp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
newRoot, err := service.updateHead(ctx, []uint64{1, 2})
newRoot, err := service.cfg.ForkChoiceStore.Head(ctx, []uint64{1, 2})
require.NoError(t, err)
require.NotEqual(t, headRoot, newRoot)
require.Equal(t, headRoot, service.headRoot())

View File

@@ -35,20 +35,22 @@ func TestService_newSlot(t *testing.T) {
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
bj, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, 0, 0)
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // genesis
state, blkRoot, err = prepareForkchoiceState(ctx, 32, [32]byte{'a'}, [32]byte{}, [32]byte{}, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 32, [32]byte{'a'}, [32]byte{}, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // finalized
state, blkRoot, err = prepareForkchoiceState(ctx, 64, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 64, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // justified
state, blkRoot, err = prepareForkchoiceState(ctx, 96, bj, [32]byte{'a'}, [32]byte{}, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 96, bj, [32]byte{'a'}, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // best justified
state, blkRoot, err = prepareForkchoiceState(ctx, 97, [32]byte{'d'}, [32]byte{}, [32]byte{}, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 97, [32]byte{'d'}, [32]byte{}, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // bad

View File

@@ -269,7 +269,9 @@ func TestStore_OnAttestation_Ok_ProtoArray(t *testing.T) {
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1)
ojc := &ethpb.Checkpoint{Epoch: 1, Root: tRoot[:]}
ofc := &ethpb.Checkpoint{Epoch: 1, Root: tRoot[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.OnAttestation(ctx, att[0]))
@@ -297,7 +299,9 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1)
ojc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
ofc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.OnAttestation(ctx, att[0]))
@@ -604,10 +608,12 @@ func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, b32.Block.Slot, r32, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, b32.Block.Slot, r32, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, b33.Block.Slot, r33, r32, params.BeaconConfig().ZeroHash, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, b33.Block.Slot, r33, r32, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))

View File

@@ -24,6 +24,7 @@ import (
"github.com/prysmaticlabs/prysm/crypto/bls"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/monitoring/tracing"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
@@ -139,18 +140,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
}
}
// We add a proposer score boost to fork choice for the block root if applicable, right after
// running a successful state transition for the block.
secondsIntoSlot := uint64(time.Since(s.genesisTime).Seconds()) % params.BeaconConfig().SecondsPerSlot
if err := s.cfg.ForkChoiceStore.BoostProposerRoot(ctx, &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: blockRoot,
BlockSlot: signed.Block().Slot(),
CurrentSlot: slots.SinceGenesis(s.genesisTime),
SecondsIntoSlot: secondsIntoSlot,
}); err != nil {
return err
}
// If slasher is configured, forward the attestations in the block via
// an event feed for processing.
if features.Get().EnableSlasher {
@@ -221,15 +210,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(postState.CurrentJustifiedCheckpoint(), h)
// Update Forkchoice checkpoints
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{
Epoch: psj.Epoch, Root: bytesutil.ToBytes32(psj.Root)}); err != nil {
return err
}
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
Epoch: psf.Epoch, Root: bytesutil.ToBytes32(psf.Root)}); err != nil {
return err
}
}
balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(justified.Root))
@@ -237,7 +217,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", justified.Root)
return errors.Wrap(err, msg)
}
headRoot, err := s.updateHead(ctx, balances)
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx, balances)
if err != nil {
log.WithError(err).Warn("Could not update head")
}
@@ -283,9 +263,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
return err
}
fRoot := bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root)
if err := s.cfg.ForkChoiceStore.Prune(ctx, fRoot); err != nil {
return errors.Wrap(err, "could not prune fork choice nodes")
}
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(fRoot)
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
@@ -319,11 +296,11 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
return s.handleEpochBoundary(ctx, postState)
}
func getStateVersionAndPayload(st state.BeaconState) (int, *ethpb.ExecutionPayloadHeader, error) {
func getStateVersionAndPayload(st state.BeaconState) (int, *enginev1.ExecutionPayloadHeader, error) {
if st == nil {
return 0, nil, errors.New("nil state")
}
var preStateHeader *ethpb.ExecutionPayloadHeader
var preStateHeader *enginev1.ExecutionPayloadHeader
var err error
preStateVersion := st.Version()
switch preStateVersion {
@@ -381,7 +358,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
}
type versionAndHeader struct {
version int
header *ethpb.ExecutionPayloadHeader
header *enginev1.ExecutionPayloadHeader
}
preVersionAndHeaders := make([]*versionAndHeader, len(blks))
postVersionAndHeaders := make([]*versionAndHeader, len(blks))
@@ -461,14 +438,6 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastBR); err != nil {
return errors.Wrap(err, "could not insert last block in batch to forkchoice")
}
// Prune forkchoice store only if the new finalized checkpoint is higher
// than the finalized checkpoint in forkchoice store.
if fCheckpoints[len(blks)-1].Epoch > s.cfg.ForkChoiceStore.FinalizedCheckpoint().Epoch {
if err := s.cfg.ForkChoiceStore.Prune(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(fCheckpoints[len(blks)-1].Root))); err != nil {
return errors.Wrap(err, "could not prune fork choice nodes")
}
}
// Set their optimistic status
if isValidPayload {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, lastBR); err != nil {
@@ -631,7 +600,7 @@ func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk interfac
return s.cfg.ForkChoiceStore.InsertNode(ctx, st, root)
}
// Inserts attester slashing indices to fork choice store.
// InsertSlashingsToForkChoiceStore inserts attester slashing indices to fork choice store.
// To call this function, it's the caller's responsibility to ensure the slashing object is valid.
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []*ethpb.AttesterSlashing) {
for _, slashing := range slashings {
@@ -683,7 +652,7 @@ func (s *Service) pruneCanonicalAttsFromPool(ctx context.Context, r [32]byte, b
}
// validateMergeTransitionBlock validates the merge transition block.
func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion int, stateHeader *ethpb.ExecutionPayloadHeader, blk interfaces.SignedBeaconBlock) error {
func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion int, stateHeader *enginev1.ExecutionPayloadHeader, blk interfaces.SignedBeaconBlock) error {
// Skip validation if block is older than Bellatrix.
if blocks.IsPreBellatrixVersion(blk.Block().Version()) {
return nil

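The hunk above removes the explicit BoostProposerRoot call from onBlock; the interesting piece of that call was the seconds-into-slot arithmetic. Below is a minimal, self-contained sketch of that arithmetic only, assuming mainnet's 12-second slots; the helper name is illustrative and not Prysm's API.

```go
package main

import (
	"fmt"
	"time"
)

// secondsIntoSlot reproduces the arithmetic of the removed call: seconds
// elapsed since genesis, reduced modulo the slot duration.
func secondsIntoSlot(genesis, now time.Time, secondsPerSlot uint64) uint64 {
	return uint64(now.Sub(genesis).Seconds()) % secondsPerSlot
}

func main() {
	// A block arriving 86s after genesis lands 2s into slot 7 (86 = 7*12 + 2).
	genesis := time.Unix(1_600_000_000, 0)
	now := genesis.Add(86 * time.Second)
	fmt.Println(secondsIntoSlot(genesis, now, 12)) // 2
}
```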

@@ -100,12 +100,11 @@ func (s *Service) VerifyFinalizedBlkDescendant(ctx context.Context, root [32]byt
return errors.Wrap(err, "could not get finalized checkpoint")
}
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
finalizedBlkSigned, err := s.getBlock(ctx, fRoot)
fSlot, err := slots.EpochStart(finalized.Epoch)
if err != nil {
return err
}
finalizedBlk := finalizedBlkSigned.Block()
bFinalizedRoot, err := s.ancestor(ctx, root[:], finalizedBlk.Slot())
bFinalizedRoot, err := s.ancestor(ctx, root[:], fSlot)
if err != nil {
return errors.Wrap(err, "could not get finalized block root")
}
@@ -115,7 +114,7 @@ func (s *Service) VerifyFinalizedBlkDescendant(ctx context.Context, root [32]byt
if !bytes.Equal(bFinalizedRoot, fRoot[:]) {
err := fmt.Errorf("block %#x is not a descendant of the current finalized block slot %d, %#x != %#x",
bytesutil.Trunc(root[:]), finalizedBlk.Slot(), bytesutil.Trunc(bFinalizedRoot),
bytesutil.Trunc(root[:]), fSlot, bytesutil.Trunc(bFinalizedRoot),
bytesutil.Trunc(fRoot[:]))
tracing.AnnotateError(span, err)
return invalidBlock{err}
@@ -217,13 +216,7 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(cpt, h)
// Update forkchoice's justified checkpoint
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{
Epoch: cpt.Epoch, Root: bytesutil.ToBytes32(cpt.Root)}); err != nil {
return err
}
}
return nil
}
@@ -321,7 +314,8 @@ func (s *Service) ancestorByForkChoiceStore(ctx context.Context, r [32]byte, slo
if !s.cfg.ForkChoiceStore.HasParent(r) {
return nil, errors.New("could not find root in fork choice store")
}
return s.cfg.ForkChoiceStore.AncestorRoot(ctx, r, slot)
root, err := s.cfg.ForkChoiceStore.AncestorRoot(ctx, r, slot)
return root[:], err
}
// This retrieves an ancestor root using the DB. The lookup recursively walks the DB and is slower than `ancestorByForkChoiceStore`.

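VerifyFinalizedBlkDescendant above now anchors the ancestor lookup at the first slot of the finalized epoch instead of loading the finalized block to read its slot. A short sketch of that epoch-to-slot conversion, assuming mainnet's 32 slots per epoch; the helper name is illustrative, not the slots package API.

```go
package main

import "fmt"

// epochStartSlot returns the first slot of an epoch: epoch * slotsPerEpoch.
func epochStartSlot(epoch, slotsPerEpoch uint64) uint64 {
	return epoch * slotsPerEpoch
}

func main() {
	// A checkpoint finalized at epoch 3 starts at slot 96, so the descendant
	// check walks the chain back to that slot rather than to the finalized
	// block's own slot.
	fmt.Println(epochStartSlot(3, 32)) // 96
}
```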

@@ -20,7 +20,6 @@ import (
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
@@ -249,29 +248,6 @@ func TestStore_OnBlock_DoublyLinkedTree(t *testing.T) {
}
}
func TestStore_OnBlock_ProposerBoostEarly(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: [32]byte{'A'},
BlockSlot: types.Slot(0),
CurrentSlot: 0,
SecondsIntoSlot: 0,
}
require.NoError(t, service.cfg.ForkChoiceStore.BoostProposerRoot(ctx, args))
_, err = service.cfg.ForkChoiceStore.Head(ctx, []uint64{})
require.ErrorContains(t, "could not apply proposer boost score: invalid proposer boost root", err)
}
func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
@@ -1341,13 +1317,15 @@ func TestAncestor_CanUseForkchoice(t *testing.T) {
b200.Block.ParentRoot = r100[:]
r200, err := b200.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
for _, b := range []*ethpb.SignedBeaconBlock{b1, b100, b200} {
beaconBlock := util.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), params.BeaconConfig().ZeroHash, 0, 0)
state, blkRoot, err := prepareForkchoiceState(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
}
@@ -1387,6 +1365,8 @@ func TestAncestor_CanUseDB(t *testing.T) {
b200.Block.ParentRoot = r100[:]
r200, err := b200.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
for _, b := range []*ethpb.SignedBeaconBlock{b1, b100, b200} {
beaconBlock := util.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
@@ -1396,7 +1376,7 @@ func TestAncestor_CanUseDB(t *testing.T) {
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb)) // Saves blocks to DB.
}
state, blkRoot, err := prepareForkchoiceState(context.Background(), 200, r200, r200, params.BeaconConfig().ZeroHash, 0, 0)
state, blkRoot, err := prepareForkchoiceState(context.Background(), 200, r200, r200, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
@@ -1432,7 +1412,7 @@ func TestVerifyBlkDescendant(t *testing.T) {
WithForkChoiceStore(fcs),
}
b := util.NewBeaconBlock()
b.Block.Slot = 1
b.Block.Slot = 32
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
@@ -1440,7 +1420,7 @@ func TestVerifyBlkDescendant(t *testing.T) {
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
b1 := util.NewBeaconBlock()
b1.Block.Slot = 1
b1.Block.Slot = 32
b1.Block.Body.Graffiti = bytesutil.PadTo([]byte{'a'}, 32)
r1, err := b1.Block.HashTreeRoot()
require.NoError(t, err)
@@ -1493,7 +1473,7 @@ func TestVerifyBlkDescendant(t *testing.T) {
for _, tt := range tests {
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: tt.args.finalizedRoot[:]}, [32]byte{})
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: tt.args.finalizedRoot[:], Epoch: 1}, [32]byte{})
err = service.VerifyFinalizedBlkDescendant(ctx, tt.args.parentRoot)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
@@ -1863,7 +1843,7 @@ func Test_getStateVersionAndPayload(t *testing.T) {
name string
st state.BeaconState
version int
header *ethpb.ExecutionPayloadHeader
header *enginev1.ExecutionPayloadHeader
}{
{
name: "phase 0 state",
@@ -1872,7 +1852,7 @@ func Test_getStateVersionAndPayload(t *testing.T) {
return s
}(),
version: version.Phase0,
header: (*ethpb.ExecutionPayloadHeader)(nil),
header: (*enginev1.ExecutionPayloadHeader)(nil),
},
{
name: "altair state",
@@ -1881,19 +1861,19 @@ func Test_getStateVersionAndPayload(t *testing.T) {
return s
}(),
version: version.Altair,
header: (*ethpb.ExecutionPayloadHeader)(nil),
header: (*enginev1.ExecutionPayloadHeader)(nil),
},
{
name: "bellatrix state",
st: func() state.BeaconState {
s, _ := util.DeterministicGenesisStateBellatrix(t, 1)
require.NoError(t, s.SetLatestExecutionPayloadHeader(&ethpb.ExecutionPayloadHeader{
require.NoError(t, s.SetLatestExecutionPayloadHeader(&enginev1.ExecutionPayloadHeader{
BlockNumber: 1,
}))
return s
}(),
version: version.Bellatrix,
header: &ethpb.ExecutionPayloadHeader{
header: &enginev1.ExecutionPayloadHeader{
BlockNumber: 1,
},
},
@@ -1930,7 +1910,7 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
tests := []struct {
name string
stateVersion int
header *ethpb.ExecutionPayloadHeader
header *enginev1.ExecutionPayloadHeader
payload *enginev1.ExecutionPayload
errString string
}{
@@ -1985,7 +1965,7 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
payload: &enginev1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
header: &ethpb.ExecutionPayloadHeader{
header: &enginev1.ExecutionPayloadHeader{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
@@ -2003,7 +1983,7 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
payload: &enginev1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
header: &ethpb.ExecutionPayloadHeader{
header: &enginev1.ExecutionPayloadHeader{
BlockNumber: 1,
},
},


@@ -160,7 +160,7 @@ func (s *Service) UpdateHead(ctx context.Context) error {
if err != nil {
return err
}
newHeadRoot, err := s.updateHead(ctx, balances)
newHeadRoot, err := s.cfg.ForkChoiceStore.Head(ctx, balances)
if err != nil {
log.WithError(err).Warn("Resolving fork due to new attestation")
}


@@ -9,6 +9,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
@@ -120,7 +121,9 @@ func TestProcessAttestations_Ok(t *testing.T) {
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1)
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
@@ -211,38 +214,58 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
opts = append(opts, WithAttestationPool(attestations.NewPool()), WithStateNotifier(&mockBeaconNode{}))
fcs := protoarray.New()
opts = append(opts,
WithAttestationPool(attestations.NewPool()),
WithStateNotifier(&mockBeaconNode{}),
WithForkChoiceStore(fcs),
)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
service.genesisTime = prysmTime.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
atts, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
require.NoError(t, err)
tRoot := bytesutil.ToBytes32(atts[0].Data.Target.Root)
copied := genesisState.Copy()
copied, err = transition.ProcessSlots(ctx, copied, 1)
// Generate a new block for attesters to attest to
blk, err := util.GenerateFullBlock(copied, pks, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: tRoot[:]}))
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1)
tRoot, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, tRoot))
copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot)
require.NoError(t, err)
require.Equal(t, 2, fcs.NodeCount())
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
// Generate attestations for this block in slot 1
atts, err := util.GenerateAttestations(copied, pks, 1, 1, false)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
b := util.NewBeaconBlock()
wb, err := wrapper.WrappedSignedBeaconBlock(b)
// Verify the target is in forkchoice
require.Equal(t, true, fcs.HasNode(bytesutil.ToBytes32(atts[0].Data.BeaconBlockRoot)))
// Insert a new block to forkchoice
ojc := &ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}
b, err := util.GenerateFullBlock(genesisState, pks, util.DefaultBlockGenConfig(), 2)
require.NoError(t, err)
b.Block.ParentRoot = service.originBlockRoot[:]
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wb, err := wrapper.WrappedSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wb))
state, blkRoot, err = prepareForkchoiceState(ctx, wb.Block().Slot(), r, bytesutil.ToBytes32(wb.Block().ParentRoot()), [32]byte{}, 0, 0)
state, blkRoot, err := prepareForkchoiceState(ctx, 2, r, service.originBlockRoot, [32]byte{'b'}, ojc, ojc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.Equal(t, 3, fcs.NodeCount())
service.head.root = r // Old head
require.Equal(t, 1, len(service.cfg.AttPool.ForkchoiceAttestations()))
require.NoError(t, err, service.UpdateHead(ctx))
require.Equal(t, tRoot, service.head.root) // Validate head is the new one
require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations())) // Validate att pool is empty
require.Equal(t, tRoot, service.head.root) // Validate head is the new one
}


@@ -322,6 +322,8 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
opts := testServiceOptsWithDB(t)
s, err := NewService(context.Background(), opts...)
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
s.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{}, [32]byte{})
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
s.genesisTime = time.Now()


@@ -222,6 +222,7 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
}
forkChoicer.SetGenesisTime(uint64(s.genesisTime.Unix()))
st, err := s.cfg.StateGen.StateByRoot(s.ctx, fRoot)
if err != nil {
@@ -478,9 +479,12 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, genesisState, genesisBlkRoot); err != nil {
log.Fatalf("Could not process genesis block for fork choice: %v", err)
}
s.cfg.ForkChoiceStore.SetOriginRoot(genesisBlkRoot)
// Set genesis as fully validated
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, genesisBlkRoot); err != nil {
log.Fatalf("Could not set optimistic status of genesis block to false: %v", err)
return errors.Wrap(err, "Could not set optimistic status of genesis block to false")
}
s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix()))
s.setHead(genesisBlkRoot, genesisBlk, genesisState)
return nil

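StartFromSavedState and saveGenesisData above now hand the genesis time (and origin root) to forkchoice, which lets the store derive the wall-clock slot on its own. A sketch of that derivation, assuming mainnet's 12-second slots; the function is illustrative, not the slots package API.

```go
package main

import "fmt"

// currentSlot derives the wall-clock slot from a unix genesis time.
func currentSlot(genesisUnix, nowUnix, secondsPerSlot uint64) uint64 {
	if nowUnix < genesisUnix {
		return 0
	}
	return (nowUnix - genesisUnix) / secondsPerSlot
}

func main() {
	genesis := uint64(1_600_000_000)
	now := genesis + 600 // ten minutes after genesis
	fmt.Println(currentSlot(genesis, now, 12)) // slot 50
}
```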

@@ -117,7 +117,7 @@ func TestStateBalanceCache(t *testing.T) {
balances []uint64
name string
}
sentinelCacheMiss := errors.New("Cache missed, as expected!")
sentinelCacheMiss := errors.New("cache missed, as expected")
sentinelBalances := []uint64{1, 2, 3, 4, 5}
halfExpiredValidators, halfExpiredBalances := testHalfExpiredValidators()
halfQueuedValidators, halfQueuedBalances := testHalfQueuedValidators()


@@ -1,6 +1,7 @@
package builder
import (
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/network"
@@ -27,10 +28,18 @@ func WithBuilderEndpoints(endpoint string) Option {
}
}
// WithDatabase sets the database for the beacon chain builder service.
func WithDatabase(database db.HeadAccessDatabase) Option {
// WithHeadFetcher sets the head fetcher used to get head info from the chain service.
func WithHeadFetcher(svc *blockchain.Service) Option {
return func(s *Service) error {
s.cfg.beaconDB = database
s.cfg.headFetcher = svc
return nil
}
}
// WithDatabase sets the beacon database used for head access.
func WithDatabase(beaconDB db.HeadAccessDatabase) Option {
return func(s *Service) error {
s.cfg.beaconDB = beaconDB
return nil
}
}


@@ -40,7 +40,9 @@ type Service struct {
// NewService instantiates a new service.
func NewService(ctx context.Context, opts ...Option) (*Service, error) {
s := &Service{}
s := &Service{
cfg: &config{},
}
for _, opt := range opts {
if err := opt(s); err != nil {
return nil, err
@@ -89,8 +91,8 @@ func (s *Service) GetHeader(ctx context.Context, slot types.Slot, parentHash [32
}
// Status retrieves the status of the builder relay network.
func (s *Service) Status(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "builder.Status")
func (s *Service) Status() error {
ctx, span := trace.StartSpan(context.Background(), "builder.Status")
defer span.End()
start := time.Now()
defer func() {

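NewService above now allocates the config struct before applying its functional options, which is why each option can write to s.cfg safely. Below is a self-contained sketch of that pattern under the same shape; names such as WithEndpoint are made up for illustration and are not the builder package's API.

```go
package main

import (
	"errors"
	"fmt"
)

// config and Service mirror the shape of the builder service only enough to
// illustrate the pattern.
type config struct {
	endpoint string
}

type Service struct {
	cfg *config
}

// Option mutates the service during construction.
type Option func(*Service) error

// WithEndpoint is an illustrative option; it can write to s.cfg because
// NewService allocates cfg before applying options.
func WithEndpoint(url string) Option {
	return func(s *Service) error {
		if url == "" {
			return errors.New("empty endpoint")
		}
		s.cfg.endpoint = url
		return nil
	}
}

func NewService(opts ...Option) (*Service, error) {
	s := &Service{cfg: &config{}} // allocate cfg up front to avoid a nil dereference in options
	for _, opt := range opts {
		if err := opt(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	s, err := NewService(WithEndpoint("http://localhost:8550"))
	if err != nil {
		panic(err)
	}
	fmt.Println(s.cfg.endpoint)
}
```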

@@ -12,7 +12,6 @@ import (
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/prysmaticlabs/prysm/time/slots"
)
@@ -46,7 +45,7 @@ func IsMergeTransitionComplete(st state.BeaconState) (bool, error) {
// IsMergeTransitionBlockUsingPreStatePayloadHeader returns true if the input block is the terminal merge block.
// Terminal merge block must be associated with an empty payload header.
// This assumes the header `h` is referenced as the parent state for block body `body`.
func IsMergeTransitionBlockUsingPreStatePayloadHeader(h *ethpb.ExecutionPayloadHeader, body interfaces.BeaconBlockBody) (bool, error) {
func IsMergeTransitionBlockUsingPreStatePayloadHeader(h *enginev1.ExecutionPayloadHeader, body interfaces.BeaconBlockBody) (bool, error) {
if h == nil || body == nil {
return false, errors.New("nil header or block body")
}
@@ -98,7 +97,7 @@ func IsExecutionEnabled(st state.BeaconState, body interfaces.BeaconBlockBody) (
// IsExecutionEnabledUsingHeader returns true if the execution is enabled using post processed payload header and block body.
// This is an optimized version of IsExecutionEnabled where beacon state is not required as an argument.
func IsExecutionEnabledUsingHeader(header *ethpb.ExecutionPayloadHeader, body interfaces.BeaconBlockBody) (bool, error) {
func IsExecutionEnabledUsingHeader(header *enginev1.ExecutionPayloadHeader, body interfaces.BeaconBlockBody) (bool, error) {
if !bellatrix.IsEmptyHeader(header) {
return true, nil
}
@@ -215,7 +214,7 @@ func ProcessPayload(st state.BeaconState, payload *enginev1.ExecutionPayload) (s
}
// ValidatePayloadHeaderWhenMergeCompletes validates the payload header when the merge completes.
func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header *ethpb.ExecutionPayloadHeader) error {
func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header *enginev1.ExecutionPayloadHeader) error {
// Skip validation if the state is not merge compatible.
complete, err := IsMergeTransitionComplete(st)
if err != nil {
@@ -236,7 +235,7 @@ func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header *ethpb
}
// ValidatePayloadHeader validates the payload header.
func ValidatePayloadHeader(st state.BeaconState, header *ethpb.ExecutionPayloadHeader) error {
func ValidatePayloadHeader(st state.BeaconState, header *enginev1.ExecutionPayloadHeader) error {
// Validate header's random mix matches with state in current epoch
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
if err != nil {
@@ -258,7 +257,7 @@ func ValidatePayloadHeader(st state.BeaconState, header *ethpb.ExecutionPayloadH
}
// ProcessPayloadHeader processes the payload header.
func ProcessPayloadHeader(st state.BeaconState, header *ethpb.ExecutionPayloadHeader) (state.BeaconState, error) {
func ProcessPayloadHeader(st state.BeaconState, header *enginev1.ExecutionPayloadHeader) (state.BeaconState, error) {
if err := ValidatePayloadHeaderWhenMergeCompletes(st, header); err != nil {
return nil, err
}

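The functions above now take the consolidated enginev1.ExecutionPayloadHeader and decide merge-transition status by asking whether the pre-state header is still empty. A stripped-down sketch of that emptiness check, using a hypothetical struct that keeps only a few of the real header's fields.

```go
package main

import (
	"bytes"
	"fmt"
)

// payloadHeader is a stand-in for enginev1.ExecutionPayloadHeader; only the
// fields needed to illustrate the emptiness check are kept.
type payloadHeader struct {
	ParentHash  []byte
	BlockHash   []byte
	BlockNumber uint64
}

// isEmptyHeader captures the idea behind the pre-state header check: a merge
// transition block must sit on top of a header whose fields are all still zero.
func isEmptyHeader(h *payloadHeader) bool {
	zero := make([]byte, 32)
	return bytes.Equal(h.ParentHash, zero) &&
		bytes.Equal(h.BlockHash, zero) &&
		h.BlockNumber == 0
}

func main() {
	empty := &payloadHeader{ParentHash: make([]byte, 32), BlockHash: make([]byte, 32)}
	filled := &payloadHeader{ParentHash: bytes.Repeat([]byte{'a'}, 32), BlockHash: make([]byte, 32), BlockNumber: 1}
	fmt.Println(isEmptyHeader(empty), isEmptyHeader(filled)) // true false
}
```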

@@ -14,7 +14,6 @@ import (
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/encoding/ssz"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time/slots"
@@ -23,7 +22,7 @@ import (
func Test_IsMergeComplete(t *testing.T) {
tests := []struct {
name string
payload *ethpb.ExecutionPayloadHeader
payload *enginev1.ExecutionPayloadHeader
want bool
}{
{
@@ -33,7 +32,7 @@ func Test_IsMergeComplete(t *testing.T) {
},
{
name: "has parent hash",
payload: func() *ethpb.ExecutionPayloadHeader {
payload: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
@@ -42,7 +41,7 @@ func Test_IsMergeComplete(t *testing.T) {
},
{
name: "has fee recipient",
payload: func() *ethpb.ExecutionPayloadHeader {
payload: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.FeeRecipient = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
@@ -51,7 +50,7 @@ func Test_IsMergeComplete(t *testing.T) {
},
{
name: "has state root",
payload: func() *ethpb.ExecutionPayloadHeader {
payload: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.StateRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
@@ -60,7 +59,7 @@ func Test_IsMergeComplete(t *testing.T) {
},
{
name: "has receipt root",
payload: func() *ethpb.ExecutionPayloadHeader {
payload: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ReceiptsRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
@@ -69,7 +68,7 @@ func Test_IsMergeComplete(t *testing.T) {
},
{
name: "has logs bloom",
payload: func() *ethpb.ExecutionPayloadHeader {
payload: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.LogsBloom = bytesutil.PadTo([]byte{'a'}, fieldparams.LogsBloomLength)
return h
@@ -78,7 +77,7 @@ func Test_IsMergeComplete(t *testing.T) {
},
{
name: "has random",
payload: func() *ethpb.ExecutionPayloadHeader {
payload: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.PrevRandao = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
@@ -87,7 +86,7 @@ func Test_IsMergeComplete(t *testing.T) {
},
{
name: "has base fee",
payload: func() *ethpb.ExecutionPayloadHeader {
payload: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.BaseFeePerGas = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
@@ -96,7 +95,7 @@ func Test_IsMergeComplete(t *testing.T) {
},
{
name: "has block hash",
payload: func() *ethpb.ExecutionPayloadHeader {
payload: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.BlockHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
@@ -105,7 +104,7 @@ func Test_IsMergeComplete(t *testing.T) {
},
{
name: "has tx root",
payload: func() *ethpb.ExecutionPayloadHeader {
payload: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.TransactionsRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
@@ -114,7 +113,7 @@ func Test_IsMergeComplete(t *testing.T) {
},
{
name: "has extra data",
payload: func() *ethpb.ExecutionPayloadHeader {
payload: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ExtraData = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
@@ -123,7 +122,7 @@ func Test_IsMergeComplete(t *testing.T) {
},
{
name: "has block number",
payload: func() *ethpb.ExecutionPayloadHeader {
payload: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.BlockNumber = 1
return h
@@ -132,7 +131,7 @@ func Test_IsMergeComplete(t *testing.T) {
},
{
name: "has gas limit",
payload: func() *ethpb.ExecutionPayloadHeader {
payload: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.GasLimit = 1
return h
@@ -141,7 +140,7 @@ func Test_IsMergeComplete(t *testing.T) {
},
{
name: "has gas used",
payload: func() *ethpb.ExecutionPayloadHeader {
payload: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.GasUsed = 1
return h
@@ -150,7 +149,7 @@ func Test_IsMergeComplete(t *testing.T) {
},
{
name: "has time stamp",
payload: func() *ethpb.ExecutionPayloadHeader {
payload: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.Timestamp = 1
return h
@@ -175,7 +174,7 @@ func Test_IsMergeTransitionBlockUsingPayloadHeader(t *testing.T) {
tests := []struct {
name string
payload *enginev1.ExecutionPayload
header *ethpb.ExecutionPayloadHeader
header *enginev1.ExecutionPayloadHeader
want bool
}{
{
@@ -187,7 +186,7 @@ func Test_IsMergeTransitionBlockUsingPayloadHeader(t *testing.T) {
{
name: "non-empty header, empty payload",
payload: emptyPayload(),
header: func() *ethpb.ExecutionPayloadHeader {
header: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
@@ -388,7 +387,7 @@ func Test_IsExecutionEnabled(t *testing.T) {
tests := []struct {
name string
payload *enginev1.ExecutionPayload
header *ethpb.ExecutionPayloadHeader
header *enginev1.ExecutionPayloadHeader
useAltairSt bool
want bool
}{
@@ -408,7 +407,7 @@ func Test_IsExecutionEnabled(t *testing.T) {
{
name: "non-empty header, empty payload",
payload: emptyPayload(),
header: func() *ethpb.ExecutionPayloadHeader {
header: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
@@ -427,7 +426,7 @@ func Test_IsExecutionEnabled(t *testing.T) {
},
{
name: "non-empty header, non-empty payload",
header: func() *ethpb.ExecutionPayloadHeader {
header: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
@@ -464,7 +463,7 @@ func Test_IsExecutionEnabledUsingHeader(t *testing.T) {
tests := []struct {
name string
payload *enginev1.ExecutionPayload
header *ethpb.ExecutionPayloadHeader
header *enginev1.ExecutionPayloadHeader
want bool
}{
{
@@ -476,7 +475,7 @@ func Test_IsExecutionEnabledUsingHeader(t *testing.T) {
{
name: "non-empty header, empty payload",
payload: emptyPayload(),
header: func() *ethpb.ExecutionPayloadHeader {
header: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
@@ -495,7 +494,7 @@ func Test_IsExecutionEnabledUsingHeader(t *testing.T) {
},
{
name: "non-empty header, non-empty payload",
header: func() *ethpb.ExecutionPayloadHeader {
header: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
@@ -527,7 +526,7 @@ func Test_ValidatePayloadWhenMergeCompletes(t *testing.T) {
tests := []struct {
name string
payload *enginev1.ExecutionPayload
header *ethpb.ExecutionPayloadHeader
header *enginev1.ExecutionPayloadHeader
err error
}{
{
@@ -543,7 +542,7 @@ func Test_ValidatePayloadWhenMergeCompletes(t *testing.T) {
p.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return p
}(),
header: func() *ethpb.ExecutionPayloadHeader {
header: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.BlockHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
@@ -557,7 +556,7 @@ func Test_ValidatePayloadWhenMergeCompletes(t *testing.T) {
p.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return p
}(),
header: func() *ethpb.ExecutionPayloadHeader {
header: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.BlockHash = bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength)
return h
@@ -688,12 +687,12 @@ func Test_ProcessPayloadHeader(t *testing.T) {
require.NoError(t, err)
tests := []struct {
name string
header *ethpb.ExecutionPayloadHeader
header *enginev1.ExecutionPayloadHeader
err error
}{
{
name: "process passes",
header: func() *ethpb.ExecutionPayloadHeader {
header: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.PrevRandao = random
h.Timestamp = uint64(ts.Unix())
@@ -707,7 +706,7 @@ func Test_ProcessPayloadHeader(t *testing.T) {
},
{
name: "incorrect timestamp",
header: func() *ethpb.ExecutionPayloadHeader {
header: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.PrevRandao = random
h.Timestamp = 1
@@ -739,12 +738,12 @@ func Test_ValidatePayloadHeader(t *testing.T) {
require.NoError(t, err)
tests := []struct {
name string
header *ethpb.ExecutionPayloadHeader
header *enginev1.ExecutionPayloadHeader
err error
}{
{
name: "process passes",
header: func() *ethpb.ExecutionPayloadHeader {
header: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.PrevRandao = random
h.Timestamp = uint64(ts.Unix())
@@ -758,7 +757,7 @@ func Test_ValidatePayloadHeader(t *testing.T) {
},
{
name: "incorrect timestamp",
header: func() *ethpb.ExecutionPayloadHeader {
header: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.PrevRandao = random
h.Timestamp = 1
@@ -778,16 +777,16 @@ func Test_ValidatePayloadHeader(t *testing.T) {
func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
emptySt := st.Copy()
require.NoError(t, st.SetLatestExecutionPayloadHeader(&ethpb.ExecutionPayloadHeader{BlockHash: []byte{'a'}}))
require.NoError(t, st.SetLatestExecutionPayloadHeader(&enginev1.ExecutionPayloadHeader{BlockHash: []byte{'a'}}))
tests := []struct {
name string
state state.BeaconState
header *ethpb.ExecutionPayloadHeader
header *enginev1.ExecutionPayloadHeader
err error
}{
{
name: "no merge",
header: func() *ethpb.ExecutionPayloadHeader {
header: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
return h
}(),
@@ -796,7 +795,7 @@ func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
},
{
name: "process passes",
header: func() *ethpb.ExecutionPayloadHeader {
header: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ParentHash = []byte{'a'}
return h
@@ -806,7 +805,7 @@ func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
},
{
name: "invalid block hash",
header: func() *ethpb.ExecutionPayloadHeader {
header: func() *enginev1.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ParentHash = []byte{'b'}
return h
@@ -873,8 +872,8 @@ func BenchmarkBellatrixComplete(b *testing.B) {
}
}
func emptyPayloadHeader() *ethpb.ExecutionPayloadHeader {
return &ethpb.ExecutionPayloadHeader{
func emptyPayloadHeader() *enginev1.ExecutionPayloadHeader {
return &enginev1.ExecutionPayloadHeader{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),


@@ -39,7 +39,6 @@ go_test(
"attestation_test.go",
"justification_finalization_test.go",
"new_test.go",
"precompute_test.go",
"reward_penalty_test.go",
"slashing_test.go",
],


@@ -149,14 +149,14 @@ func computeCheckpoints(state state.BeaconState, newBits bitfield.Bitvector4) (*
finalizedCheckpoint := state.FinalizedCheckpoint()
// If 2/3 or more of the total balance attested in the current epoch.
if newBits.BitAt(0) && currentEpoch >= justifiedCheckpoint.Epoch {
if newBits.BitAt(0) {
blockRoot, err := helpers.BlockRoot(state, currentEpoch)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not get block root for current epoch %d", currentEpoch)
}
justifiedCheckpoint.Epoch = currentEpoch
justifiedCheckpoint.Root = blockRoot
} else if newBits.BitAt(1) && prevEpoch >= justifiedCheckpoint.Epoch {
} else if newBits.BitAt(1) {
// If 2/3 or more of the total balance attested in the previous epoch.
blockRoot, err := helpers.BlockRoot(state, prevEpoch)
if err != nil {

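computeCheckpoints above turns the justification bits into a new justified checkpoint: bit 0 set means at least 2/3 of the total balance attested to the current epoch, bit 1 means the previous epoch did. A minimal sketch of that selection, with epochs as plain integers and a made-up helper name.

```go
package main

import "fmt"

// pickJustified sketches how the justification bits select the new justified
// epoch: bit 0 means the current epoch reached the 2/3 supermajority, bit 1
// means the previous epoch did; otherwise the justified epoch is unchanged.
func pickJustified(bit0, bit1 bool, currentEpoch, prevEpoch, justifiedEpoch uint64) uint64 {
	switch {
	case bit0:
		return currentEpoch
	case bit1:
		return prevEpoch
	default:
		return justifiedEpoch
	}
}

func main() {
	// The previous epoch (4) reached the supermajority but the current one (5)
	// did not, so justification advances to epoch 4.
	fmt.Println(pickJustified(false, true, 5, 4, 3)) // 4
}
```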

@@ -250,18 +250,3 @@ func TestUnrealizedCheckpoints(t *testing.T) {
})
}
}
func Test_ComputeCheckpoints_CantUpdateToLower(t *testing.T) {
st, err := v2.InitializeFromProto(&ethpb.BeaconStateAltair{
Slot: params.BeaconConfig().SlotsPerEpoch * 2,
CurrentJustifiedCheckpoint: &ethpb.Checkpoint{
Epoch: 2,
},
})
require.NoError(t, err)
jb := make(bitfield.Bitvector4, 1)
jb.SetBitAt(1, true)
cp, _, err := precompute.ComputeCheckpoints(st, jb)
require.NoError(t, err)
require.Equal(t, types.Epoch(2), cp.Epoch)
}


@@ -1,3 +0,0 @@
package precompute
var ComputeCheckpoints = computeCheckpoints


@@ -14,6 +14,7 @@ go_library(
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v3:go_default_library",
"//config/params:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
],
)
@@ -25,6 +26,7 @@ go_test(
":go_default_library",
"//beacon-chain/core/time:go_default_library",
"//config/params:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",


@@ -7,6 +7,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state"
v3 "github.com/prysmaticlabs/prysm/beacon-chain/state/v3"
"github.com/prysmaticlabs/prysm/config/params"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
@@ -65,7 +66,7 @@ func UpgradeToBellatrix(ctx context.Context, state state.BeaconState) (state.Bea
InactivityScores: inactivityScores,
CurrentSyncCommittee: currentSyncCommittee,
NextSyncCommittee: nextSyncCommittee,
LatestExecutionPayloadHeader: &ethpb.ExecutionPayloadHeader{
LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeader{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),


@@ -7,6 +7,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/execution"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/config/params"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
@@ -61,7 +62,7 @@ func TestUpgradeToBellatrix(t *testing.T) {
header, err := mSt.LatestExecutionPayloadHeader()
require.NoError(t, err)
wanted := &ethpb.ExecutionPayloadHeader{
wanted := &enginev1.ExecutionPayloadHeader{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),


@@ -42,7 +42,6 @@ go_test(
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
],
)


@@ -3,7 +3,6 @@ package signing
import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
@@ -11,17 +10,19 @@ var ErrNilRegistration = errors.New("nil signed registration")
// VerifyRegistrationSignature verifies the signature of a validator's registration.
func VerifyRegistrationSignature(
e types.Epoch,
f *ethpb.Fork,
sr *ethpb.SignedValidatorRegistrationV1,
genesisRoot []byte,
) error {
if sr == nil || sr.Message == nil {
return ErrNilRegistration
}
d := params.BeaconConfig().DomainApplicationBuilder
sd, err := Domain(f, e, d, genesisRoot)
// Per the spec, the fork version and genesis validators root are left nil here,
// so ComputeDomain falls back to the genesis fork version and the zero root.
sd, err := ComputeDomain(
d,
nil, /* fork version */
nil /* genesis val root */)
if err != nil {
return err
}

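VerifyRegistrationSignature above now derives the signing domain with a nil fork version and nil genesis validators root, so builder registrations are checked against a genesis-pinned domain rather than a fork-dependent one. A sketch of the spec's compute_domain shape (4-byte domain type followed by the first 28 bytes of the fork data root); the fork data root below is a placeholder and the helper is not the signing package's API.

```go
package main

import (
	"encoding/hex"
	"fmt"
)

// computeDomain sketches compute_domain: a 32-byte domain made of the 4-byte
// domain type followed by the first 28 bytes of the fork data root. Pinning
// the inputs to their genesis values keeps the builder domain stable across forks.
func computeDomain(domainType [4]byte, forkDataRoot [32]byte) [32]byte {
	var d [32]byte
	copy(d[:4], domainType[:])
	copy(d[4:], forkDataRoot[:28])
	return d
}

func main() {
	builderDomainType := [4]byte{0x00, 0x00, 0x00, 0x01} // DOMAIN_APPLICATION_BUILDER per the builder spec
	var forkDataRoot [32]byte                            // placeholder; the real value is hash_tree_root(ForkData)
	d := computeDomain(builderDomainType, forkDataRoot)
	fmt.Println(hex.EncodeToString(d[:]))
}
```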

@@ -10,8 +10,6 @@ import (
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time/slots"
)
func TestVerifyRegistrationSignature(t *testing.T) {
@@ -23,22 +21,22 @@ func TestVerifyRegistrationSignature(t *testing.T) {
Timestamp: uint64(time.Now().Unix()),
Pubkey: sk.PublicKey().Marshal(),
}
st, _ := util.DeterministicGenesisState(t, 1)
d := params.BeaconConfig().DomainApplicationBuilder
e := slots.ToEpoch(st.Slot())
sig, err := signing.ComputeDomainAndSign(st, e, reg, d, sk)
domain, err := signing.ComputeDomain(d, nil, nil)
require.NoError(t, err)
sr, err := signing.ComputeSigningRoot(reg, domain)
require.NoError(t, err)
sk.Sign(sr[:]).Marshal()
sReg := &ethpb.SignedValidatorRegistrationV1{
Message: reg,
Signature: sig,
Signature: sk.Sign(sr[:]).Marshal(),
}
f := st.Fork()
g := st.GenesisValidatorsRoot()
require.NoError(t, signing.VerifyRegistrationSignature(e, f, sReg, g))
require.NoError(t, signing.VerifyRegistrationSignature(sReg))
sReg.Signature = []byte("bad")
require.ErrorIs(t, signing.VerifyRegistrationSignature(e, f, sReg, g), signing.ErrSigFailedToVerify)
require.ErrorIs(t, signing.VerifyRegistrationSignature(sReg), signing.ErrSigFailedToVerify)
sReg.Message = nil
require.ErrorIs(t, signing.VerifyRegistrationSignature(e, f, sReg, g), signing.ErrNilRegistration)
require.ErrorIs(t, signing.VerifyRegistrationSignature(sReg), signing.ErrNilRegistration)
}


@@ -105,7 +105,6 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
}
}
datafile := KVStoreDatafilePath(dirPath)
start := time.Now()
log.Infof("Opening Bolt DB at %s", datafile)
boltDB, err := bolt.Open(
datafile,
@@ -116,40 +115,29 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
},
)
if err != nil {
log.WithField("elapsed", time.Since(start)).Error("Failed to open Bolt DB")
if errors.Is(err, bolt.ErrTimeout) {
return nil, errors.New("cannot obtain database lock, database may be in use by another process")
}
return nil, err
}
log.WithField("elapsed", time.Since(start)).Info("Opened Bolt DB")
boltDB.AllocSize = boltAllocSize
start = time.Now()
log.Infof("Creating block cache...")
blockCache, err := ristretto.NewCache(&ristretto.Config{
NumCounters: 1000, // number of keys to track frequency of (1000).
MaxCost: BlockCacheSize, // maximum cost of cache (1000 Blocks).
BufferItems: 64, // number of keys per Get buffer.
})
if err != nil {
log.WithField("elapsed", time.Since(start)).Error("Failed to create block cache")
return nil, err
}
log.WithField("elapsed", time.Since(start)).Info("Created block cache")
start = time.Now()
log.Infof("Creating validator cache...")
validatorCache, err := ristretto.NewCache(&ristretto.Config{
NumCounters: NumOfValidatorEntries, // number of entries in cache (2 Million).
MaxCost: ValidatorEntryMaxCost, // maximum size of the cache (64Mb)
BufferItems: 64, // number of keys per Get buffer.
})
if err != nil {
log.WithField("elapsed", time.Since(start)).Error("Failed to to create validator cache")
return nil, err
}
log.WithField("elapsed", time.Since(start)).Info("Created validator cache")
kv := &Store{
db: boltDB,
@@ -159,8 +147,6 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
stateSummaryCache: newStateSummaryCache(),
ctx: ctx,
}
start = time.Now()
log.Infof("Updating DB and creating buckets...")
if err := kv.db.Update(func(tx *bolt.Tx) error {
return createBuckets(
tx,
@@ -195,13 +181,9 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
registrationBucket,
)
}); err != nil {
log.WithField("elapsed", time.Since(start)).Error("Failed to update db and create buckets")
return nil, err
}
log.WithField("elapsed", time.Since(start)).Info("Updated db and created buckets")
err = prometheus.Register(createBoltCollector(kv.db))
return kv, err
}

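NewKVStore above drops the elapsed-time logging and instead maps bolt's lock timeout onto an operator-friendly error. A small sketch of that error translation with errors.Is; errTimeout merely stands in for bolt.ErrTimeout.

```go
package main

import (
	"errors"
	"fmt"
)

// errTimeout stands in for bolt.ErrTimeout in this sketch.
var errTimeout = errors.New("timeout")

// openStore shows the translation used above: a low-level lock timeout becomes
// a message telling the operator another process likely holds the database.
func openStore(open func() error) error {
	if err := open(); err != nil {
		if errors.Is(err, errTimeout) {
			return errors.New("cannot obtain database lock, database may be in use by another process")
		}
		return err
	}
	return nil
}

func main() {
	err := openStore(func() error { return fmt.Errorf("acquiring file lock: %w", errTimeout) })
	fmt.Println(err)
}
```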

@@ -30,6 +30,7 @@ go_library(
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
@@ -61,7 +62,7 @@ go_test(
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",


@@ -59,7 +59,8 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
// 2 <- head
// |
// 3
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Root: indexToHash(2), Epoch: 1}
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Root: indexToHash(1), Epoch: 1}
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Root: indexToHash(0), Epoch: 0}
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head with justified epoch at 1")
@@ -218,6 +219,7 @@ func setup(justifiedEpoch, finalizedEpoch types.Epoch) *ForkChoice {
ctx := context.Background()
f := New()
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: justifiedEpoch, Root: params.BeaconConfig().ZeroHash}
f.store.bestJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: justifiedEpoch, Root: params.BeaconConfig().ZeroHash}
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: finalizedEpoch, Root: params.BeaconConfig().ZeroHash}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, params.BeaconConfig().ZeroHash, [32]byte{}, params.BeaconConfig().ZeroHash, justifiedEpoch, finalizedEpoch)
if err != nil {


@@ -15,6 +15,7 @@ import (
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -22,13 +23,14 @@ import (
// New initializes a new fork choice store.
func New() *ForkChoice {
s := &Store{
justifiedCheckpoint: &forkchoicetypes.Checkpoint{},
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
proposerBoostRoot: [32]byte{},
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
nodeByPayload: make(map[[fieldparams.RootLength]byte]*Node),
slashedIndices: make(map[types.ValidatorIndex]bool),
pruneThreshold: defaultPruneThreshold,
justifiedCheckpoint: &forkchoicetypes.Checkpoint{},
bestJustifiedCheckpoint: &forkchoicetypes.Checkpoint{},
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
proposerBoostRoot: [32]byte{},
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
nodeByPayload: make(map[[fieldparams.RootLength]byte]*Node),
slashedIndices: make(map[types.ValidatorIndex]bool),
pruneThreshold: defaultPruneThreshold,
}
b := make([]uint64, 0)
@@ -139,13 +141,59 @@ func (f *ForkChoice) InsertNode(ctx context.Context, state state.ReadOnlyBeaconS
return errInvalidNilCheckpoint
}
finalizedEpoch := fc.Epoch
return f.store.insert(ctx, slot, root, parentRoot, payloadHash, justifiedEpoch, finalizedEpoch)
err := f.store.insert(ctx, slot, root, parentRoot, payloadHash, justifiedEpoch, finalizedEpoch)
if err != nil {
return err
}
return f.updateCheckpoints(ctx, jc, fc)
}
// Prune prunes the fork choice store with the new finalized root. The store is only pruned if the input
// root is different from the current store finalized root, and the number of nodes in the store has met the prune threshold.
func (f *ForkChoice) Prune(ctx context.Context, finalizedRoot [32]byte) error {
return f.store.prune(ctx, finalizedRoot)
// updateCheckpoints updates the checkpoints when inserting a new node.
func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkpoint) error {
f.store.checkpointsLock.Lock()
if jc.Epoch > f.store.justifiedCheckpoint.Epoch {
if jc.Epoch > f.store.bestJustifiedCheckpoint.Epoch {
f.store.bestJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
Root: bytesutil.ToBytes32(jc.Root)}
}
currentSlot := slots.CurrentSlot(f.store.genesisTime)
if slots.SinceEpochStarts(currentSlot) < params.BeaconConfig().SafeSlotsToUpdateJustified {
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
Root: bytesutil.ToBytes32(jc.Root)}
} else {
currentJcp := f.store.justifiedCheckpoint
currentRoot := currentJcp.Root
if currentRoot == params.BeaconConfig().ZeroHash {
currentRoot = f.store.originRoot
}
jSlot, err := slots.EpochStart(currentJcp.Epoch)
if err != nil {
f.store.checkpointsLock.Unlock()
return err
}
jcRoot := bytesutil.ToBytes32(jc.Root)
root, err := f.AncestorRoot(ctx, jcRoot, jSlot)
if err != nil {
f.store.checkpointsLock.Unlock()
return err
}
if root == currentRoot {
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
Root: jcRoot}
}
}
}
// Update finalization
if fc.Epoch <= f.store.finalizedCheckpoint.Epoch {
f.store.checkpointsLock.Unlock()
return nil
}
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: fc.Epoch,
Root: bytesutil.ToBytes32(fc.Root)}
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
Root: bytesutil.ToBytes32(jc.Root)}
f.store.checkpointsLock.Unlock()
return f.store.prune(ctx)
}
// HasNode returns true if the node exists in fork choice store,
@@ -208,7 +256,7 @@ func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
}
// AncestorRoot returns the ancestor root of input block root at a given slot.
func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types.Slot) ([]byte, error) {
func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types.Slot) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "protoArray.AncestorRoot")
defer span.End()
@@ -217,22 +265,22 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return nil, errors.Wrap(ErrNilNode, "could not determine ancestor root")
return [32]byte{}, errors.Wrap(ErrNilNode, "could not determine ancestor root")
}
n := node
for n != nil && n.slot > slot {
if ctx.Err() != nil {
return nil, ctx.Err()
return [32]byte{}, ctx.Err()
}
n = n.parent
}
if n == nil {
return nil, errors.Wrap(ErrNilNode, "could not determine ancestor root")
return [32]byte{}, errors.Wrap(ErrNilNode, "could not determine ancestor root")
}
return n.root[:], nil
return n.root, nil
}
// updateBalances updates the balances that directly voted for each block taking into account the
@@ -397,6 +445,10 @@ func (f *ForkChoice) UpdateJustifiedCheckpoint(jc *forkchoicetypes.Checkpoint) e
f.store.checkpointsLock.Lock()
defer f.store.checkpointsLock.Unlock()
f.store.justifiedCheckpoint = jc
bj := f.store.bestJustifiedCheckpoint
if bj == nil || jc.Epoch > bj.Epoch {
f.store.bestJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch, Root: jc.Root}
}
return nil
}
@@ -484,3 +536,13 @@ func (f *ForkChoice) InsertOptimisticChain(ctx context.Context, chain []*forkcho
}
return nil
}
// SetGenesisTime sets the genesisTime tracked by forkchoice
func (f *ForkChoice) SetGenesisTime(genesisTime uint64) {
f.store.genesisTime = genesisTime
}
// SetOriginRoot sets the genesis block root
func (f *ForkChoice) SetOriginRoot(root [32]byte) {
f.store.originRoot = root
}

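updateCheckpoints above adopts a higher justified checkpoint unconditionally only while the chain is early in the epoch; past SafeSlotsToUpdateJustified it additionally requires the new checkpoint to descend from the currently justified one, otherwise only bestJustifiedCheckpoint advances. A condensed sketch of that gate, assuming mainnet's SAFE_SLOTS_TO_UPDATE_JUSTIFIED of 8; the helper name is illustrative.

```go
package main

import "fmt"

// shouldAdoptJustified condenses the gate: a newer justified checkpoint is
// adopted unconditionally during the first safeSlots slots of an epoch; later
// it must descend from the currently justified checkpoint.
func shouldAdoptJustified(slotsIntoEpoch, safeSlots uint64, descendsFromCurrent bool) bool {
	if slotsIntoEpoch < safeSlots {
		return true
	}
	return descendsFromCurrent
}

func main() {
	fmt.Println(shouldAdoptJustified(3, 8, false))  // true: still early in the epoch
	fmt.Println(shouldAdoptJustified(20, 8, false)) // false: late and not a descendant
	fmt.Println(shouldAdoptJustified(20, 8, true))  // true: late but a descendant
}
```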

@@ -4,6 +4,7 @@ import (
"context"
"encoding/binary"
"testing"
"time"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
@@ -13,7 +14,7 @@ import (
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
@@ -35,7 +36,7 @@ func prepareForkchoiceState(
ParentRoot: parentRoot[:],
}
executionHeader := &ethpb.ExecutionPayloadHeader{
executionHeader := &enginev1.ExecutionPayloadHeader{
BlockHash: payloadHash[:],
}
@@ -244,7 +245,7 @@ func TestForkChoice_AncestorRoot(t *testing.T) {
r, err := f.AncestorRoot(ctx, indexToHash(3), 6)
assert.NoError(t, err)
assert.Equal(t, bytesutil.ToBytes32(r), indexToHash(3))
assert.Equal(t, r, indexToHash(3))
_, err = f.AncestorRoot(ctx, indexToHash(3), 0)
assert.ErrorContains(t, ErrNilNode.Error(), err)
@@ -252,11 +253,11 @@ func TestForkChoice_AncestorRoot(t *testing.T) {
root, err := f.AncestorRoot(ctx, indexToHash(3), 5)
require.NoError(t, err)
hash3 := indexToHash(3)
require.DeepEqual(t, hash3[:], root)
require.DeepEqual(t, hash3, root)
root, err = f.AncestorRoot(ctx, indexToHash(3), 1)
require.NoError(t, err)
hash1 := indexToHash(1)
require.DeepEqual(t, hash1[:], root)
require.DeepEqual(t, hash1, root)
}
func TestForkChoice_AncestorEqualSlot(t *testing.T) {
@@ -271,8 +272,7 @@ func TestForkChoice_AncestorEqualSlot(t *testing.T) {
r, err := f.AncestorRoot(ctx, [32]byte{'3'}, 100)
require.NoError(t, err)
root := bytesutil.ToBytes32(r)
require.Equal(t, root, [32]byte{'1'})
require.Equal(t, r, [32]byte{'1'})
}
func TestForkChoice_AncestorLowerSlot(t *testing.T) {
@@ -287,8 +287,7 @@ func TestForkChoice_AncestorLowerSlot(t *testing.T) {
r, err := f.AncestorRoot(ctx, [32]byte{'3'}, 150)
require.NoError(t, err)
root := bytesutil.ToBytes32(r)
require.Equal(t, root, [32]byte{'1'})
require.Equal(t, r, [32]byte{'1'})
}
func TestForkChoice_RemoveEquivocating(t *testing.T) {
@@ -350,7 +349,7 @@ func indexToHash(i uint64) [32]byte {
return hash.Hash(b[:])
}
func TestStore_UpdateCheckpoints(t *testing.T) {
func TestForkChoice_UpdateJustifiedAndFinalizedCheckpoints(t *testing.T) {
f := setup(1, 1)
jr := [32]byte{'j'}
fr := [32]byte{'f'}
@@ -593,3 +592,149 @@ func TestStore_InsertOptimisticChain(t *testing.T) {
f = setup(1, 1)
require.NoError(t, f.InsertOptimisticChain(context.Background(), args[2:]))
}
func TestForkChoice_UpdateCheckpoints(t *testing.T) {
ctx := context.Background()
tests := []struct {
name string
justified *forkchoicetypes.Checkpoint
bestJustified *forkchoicetypes.Checkpoint
finalized *forkchoicetypes.Checkpoint
newJustified *forkchoicetypes.Checkpoint
newFinalized *forkchoicetypes.Checkpoint
wantedJustified *forkchoicetypes.Checkpoint
wantedBestJustified *forkchoicetypes.Checkpoint
wantedFinalized *forkchoicetypes.Checkpoint
currentSlot types.Slot
wantedErr string
}{
{
name: "lower than store justified and finalized",
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
newJustified: &forkchoicetypes.Checkpoint{Epoch: 1},
newFinalized: &forkchoicetypes.Checkpoint{Epoch: 0},
wantedJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
wantedBestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
wantedFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
},
{
name: "higher than store justified, early slot, direct descendant",
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
newJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'b'}},
newFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'g'}},
wantedJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'b'}},
wantedBestJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'b'}},
wantedFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
},
{
name: "higher than store justified, early slot, not a descendant",
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
newJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
newFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'g'}},
wantedJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
wantedBestJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
wantedFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
},
{
name: "higher than store justified, late slot, descendant",
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
newJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'b'}},
newFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'g'}},
wantedJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'b'}},
wantedFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
wantedBestJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'b'}},
currentSlot: params.BeaconConfig().SafeSlotsToUpdateJustified.Add(1),
},
{
name: "higher than store justified, late slot, not descendant",
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
newJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
newFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'g'}},
wantedJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
wantedFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
wantedBestJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
currentSlot: params.BeaconConfig().SafeSlotsToUpdateJustified.Add(1),
},
{
name: "higher than store finalized, late slot, not descendant",
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
newJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
newFinalized: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'h'}},
wantedJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
wantedFinalized: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'h'}},
wantedBestJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
currentSlot: params.BeaconConfig().SafeSlotsToUpdateJustified.Add(1),
},
{
name: "Unknown checkpoint root, late slot",
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
newJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'d'}},
newFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'h'}},
currentSlot: params.BeaconConfig().SafeSlotsToUpdateJustified.Add(1),
wantedErr: "could not determine ancestor root",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fcs := setup(tt.justified.Epoch, tt.finalized.Epoch)
fcs.store.justifiedCheckpoint = tt.justified
fcs.store.finalizedCheckpoint = tt.finalized
fcs.store.bestJustifiedCheckpoint = tt.bestJustified
fcs.store.genesisTime = uint64(time.Now().Unix()) - uint64(tt.currentSlot)*params.BeaconConfig().SecondsPerSlot
state, blkRoot, err := prepareForkchoiceState(ctx, 32, [32]byte{'f'},
[32]byte{}, [32]byte{}, tt.finalized.Epoch, tt.finalized.Epoch)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 64, [32]byte{'j'},
[32]byte{'f'}, [32]byte{}, tt.justified.Epoch, tt.finalized.Epoch)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 96, [32]byte{'b'},
[32]byte{'j'}, [32]byte{}, tt.newJustified.Epoch, tt.newFinalized.Epoch)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 96, [32]byte{'c'},
[32]byte{'f'}, [32]byte{}, tt.newJustified.Epoch, tt.newFinalized.Epoch)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 65, [32]byte{'h'},
[32]byte{'f'}, [32]byte{}, tt.newFinalized.Epoch, tt.newFinalized.Epoch)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
// Restore the justification and finalization checkpoints because the insertions above may have updated them.
fcs.store.justifiedCheckpoint = tt.justified
fcs.store.finalizedCheckpoint = tt.finalized
fcs.store.bestJustifiedCheckpoint = tt.bestJustified
jc := &ethpb.Checkpoint{Epoch: tt.newJustified.Epoch, Root: tt.newJustified.Root[:]}
fc := &ethpb.Checkpoint{Epoch: tt.newFinalized.Epoch, Root: tt.newFinalized.Root[:]}
err = fcs.updateCheckpoints(ctx, jc, fc)
if len(tt.wantedErr) > 0 {
require.ErrorContains(t, tt.wantedErr, err)
} else {
require.NoError(t, err)
require.Equal(t, tt.wantedJustified.Epoch, fcs.store.justifiedCheckpoint.Epoch)
require.Equal(t, tt.wantedFinalized.Epoch, fcs.store.finalizedCheckpoint.Epoch)
require.Equal(t, tt.wantedJustified.Root, fcs.store.justifiedCheckpoint.Root)
require.Equal(t, tt.wantedFinalized.Root, fcs.store.finalizedCheckpoint.Root)
require.Equal(t, tt.wantedBestJustified.Epoch, fcs.store.bestJustifiedCheckpoint.Epoch)
require.Equal(t, tt.wantedBestJustified.Root, fcs.store.bestJustifiedCheckpoint.Root)
}
})
}
}
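The late-slot cases in the table above exercise the consensus-spec rule for accepting a higher justified checkpoint: it is taken unconditionally early in the epoch, and only if it descends from the currently justified root once SafeSlotsToUpdateJustified has passed. A minimal, self-contained Go sketch of that spec rule, with standalone names and an injected ancestor lookup, not Prysm's updateCheckpoints implementation:

package main

import "fmt"

const (
	slotsPerEpoch              = 32
	safeSlotsToUpdateJustified = 8
)

type checkpoint struct {
	epoch uint64
	root  [32]byte
}

// ancestorFn returns the ancestor of root at the given slot; in the tests above this
// role is played by forkchoice's AncestorRoot.
type ancestorFn func(root [32]byte, slot uint64) ([32]byte, bool)

func shouldUpdateJustified(currentSlot uint64, stored, incoming checkpoint, ancestor ancestorFn) bool {
	if currentSlot%slotsPerEpoch < safeSlotsToUpdateJustified {
		return true // early in the epoch: accept the higher justified checkpoint unconditionally
	}
	justifiedSlot := stored.epoch * slotsPerEpoch
	a, ok := ancestor(incoming.root, justifiedSlot)
	return ok && a == stored.root // late in the epoch: must descend from the stored justified root
}

func main() {
	stored := checkpoint{epoch: 2, root: [32]byte{'j'}}
	incoming := checkpoint{epoch: 3, root: [32]byte{'c'}}
	// Pretend 'c' does not descend from 'j', as in the "not a descendant" cases above.
	notDescendant := func(_ [32]byte, _ uint64) ([32]byte, bool) { return [32]byte{'f'}, true }
	fmt.Println(shouldUpdateJustified(safeSlotsToUpdateJustified+1, stored, incoming, notDescendant)) // false
	fmt.Println(shouldUpdateJustified(0, stored, incoming, notDescendant))                            // true
}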

View File

@@ -69,7 +69,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
// Insert block 5 with justified epoch of 2, verify head is still at 4.
// Insert block 5 with justified epoch of 2, verify head is 5
// 0
// / \
// 2 1
@@ -82,32 +82,6 @@ func TestNoVote_CanFindHead(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
// Verify there's an error when starting from a block with wrong justified epoch.
// 0
// / \
// 2 1
// | |
// head -> 4 3
// |
// 5 <- starting from 5 with justified epoch 0 should error
f.store.justifiedCheckpoint.Root = indexToHash(5)
_, err = f.Head(context.Background(), balances)
wanted := "head at slot 0 with weight 0 is not eligible, finalizedEpoch 1 != 1, justifiedEpoch 2 != 1"
require.ErrorContains(t, wanted, err)
// Set the justified epoch to 2 and start block to 5 to verify head is 5.
// 0
// / \
// 2 1
// | |
// 4 3
// |
// 5 <- head
f.store.justifiedCheckpoint.Epoch = 2
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(5), r, "Incorrect head for with justified epoch at 2")
// Insert block 6 with justified epoch of 2, verify head is at 6.

View File

@@ -4,36 +4,9 @@ import (
"context"
"github.com/pkg/errors"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/config/params"
)
// BoostProposerRoot sets the block root which should be boosted during
// the LMD fork choice algorithm calculations. This is meant to reward timely,
// proposed blocks which occur before a cutoff interval set to
// SECONDS_PER_SLOT // INTERVALS_PER_SLOT.
//
// time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
// is_before_attesting_interval = time_into_slot < SECONDS_PER_SLOT // INTERVALS_PER_SLOT
// if get_current_slot(store) == block.slot and is_before_attesting_interval:
// store.proposer_boost_root = hash_tree_root(block)
func (f *ForkChoice) BoostProposerRoot(_ context.Context, args *forkchoicetypes.ProposerBoostRootArgs) error {
if args == nil {
return errors.New("nil function args provided to BoostProposerRoot")
}
secondsPerSlot := params.BeaconConfig().SecondsPerSlot
isBeforeAttestingInterval := args.SecondsIntoSlot < secondsPerSlot/params.BeaconConfig().IntervalsPerSlot
// Only update the boosted proposer root to the incoming block root
// if the block is for the current, clock-based slot and the block was timely.
if args.CurrentSlot == args.BlockSlot && isBeforeAttestingInterval {
f.store.proposerBoostLock.Lock()
f.store.proposerBoostRoot = args.BlockRoot
f.store.proposerBoostLock.Unlock()
}
return nil
}
// ResetBoostedProposerRoot sets the value of the proposer boosted root to zeros.
func (f *ForkChoice) ResetBoostedProposerRoot(_ context.Context) error {
f.store.proposerBoostLock.Lock()
@@ -64,9 +37,7 @@ func computeProposerBoostScore(validatorBalances []uint64) (score uint64, err er
err = errors.New("no active validators")
return
}
avgBalance := totalActiveBalance / numActive
committeeSize := numActive / uint64(params.BeaconConfig().SlotsPerEpoch)
committeeWeight := committeeSize * avgBalance
committeeWeight := totalActiveBalance / uint64(params.BeaconConfig().SlotsPerEpoch)
score = (committeeWeight * params.BeaconConfig().ProposerScoreBoost) / 100
return
}
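Both versions of the replaced lines above target the same quantity, totalActiveBalance / SlotsPerEpoch, but the old form first truncates numActive / SlotsPerEpoch, which can discard most of the weight when the active set is small. A small standalone illustration, not Prysm code, with the boost percentage chosen to match the "boost score is 8" figure referenced in the tests below:

package main

import "fmt"

const (
	slotsPerEpoch      = 32
	proposerScoreBoost = 40 // percent, as implied by the 64-validator, balance-10 setup producing a score of 8
)

// oldScore mirrors the removed avgBalance*committeeSize computation (assumes a non-empty active set).
func oldScore(balances []uint64) uint64 {
	var total, active uint64
	for _, b := range balances {
		if b > 0 {
			total += b
			active++
		}
	}
	avg := total / active
	committeeSize := active / slotsPerEpoch
	return committeeSize * avg * proposerScoreBoost / 100
}

// newScore mirrors the single-division form kept by the diff above.
func newScore(balances []uint64) uint64 {
	var total uint64
	for _, b := range balances {
		total += b
	}
	return total / slotsPerEpoch * proposerScoreBoost / 100
}

func main() {
	// 64 validators with balance 10, the setup used by the proposer boost tests below.
	small := make([]uint64, 64)
	for i := range small {
		small[i] = 10
	}
	fmt.Println(oldScore(small), newScore(small)) // 8 8 -- identical here
	// 40 validators with balance 32: the truncated committee size loses weight.
	uneven := make([]uint64, 40)
	for i := range uneven {
		uneven[i] = 32
	}
	fmt.Println(oldScore(uneven), newScore(uneven)) // 12 16
}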

View File

@@ -5,13 +5,18 @@ import (
"testing"
"time"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)
// Helper function to simulate the block being on time or delayed for proposer
// boost. It alters the genesisTime tracked by the store.
func driftGenesisTime(f *ForkChoice, slot types.Slot, delay uint64) {
f.SetGenesisTime(uint64(time.Now().Unix()) - uint64(slot)*params.BeaconConfig().SecondsPerSlot - delay)
}
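With the default mainnet timing (12-second slots, 3 intervals per slot, hence a 4-second attesting interval), a usage sketch inside a test would look like the following illustrative fragment, where f is a *ForkChoice as in the tests below:

driftGenesisTime(f, 4, 0)  // wall clock sits at the start of slot 4: a slot-4 block inserted now is timely and gets the boost
driftGenesisTime(f, 4, 11) // wall clock is 11s into slot 4, past the 4s attesting interval: no boost is applied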
// Simple, ex-ante attack mitigation using proposer boost.
// In a nutshell, an adversarial block proposer in slot n+1 keeps its proposal hidden.
// The honest block proposer in slot n+2 will then propose an honest block. The
@@ -20,17 +25,12 @@ import (
// If the honest proposal is boosted at slot n+2, it will win against this attacker.
func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
ctx := context.Background()
jEpoch, fEpoch := types.Epoch(0), types.Epoch(0)
zeroHash := params.BeaconConfig().ZeroHash
balances := make([]uint64, 64) // 64 active validators.
for i := 0; i < len(balances); i++ {
balances[i] = 10
}
jEpoch, fEpoch := types.Epoch(0), types.Epoch(0)
t.Run("nil args check", func(t *testing.T) {
f := setup(jEpoch, fEpoch)
err := f.BoostProposerRoot(ctx, nil)
require.ErrorContains(t, "nil function args", err)
})
t.Run("back-propagates boost score to ancestors after proposer boosting", func(t *testing.T) {
f := setup(jEpoch, fEpoch)
@@ -44,6 +44,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// |
// 1 <- HEAD
slot := types.Slot(1)
driftGenesisTime(f, slot, 0)
newRoot := indexToHash(1)
state, blkRoot, err := prepareForkchoiceState(
ctx,
@@ -55,7 +56,6 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
fEpoch,
)
require.NoError(t, err)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
f.ProcessAttestation(ctx, []uint64{0}, newRoot, fEpoch)
headRoot, err = f.Head(ctx, balances)
@@ -69,6 +69,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// |
// 2 <- HEAD
slot = types.Slot(2)
driftGenesisTime(f, slot, 0)
newRoot = indexToHash(2)
state, blkRoot, err = prepareForkchoiceState(
ctx,
@@ -95,6 +96,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// |
// 3 <- HEAD
slot = types.Slot(3)
driftGenesisTime(f, slot, 0)
newRoot = indexToHash(3)
state, blkRoot, err = prepareForkchoiceState(
ctx,
@@ -120,8 +122,9 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// 2
// / \
// 3 |
// 4 <- HEAD
// 4 <- HEAD
slot = types.Slot(4)
driftGenesisTime(f, slot, 0)
newRoot = indexToHash(4)
state, blkRoot, err = prepareForkchoiceState(
ctx,
@@ -135,45 +138,24 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
f.ProcessAttestation(ctx, []uint64{3}, newRoot, fEpoch)
clockSlot := types.Slot(4)
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: newRoot,
BlockSlot: slot,
CurrentSlot: clockSlot,
SecondsIntoSlot: 0,
}
require.NoError(t, f.BoostProposerRoot(ctx, args))
headRoot, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 3")
// Check the ancestor scores from the store.
require.Equal(t, 5, len(f.store.nodeByRoot))
require.Equal(t, 5, f.NodeCount())
// Expect nodes to have a boosted, back-propagated score.
// Ancestors have the added weights of their children. Genesis is a special exception at 0 weight.
require.Equal(t, f.store.treeRootNode.weight, uint64(0))
// Otherwise, assuming a block, A, that is not-genesis:
//
// A -> B -> C
//
//Where each one has a weight of 10 individually, the final weights will look like
//
// (A: 30) -> (B: 20) -> (C: 10)
//
// The boost adds 8 to the weight, so if C is boosted, we would have
//
// (A: 38) -> (B: 28) -> (C: 18)
//
// In this case, we have a small fork:
// The proposer boost score with this test's parameters is 8.
// Each of the nodes received one attestation accounting for 10.
// Node D is the only one with a proposer boost still applied:
//
// (A: 48) -> (B: 38) -> (C: 10)
// \_->(D: 18)
// \--------------->(D: 18)
//
// So B has its own weight, 10, plus the weights of both of its children C and D: 10 + 10 + 18 = 38.
// C keeps only its own 10 because D branches off from B rather than extending C.
node1 := f.store.nodeByRoot[indexToHash(1)]
require.Equal(t, node1.weight, uint64(48))
node2 := f.store.nodeByRoot[indexToHash(2)]
@@ -189,7 +171,6 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
headRoot, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(3), headRoot, "Incorrect head for justified epoch at slot 4")
})
t.Run("vanilla ex ante attack", func(t *testing.T) {
f := setup(jEpoch, fEpoch)
@@ -208,6 +189,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// \
// C <- Slot 2 HEAD
honestBlockSlot := types.Slot(2)
driftGenesisTime(f, honestBlockSlot, 0)
honestBlock := indexToHash(2)
state, blkRoot, err := prepareForkchoiceState(
ctx,
@@ -243,15 +225,6 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, honestBlock, r, "Incorrect head for justified epoch at slot 2")
// We boost the honest proposal at slot 2.
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: honestBlock,
BlockSlot: honestBlockSlot,
CurrentSlot: types.Slot(2),
SecondsIntoSlot: 0,
}
require.NoError(t, f.BoostProposerRoot(ctx, args))
// The maliciously withheld block has one vote.
votes := []uint64{1}
f.ProcessAttestation(ctx, votes, maliciouslyWithheldBlock, fEpoch)
@@ -277,10 +250,11 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// C builds on A, as proposer at slot 1 did not reveal B.
// A
// / \
// (B?) \
// (B?) \
// \
// C <- Slot 2 HEAD
honestBlockSlot := types.Slot(2)
driftGenesisTime(f, honestBlockSlot, 0)
honestBlock := indexToHash(2)
state, blkRoot, err := prepareForkchoiceState(
ctx,
@@ -318,15 +292,6 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, honestBlock, r, "Incorrect head for justified epoch at slot 2")
// We boost the honest proposal at slot 2.
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: honestBlock,
BlockSlot: honestBlockSlot,
CurrentSlot: types.Slot(2),
SecondsIntoSlot: 0,
}
require.NoError(t, f.BoostProposerRoot(ctx, args))
// An attestation is received for B that has more voting power than C with the proposer boost,
// allowing B to then become the head if their attestation has enough adversarial votes.
votes := []uint64{1, 2}
@@ -360,6 +325,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
assert.Equal(t, zeroHash, r, "Incorrect head with genesis")
cSlot := types.Slot(2)
driftGenesisTime(f, cSlot, 0)
c := indexToHash(2)
state, blkRoot, err := prepareForkchoiceState(
ctx,
@@ -378,15 +344,6 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, c, r, "Incorrect head for justified epoch at slot 2")
// We boost C.
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: c,
BlockSlot: cSlot,
CurrentSlot: types.Slot(2),
SecondsIntoSlot: 0,
}
require.NoError(t, f.BoostProposerRoot(ctx, args))
bSlot := types.Slot(1)
b := indexToHash(1)
state, blkRoot, err = prepareForkchoiceState(
@@ -430,101 +387,74 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, c, r, "Expected C to remain the head")
// Block D receives the boost.
votes = []uint64{2}
f.ProcessAttestation(ctx, votes, d, fEpoch)
args = &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: d,
BlockSlot: dSlot,
CurrentSlot: types.Slot(3),
SecondsIntoSlot: 0,
}
require.NoError(t, f.BoostProposerRoot(ctx, args))
// If a similar block at the same slot arrives in time to be boosted, it becomes the head:
driftGenesisTime(f, dSlot, 0)
d2 := indexToHash(30)
state, blkRoot, err = prepareForkchoiceState(
ctx,
dSlot,
d2,
b, // parent
zeroHash,
jEpoch,
fEpoch,
)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
votes = []uint64{2}
f.ProcessAttestation(ctx, votes, d2, fEpoch)
// Ensure D becomes the head thanks to boosting.
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, d, r, "Expected D to become the head")
assert.Equal(t, d2, r, "Expected D to become the head")
})
}
func TestForkChoice_BoostProposerRoot(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.SecondsPerSlot = 6
cfg.IntervalsPerSlot = 3
params.OverrideBeaconConfig(cfg)
ctx := context.Background()
root := [32]byte{'A'}
zeroHash := [32]byte{}
t.Run("does not boost block from different slot", func(t *testing.T) {
f := &ForkChoice{
store: &Store{},
}
blockRoot := [32]byte{'A'}
// Trying to boost a block from slot 0 should not work.
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: blockRoot,
BlockSlot: types.Slot(0),
CurrentSlot: types.Slot(1),
SecondsIntoSlot: 0,
}
err := f.BoostProposerRoot(ctx, args)
f := setup(0, 0)
slot := types.Slot(0)
currentSlot := types.Slot(1)
driftGenesisTime(f, currentSlot, 0)
state, blkRoot, err := prepareForkchoiceState(ctx, slot, root, zeroHash, zeroHash, 0, 0)
require.NoError(t, err)
require.DeepEqual(t, [32]byte{}, f.store.proposerBoostRoot)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.Equal(t, [32]byte{}, f.store.proposerBoostRoot)
})
t.Run("does not boost untimely block from same slot", func(t *testing.T) {
f := &ForkChoice{
store: &Store{},
}
// Genesis set to 1 slot ago + X where X > attesting interval.
attestingInterval := time.Duration(cfg.SecondsPerSlot/cfg.IntervalsPerSlot) * time.Second
greaterThanAttestingInterval := attestingInterval + time.Second
// Trying to boost a block from slot 1 that is untimely should not work.
blockRoot := [32]byte{'A'}
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: blockRoot,
BlockSlot: types.Slot(1),
CurrentSlot: 1,
SecondsIntoSlot: uint64(greaterThanAttestingInterval.Seconds()),
}
err := f.BoostProposerRoot(ctx, args)
f := setup(0, 0)
slot := types.Slot(1)
currentSlot := types.Slot(1)
driftGenesisTime(f, currentSlot, uint64(params.BeaconConfig().SecondsPerSlot-1))
state, blkRoot, err := prepareForkchoiceState(ctx, slot, root, zeroHash, zeroHash, 0, 0)
require.NoError(t, err)
require.DeepEqual(t, [32]byte{}, f.store.proposerBoostRoot)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.Equal(t, [32]byte{}, f.store.proposerBoostRoot)
})
t.Run("boosts perfectly timely block from same slot", func(t *testing.T) {
f := &ForkChoice{
store: &Store{},
}
// Genesis set to 1 slot ago + 0 seconds into the attesting interval.
blockRoot := [32]byte{'A'}
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: blockRoot,
BlockSlot: types.Slot(1),
CurrentSlot: types.Slot(1),
SecondsIntoSlot: 0,
}
err := f.BoostProposerRoot(ctx, args)
f := setup(0, 0)
slot := types.Slot(1)
currentSlot := types.Slot(1)
driftGenesisTime(f, currentSlot, 0)
state, blkRoot, err := prepareForkchoiceState(ctx, slot, root, zeroHash, zeroHash, 0, 0)
require.NoError(t, err)
require.DeepEqual(t, [32]byte{'A'}, f.store.proposerBoostRoot)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.Equal(t, root, f.store.proposerBoostRoot)
})
t.Run("boosts timely block from same slot", func(t *testing.T) {
f := &ForkChoice{
store: &Store{},
}
blockRoot := [32]byte{'A'}
halfAttestingInterval := time.Second
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: blockRoot,
BlockSlot: types.Slot(1),
CurrentSlot: types.Slot(1),
SecondsIntoSlot: uint64(halfAttestingInterval.Seconds()),
}
err := f.BoostProposerRoot(ctx, args)
f := setup(0, 0)
slot := types.Slot(1)
currentSlot := types.Slot(1)
driftGenesisTime(f, currentSlot, 1)
state, blkRoot, err := prepareForkchoiceState(ctx, slot, root, zeroHash, zeroHash, 0, 0)
require.NoError(t, err)
require.DeepEqual(t, [32]byte{'A'}, f.store.proposerBoostRoot)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.Equal(t, root, f.store.proposerBoostRoot)
})
}

View File

@@ -3,10 +3,12 @@ package doublylinkedtree
import (
"context"
"fmt"
"time"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/time/slots"
"go.opencensus.io/trace"
)
@@ -63,11 +65,20 @@ func (s *Store) PruneThreshold() uint64 {
func (s *Store) head(ctx context.Context) ([32]byte, error) {
_, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.head")
defer span.End()
s.checkpointsLock.RLock()
defer s.checkpointsLock.RUnlock()
// JustifiedRoot has to be known
justifiedNode, ok := s.nodeByRoot[s.justifiedCheckpoint.Root]
if !ok || justifiedNode == nil {
return [32]byte{}, errUnknownJustifiedRoot
// If the justified checkpoint is the genesis checkpoint, its root is the
// zero hash. In this case the justified node should be the root of the
// forkchoice tree.
if s.justifiedCheckpoint.Epoch == params.BeaconConfig().GenesisEpoch {
justifiedNode = s.treeRootNode
} else {
return [32]byte{}, errUnknownJustifiedRoot
}
}
// If the justified node doesn't have a best descendant,
@@ -78,8 +89,8 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
}
if !bestDescendant.viableForHead(s.justifiedCheckpoint.Epoch, s.finalizedCheckpoint.Epoch) {
return [32]byte{}, fmt.Errorf("head at slot %d with weight %d is not eligible, finalizedEpoch %d != %d, justifiedEpoch %d != %d",
bestDescendant.slot, bestDescendant.weight/10e9, bestDescendant.finalizedEpoch, s.finalizedCheckpoint.Epoch, bestDescendant.justifiedEpoch, s.justifiedCheckpoint.Epoch)
return [32]byte{}, fmt.Errorf("head at slot %d with weight %d is not eligible, finalizedEpoch, justified Epoch %d, %d != %d, %d",
bestDescendant.slot, bestDescendant.weight/10e9, bestDescendant.finalizedEpoch, bestDescendant.justifiedEpoch, s.finalizedCheckpoint.Epoch, s.justifiedCheckpoint.Epoch)
}
// Update metrics.
@@ -134,12 +145,26 @@ func (s *Store) insert(ctx context.Context,
}
} else {
parent.children = append(parent.children, n)
// Apply proposer boost
timeNow := uint64(time.Now().Unix())
if timeNow < s.genesisTime {
return nil
}
secondsIntoSlot := (timeNow - s.genesisTime) % params.BeaconConfig().SecondsPerSlot
currentSlot := slots.CurrentSlot(s.genesisTime)
boostThreshold := params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().IntervalsPerSlot
if currentSlot == slot && secondsIntoSlot < boostThreshold {
s.proposerBoostLock.Lock()
s.proposerBoostRoot = root
s.proposerBoostLock.Unlock()
}
// Update best descendants
if err := s.treeRootNode.updateBestDescendant(ctx,
s.justifiedCheckpoint.Epoch, s.finalizedCheckpoint.Epoch); err != nil {
return err
}
}
// Update metrics.
processedBlockCount.Inc()
nodeCount.Set(float64(len(s.nodeByRoot)))
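The boost logic inserted above replaces the separate BoostProposerRoot call: timeliness is now decided at insertion time from the store's genesis time. A self-contained sketch of that check, using the default mainnet constants (12-second slots, 3 intervals per slot) and standalone names rather than the Store method itself:

package main

import (
	"fmt"
	"time"
)

const (
	secondsPerSlot   = 12
	intervalsPerSlot = 3
)

// isBoostable reports whether a block for blockSlot, arriving now, would receive the
// proposer boost: it must be for the current wall-clock slot and arrive within the
// first secondsPerSlot/intervalsPerSlot seconds of that slot.
func isBoostable(genesisTime uint64, blockSlot uint64) bool {
	now := uint64(time.Now().Unix())
	if now < genesisTime {
		return false // mirrors the timeNow < genesisTime guard above
	}
	secondsIntoSlot := (now - genesisTime) % secondsPerSlot
	currentSlot := (now - genesisTime) / secondsPerSlot
	return currentSlot == blockSlot && secondsIntoSlot < secondsPerSlot/intervalsPerSlot
}

func main() {
	now := uint64(time.Now().Unix())
	genesis := now - 4*secondsPerSlot       // slot 4 began just now
	fmt.Println(isBoostable(genesis, 4))    // true: timely block for the current slot
	fmt.Println(isBoostable(genesis-5, 4))  // false: about 5 seconds into slot 4, past the 4s interval
	fmt.Println(isBoostable(genesis, 3))    // false: block is for a past slot
}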
@@ -170,12 +195,15 @@ func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalized
// prune prunes the fork choice store with the new finalized root. The store is only pruned if the finalized
// root is different from the previous one and the number of nodes in the store has met the prune threshold.
// This function does not prune invalid, optimistically synced nodes; it deals only with pruning upon finalization.
func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte) error {
func (s *Store) prune(ctx context.Context) error {
_, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.Prune")
defer span.End()
s.nodesLock.Lock()
defer s.nodesLock.Unlock()
s.checkpointsLock.RLock()
finalizedRoot := s.finalizedCheckpoint.Root
s.checkpointsLock.RUnlock()
finalizedNode, ok := s.nodeByRoot[finalizedRoot]
if !ok || finalizedNode == nil {

View File

@@ -79,7 +79,7 @@ func TestForkChoice_HasNode(t *testing.T) {
func TestStore_Head_UnknownJustifiedRoot(t *testing.T) {
f := setup(0, 0)
f.store.justifiedCheckpoint.Root = [32]byte{'a'}
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'a'}}
_, err := f.store.head(context.Background())
assert.ErrorContains(t, errUnknownJustifiedRoot.Error(), err)
}
@@ -171,7 +171,8 @@ func TestStore_Prune_LessThanThreshold(t *testing.T) {
// Finalized root has depth 99 so everything before it should be pruned,
// but PruneThreshold is at 100 so nothing will be pruned.
require.NoError(t, s.prune(context.Background(), indexToHash(99)))
s.finalizedCheckpoint.Root = indexToHash(99)
require.NoError(t, s.prune(context.Background()))
assert.Equal(t, 100, len(s.nodeByRoot), "Incorrect nodes count")
}
@@ -193,7 +194,8 @@ func TestStore_Prune_MoreThanThreshold(t *testing.T) {
s.pruneThreshold = 0
// Finalized root is at index 99 so everything before 99 should be pruned.
require.NoError(t, s.prune(context.Background(), indexToHash(99)))
s.finalizedCheckpoint.Root = indexToHash(99)
require.NoError(t, s.prune(context.Background()))
assert.Equal(t, 1, len(s.nodeByRoot), "Incorrect nodes count")
}
@@ -215,11 +217,13 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
s.pruneThreshold = 0
// Finalized root is at index 11 so everything before 11 should be pruned.
require.NoError(t, s.prune(context.Background(), indexToHash(10)))
s.finalizedCheckpoint.Root = indexToHash(10)
require.NoError(t, s.prune(context.Background()))
assert.Equal(t, 90, len(s.nodeByRoot), "Incorrect nodes count")
// One more time.
require.NoError(t, s.prune(context.Background(), indexToHash(20)))
s.finalizedCheckpoint.Root = indexToHash(20)
require.NoError(t, s.prune(context.Background()))
assert.Equal(t, 80, len(s.nodeByRoot), "Incorrect nodes count")
}
@@ -242,7 +246,8 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
f.store.pruneThreshold = 0
s := f.store
require.NoError(t, s.prune(context.Background(), indexToHash(1)))
s.finalizedCheckpoint.Root = indexToHash(1)
require.NoError(t, s.prune(context.Background()))
require.Equal(t, len(s.nodeByRoot), 1)
}
@@ -324,7 +329,8 @@ func TestStore_PruneMapsNodes(t *testing.T) {
s := f.store
s.pruneThreshold = 0
require.NoError(t, s.prune(context.Background(), indexToHash(uint64(1))))
s.finalizedCheckpoint.Root = indexToHash(1)
require.NoError(t, s.prune(context.Background()))
require.Equal(t, len(s.nodeByRoot), 1)
}

View File

@@ -19,6 +19,7 @@ type ForkChoice struct {
// Store defines the fork choice store which includes block nodes and the last view of checkpoint information.
type Store struct {
justifiedCheckpoint *forkchoicetypes.Checkpoint // latest justified epoch in store.
bestJustifiedCheckpoint *forkchoicetypes.Checkpoint // best justified checkpoint in store.
finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized epoch in store.
pruneThreshold uint64 // do not prune tree unless threshold is reached.
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
@@ -29,9 +30,11 @@ type Store struct {
nodeByRoot map[[fieldparams.RootLength]byte]*Node // nodes indexed by roots.
nodeByPayload map[[fieldparams.RootLength]byte]*Node // nodes indexed by payload Hash
slashedIndices map[types.ValidatorIndex]bool // the list of equivocating validator indices
originRoot [fieldparams.RootLength]byte // The genesis block root
nodesLock sync.RWMutex
proposerBoostLock sync.RWMutex
checkpointsLock sync.RWMutex
genesisTime uint64
}
// Node defines the individual block which includes its block parent, ancestor and how much weight accounted for it.

View File

@@ -4,7 +4,6 @@ import (
"context"
"testing"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
@@ -114,7 +113,7 @@ func TestVotes_CanFindHead(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
// Insert block 5 with justified epoch 2, it should be filtered out:
// Insert block 5 with justified epoch 2, it becomes head
// 0
// / \
// 2 1
@@ -130,9 +129,9 @@ func TestVotes_CanFindHead(t *testing.T) {
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
assert.Equal(t, indexToHash(5), r, "Incorrect head for with justified epoch at 1")
// Insert block 6 with justified epoch 0:
// Insert block 6 with justified epoch 3: verify it's head
// 0
// / \
// 2 1
@@ -141,28 +140,18 @@ func TestVotes_CanFindHead(t *testing.T) {
// |
// 4 <- head
// / \
// 5 6 <- justified epoch = 0
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(6), indexToHash(4), params.BeaconConfig().ZeroHash, 1, 1)
// 5 6 <- justified epoch = 3
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(6), indexToHash(4), params.BeaconConfig().ZeroHash, 3, 2)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 1")
// Moved 2 votes to block 5:
// 0
// / \
// 2 1
// |
// 3
// |
// 4
// / \
// 2 votes-> 5 6
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(6), indexToHash(4), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(5), 4)
// Insert blocks 7, 8 and 9:
// Insert blocks 7 and 8
// 6 should still be the head, even though 5 has all the votes.
// 0
// / \
@@ -177,23 +166,17 @@ func TestVotes_CanFindHead(t *testing.T) {
// 7
// |
// 8
// |
// 9
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(7), indexToHash(5), params.BeaconConfig().ZeroHash, 2, 2)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(8), indexToHash(7), params.BeaconConfig().ZeroHash, 2, 2)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(9), indexToHash(8), params.BeaconConfig().ZeroHash, 2, 2)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 1")
// Update fork choice justified epoch to 2 and start block to 5.
// Insert block 10 with justified epoch 3, it becomes head
// Verify 9 is the head:
// 0
// / \
@@ -209,15 +192,15 @@ func TestVotes_CanFindHead(t *testing.T) {
// |
// 8
// |
// 9 <- head
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Root: indexToHash(5), Epoch: 2}
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Root: indexToHash(5), Epoch: 2}
// 10 <- head
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(10), indexToHash(8), params.BeaconConfig().ZeroHash, 3, 2)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 2")
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3")
// Insert block 10 and 2 validators updated their vote to 9.
// Verify 9 is the head:
// Insert block 9, forking block 10; verify it is the head (lexicographic order)
// 0
// / \
// 2 1
@@ -232,54 +215,63 @@ func TestVotes_CanFindHead(t *testing.T) {
// |
// 8
// / \
// 2 votes->9 10
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(9), 5)
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(10), indexToHash(8), params.BeaconConfig().ZeroHash, 2, 2)
// 9 10
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(9), indexToHash(8), params.BeaconConfig().ZeroHash, 3, 2)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 2")
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 3")
// Move two votes to 10, verify it is the head
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(10), 5)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3")
// Add 3 more validators to the system.
balances = []uint64{1, 1, 1, 1, 1}
// The new validators voted for 10.
f.ProcessAttestation(context.Background(), []uint64{2, 3, 4}, indexToHash(10), 5)
// The new head should be 10.
// The new validators voted for 9
f.ProcessAttestation(context.Background(), []uint64{2, 3, 4}, indexToHash(9), 5)
// The new head should be 9.
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 2")
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 3")
// Set the balances of the last 2 validators to 0.
balances = []uint64{1, 1, 1, 0, 0}
// The head should be back to 9.
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 1")
// Set the balances back to normal.
balances = []uint64{1, 1, 1, 1, 1}
// The head should be back to 10.
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 2")
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3")
// Remove the last 2 validators.
balances = []uint64{1, 1, 1}
// Set the balances back to normal.
balances = []uint64{1, 1, 1, 1, 1}
// The head should be back to 9.
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 1")
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 3")
// Remove the last 2 validators.
balances = []uint64{1, 1, 1}
// The head should be back to 10.
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3")
// Verify pruning below the prune threshold does not affect head.
f.store.pruneThreshold = 1000
require.NoError(t, f.store.prune(context.Background(), indexToHash(5)))
prevRoot := f.store.finalizedCheckpoint.Root
f.store.finalizedCheckpoint.Root = indexToHash(5)
require.NoError(t, f.store.prune(context.Background()))
assert.Equal(t, 11, len(f.store.nodeByRoot), "Incorrect nodes length after prune")
f.store.finalizedCheckpoint.Root = prevRoot
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 2")
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3")
// Verify pruning above the prune threshold does prune:
// 0
@@ -298,12 +290,16 @@ func TestVotes_CanFindHead(t *testing.T) {
// / \
// 9 10
f.store.pruneThreshold = 1
require.NoError(t, f.store.prune(context.Background(), indexToHash(5)))
f.store.finalizedCheckpoint.Root = indexToHash(5)
require.NoError(t, f.store.prune(context.Background()))
assert.Equal(t, 5, len(f.store.nodeByRoot), "Incorrect nodes length after prune")
// The justified root was artificially pruned away above, so reset it to a node that is still in the store.
f.store.justifiedCheckpoint.Root = indexToHash(5)
f.store.finalizedCheckpoint.Root = prevRoot
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 2")
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 2")
// Insert new block 11 and verify head is at 11.
// 5 6
@@ -312,14 +308,14 @@ func TestVotes_CanFindHead(t *testing.T) {
// |
// 8
// / \
// 9 10
// 10 9
// |
// head-> 11
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(11), indexToHash(9), params.BeaconConfig().ZeroHash, 2, 2)
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(11), indexToHash(10), params.BeaconConfig().ZeroHash, 3, 2)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(11), r, "Incorrect head for with justified epoch at 2")
assert.Equal(t, indexToHash(11), r, "Incorrect head for with justified epoch at 3")
}

View File

@@ -15,7 +15,6 @@ type ForkChoicer interface {
HeadRetriever // to compute head.
BlockProcessor // to track new block for fork choice.
AttestationProcessor // to track new attestation for fork choice.
Pruner // to clean old data for fork choice.
Getter // to retrieve fork choice information.
Setter // to set fork choice information.
ProposerBooster // ability to boost timely-proposed block roots.
@@ -40,14 +39,8 @@ type AttestationProcessor interface {
InsertSlashedIndex(context.Context, types.ValidatorIndex)
}
// Pruner prunes the fork choice upon new finalization. This is used to keep fork choice sane.
type Pruner interface {
Prune(context.Context, [32]byte) error
}
// ProposerBooster is able to boost the proposer's root score during fork choice.
type ProposerBooster interface {
BoostProposerRoot(ctx context.Context, args *forkchoicetypes.ProposerBoostRootArgs) error
ResetBoostedProposerRoot(ctx context.Context) error
}
@@ -56,7 +49,7 @@ type Getter interface {
HasNode([32]byte) bool
ProposerBoost() [fieldparams.RootLength]byte
HasParent(root [32]byte) bool
AncestorRoot(ctx context.Context, root [32]byte, slot types.Slot) ([]byte, error)
AncestorRoot(ctx context.Context, root [32]byte, slot types.Slot) ([32]byte, error)
CommonAncestorRoot(ctx context.Context, root1 [32]byte, root2 [32]byte) ([32]byte, error)
IsCanonical(root [32]byte) bool
FinalizedCheckpoint() *forkchoicetypes.Checkpoint
@@ -71,4 +64,6 @@ type Setter interface {
SetOptimisticToInvalid(context.Context, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte) ([][32]byte, error)
UpdateJustifiedCheckpoint(*forkchoicetypes.Checkpoint) error
UpdateFinalizedCheckpoint(*forkchoicetypes.Checkpoint) error
SetGenesisTime(uint64)
SetOriginRoot([32]byte)
}
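One caller-visible effect of the AncestorRoot signature change is shown in the first test diff of this comparison: the returned value is already a [32]byte, so the bytesutil conversion step disappears. A minimal before/after fragment, illustrative only and reusing the test's own names:

// Before: AncestorRoot returned []byte, so call sites converted it.
//     r, err := f.AncestorRoot(ctx, [32]byte{'3'}, 150)
//     root := bytesutil.ToBytes32(r)
//     require.Equal(t, root, [32]byte{'1'})
// After: the [32]byte return can be compared directly.
//     r, err := f.AncestorRoot(ctx, [32]byte{'3'}, 150)
//     require.Equal(t, r, [32]byte{'1'})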

View File

@@ -31,6 +31,7 @@ go_library(
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
@@ -63,6 +64,7 @@ go_test(
"//consensus-types/wrapper:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",

View File

@@ -9,6 +9,7 @@ import (
v3 "github.com/prysmaticlabs/prysm/beacon-chain/state/v3"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
@@ -29,7 +30,7 @@ func prepareForkchoiceState(
ParentRoot: parentRoot[:],
}
executionHeader := &ethpb.ExecutionPayloadHeader{
executionHeader := &enginev1.ExecutionPayloadHeader{
BlockHash: payloadHash[:],
}
@@ -55,7 +56,6 @@ func prepareForkchoiceState(
st, err := v3.InitializeFromProto(base)
return st, blockRoot, err
}
func TestFFGUpdates_OneBranch(t *testing.T) {
balances := []uint64{1, 1}
f := setup(0, 0)
@@ -104,8 +104,8 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
// 2 <- head
// |
// 3
jc := &forkchoicetypes.Checkpoint{Epoch: 1, Root: indexToHash(2)}
f.store.justifiedCheckpoint = jc
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Root: indexToHash(1), Epoch: 1}
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Root: indexToHash(0), Epoch: 0}
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head with justified epoch at 1")
@@ -118,8 +118,7 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
// 2 <- start
// |
// 3 <- head
jc = &forkchoicetypes.Checkpoint{Epoch: 2, Root: indexToHash(3)}
f.store.justifiedCheckpoint = jc
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Root: indexToHash(3), Epoch: 2}
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(3), r, "Incorrect head with justified epoch at 2")
@@ -255,8 +254,7 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, indexToHash(10), r, "Incorrect head with justified epoch at 0")
jc := &forkchoicetypes.Checkpoint{Epoch: 1, Root: indexToHash(1)}
f.store.justifiedCheckpoint = jc
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 1, Root: indexToHash(1)}
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(7), r, "Incorrect head with justified epoch at 0")
@@ -266,6 +264,7 @@ func setup(justifiedEpoch, finalizedEpoch types.Epoch) *ForkChoice {
f := New()
f.store.nodesIndices[params.BeaconConfig().ZeroHash] = 0
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: justifiedEpoch, Root: params.BeaconConfig().ZeroHash}
f.store.bestJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: justifiedEpoch, Root: params.BeaconConfig().ZeroHash}
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: finalizedEpoch, Root: params.BeaconConfig().ZeroHash}
f.store.nodes = append(f.store.nodes, &Node{
slot: 0,

View File

@@ -69,7 +69,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
// Insert block 5 with justified epoch of 2, verify head is still at 4.
// Insert block 5 with justified epoch of 2, verify head is 5
// 0
// / \
// 2 1
@@ -82,32 +82,6 @@ func TestNoVote_CanFindHead(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
// Verify there's an error when starting from a block with wrong justified epoch.
// 0
// / \
// 2 1
// | |
// head -> 4 3
// |
// 5 <- starting from 5 with justified epoch 0 should error
f.store.justifiedCheckpoint.Root = indexToHash(5)
_, err = f.Head(context.Background(), balances)
wanted := "head at slot 0 with weight 0 is not eligible, finalizedEpoch 1 != 1, justifiedEpoch 2 != 1"
require.ErrorContains(t, wanted, err)
// Set the justified epoch to 2 and start block to 5 to verify head is 5.
// 0
// / \
// 2 1
// | |
// 4 3
// |
// 5 <- head
f.store.justifiedCheckpoint.Epoch = 2
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(5), r, "Incorrect head for with justified epoch at 2")
// Insert block 6 with justified epoch of 2, verify head is at 6.

View File

@@ -4,36 +4,9 @@ import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/config/params"
)
// BoostProposerRoot sets the block root which should be boosted during
// the LMD fork choice algorithm calculations. This is meant to reward timely,
// proposed blocks which occur before a cutoff interval set to
// SECONDS_PER_SLOT // INTERVALS_PER_SLOT.
//
// time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
// is_before_attesting_interval = time_into_slot < SECONDS_PER_SLOT // INTERVALS_PER_SLOT
// if get_current_slot(store) == block.slot and is_before_attesting_interval:
// store.proposer_boost_root = hash_tree_root(block)
func (f *ForkChoice) BoostProposerRoot(_ context.Context, args *types.ProposerBoostRootArgs) error {
if args == nil {
return errors.New("nil function args provided to BoostProposerRoot")
}
secondsPerSlot := params.BeaconConfig().SecondsPerSlot
isBeforeAttestingInterval := args.SecondsIntoSlot < secondsPerSlot/params.BeaconConfig().IntervalsPerSlot
// Only update the boosted proposer root to the incoming block root
// if the block is for the current, clock-based slot and the block was timely.
if args.CurrentSlot == args.BlockSlot && isBeforeAttestingInterval {
f.store.proposerBoostLock.Lock()
f.store.proposerBoostRoot = args.BlockRoot
f.store.proposerBoostLock.Unlock()
}
return nil
}
// ResetBoostedProposerRoot sets the value of the proposer boosted root to zeros.
func (f *ForkChoice) ResetBoostedProposerRoot(_ context.Context) error {
f.store.proposerBoostLock.Lock()
@@ -64,9 +37,7 @@ func computeProposerBoostScore(validatorBalances []uint64) (score uint64, err er
err = errors.New("no active validators")
return
}
avgBalance := totalActiveBalance / numActive
committeeSize := numActive / uint64(params.BeaconConfig().SlotsPerEpoch)
committeeWeight := committeeSize * avgBalance
committeeWeight := totalActiveBalance / uint64(params.BeaconConfig().SlotsPerEpoch)
score = (committeeWeight * params.BeaconConfig().ProposerScoreBoost) / 100
return
}

View File

@@ -5,13 +5,18 @@ import (
"testing"
"time"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)
// Helper function to simulate the block being on time or delayed for proposer
// boost. It alters the genesisTime tracked by the store.
func driftGenesisTime(f *ForkChoice, slot types.Slot, delay uint64) {
f.SetGenesisTime(uint64(time.Now().Unix()) - uint64(slot)*params.BeaconConfig().SecondsPerSlot - delay)
}
// Simple, ex-ante attack mitigation using proposer boost.
// In a nutshell, an adversarial block proposer in slot n+1 keeps its proposal hidden.
// The honest block proposer in slot n+2 will then propose an honest block. The
@@ -26,11 +31,6 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
for i := 0; i < len(balances); i++ {
balances[i] = 10
}
t.Run("nil args check", func(t *testing.T) {
f := setup(jEpoch, fEpoch)
err := f.BoostProposerRoot(ctx, nil)
require.ErrorContains(t, "nil function args", err)
})
t.Run("back-propagates boost score to ancestors after proposer boosting", func(t *testing.T) {
f := setup(jEpoch, fEpoch)
@@ -44,6 +44,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// |
// 1 <- HEAD
slot := types.Slot(1)
driftGenesisTime(f, slot, 0)
newRoot := indexToHash(1)
state, blkRoot, err := prepareForkchoiceState(
ctx,
@@ -68,6 +69,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// |
// 2 <- HEAD
slot = types.Slot(2)
driftGenesisTime(f, slot, 0)
newRoot = indexToHash(2)
state, blkRoot, err = prepareForkchoiceState(
ctx,
@@ -94,6 +96,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// |
// 3 <- HEAD
slot = types.Slot(3)
driftGenesisTime(f, slot, 0)
newRoot = indexToHash(3)
state, blkRoot, err = prepareForkchoiceState(
ctx,
@@ -121,6 +124,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// 3 |
// 4 <- HEAD
slot = types.Slot(4)
driftGenesisTime(f, slot, 0)
newRoot = indexToHash(4)
state, blkRoot, err = prepareForkchoiceState(
ctx,
@@ -134,14 +138,6 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
f.ProcessAttestation(ctx, []uint64{3}, newRoot, fEpoch)
clockSlot := types.Slot(4)
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: newRoot,
BlockSlot: slot,
CurrentSlot: clockSlot,
SecondsIntoSlot: 0,
}
require.NoError(t, f.BoostProposerRoot(ctx, args))
headRoot, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 3")
@@ -153,29 +149,17 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// Ancestors have the added weights of their children. Genesis is a special exception at 0 weight.
require.Equal(t, f.store.nodes[0].weight, uint64(0))
// Otherwise, assuming a block, A, that is not-genesis:
//
// A -> B -> C
//
//Where each one has a weight of 10 individually, the final weights will look like
//
// (A: 30) -> (B: 20) -> (C: 10)
//
// The boost adds 14 to the weight, so if C is boosted, we would have
//
// (A: 38) -> (B: 28) -> (C: 18)
//
// In this case, we have a small fork:
// The proposer boost score with this test's parameters is 8.
// Each of the nodes received one attestation accounting for 10.
// Node D is the only one with a proposer boost still applied:
//
// (A: 48) -> (B: 38) -> (C: 10)
// \_->(D: 18)
// \--------------->(D: 18)
//
// So B has its own weight, 10, plus the weights of both of its children C and D: 10 + 10 + 18 = 38.
// C keeps only its own 10 because D branches off from B rather than extending C.
require.Equal(t, f.store.nodes[4].weight, uint64(18))
require.Equal(t, f.store.nodes[1].weight, uint64(48))
require.Equal(t, f.store.nodes[2].weight, uint64(38))
require.Equal(t, f.store.nodes[3].weight, uint64(10))
require.Equal(t, f.store.nodes[4].weight, uint64(18))
// Regression: process attestations for C, check that it
// becomes head, we need two attestations to have C.weight = 30 > 24 = D.weight
@@ -201,6 +185,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// \
// C <- Slot 2 HEAD
honestBlockSlot := types.Slot(2)
driftGenesisTime(f, honestBlockSlot, 0)
honestBlock := indexToHash(2)
state, blkRoot, err := prepareForkchoiceState(
ctx,
@@ -236,15 +221,6 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, honestBlock, r, "Incorrect head for justified epoch at slot 2")
// We boost the honest proposal at slot 2.
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: honestBlock,
BlockSlot: honestBlockSlot,
CurrentSlot: types.Slot(2),
SecondsIntoSlot: 0,
}
require.NoError(t, f.BoostProposerRoot(ctx, args))
// The maliciously withheld block has one vote.
votes := []uint64{1}
f.ProcessAttestation(ctx, votes, maliciouslyWithheldBlock, fEpoch)
@@ -270,10 +246,11 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// C builds on A, as proposer at slot 1 did not reveal B.
// A
// / \
// (B?) \
// (B?) \
// \
// C <- Slot 2 HEAD
honestBlockSlot := types.Slot(2)
driftGenesisTime(f, honestBlockSlot, 0)
honestBlock := indexToHash(2)
state, blkRoot, err := prepareForkchoiceState(
ctx,
@@ -311,15 +288,6 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, honestBlock, r, "Incorrect head for justified epoch at slot 2")
// We boost the honest proposal at slot 2.
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: honestBlock,
BlockSlot: honestBlockSlot,
CurrentSlot: types.Slot(2),
SecondsIntoSlot: 0,
}
require.NoError(t, f.BoostProposerRoot(ctx, args))
// An attestation is received for B that has more voting power than C with the proposer boost,
// allowing B to then become the head if their attestation has enough adversarial votes.
votes := []uint64{1, 2}
@@ -353,6 +321,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
assert.Equal(t, zeroHash, r, "Incorrect head with genesis")
cSlot := types.Slot(2)
driftGenesisTime(f, cSlot, 0)
c := indexToHash(2)
state, blkRoot, err := prepareForkchoiceState(
ctx,
@@ -371,15 +340,6 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, c, r, "Incorrect head for justified epoch at slot 2")
// We boost C.
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: c,
BlockSlot: cSlot,
CurrentSlot: types.Slot(2),
SecondsIntoSlot: 0,
}
require.NoError(t, f.BoostProposerRoot(ctx, args))
bSlot := types.Slot(1)
b := indexToHash(1)
state, blkRoot, err = prepareForkchoiceState(
@@ -423,101 +383,74 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, c, r, "Expected C to remain the head")
// Block D receives the boost.
votes = []uint64{2}
f.ProcessAttestation(ctx, votes, d, fEpoch)
args = &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: d,
BlockSlot: dSlot,
CurrentSlot: types.Slot(3),
SecondsIntoSlot: 0,
}
require.NoError(t, f.BoostProposerRoot(ctx, args))
// If a similar block at the same slot arrives in time to be boosted, it becomes the head:
driftGenesisTime(f, dSlot, 0)
d2 := indexToHash(30)
state, blkRoot, err = prepareForkchoiceState(
ctx,
dSlot,
d2,
b, // parent
zeroHash,
jEpoch,
fEpoch,
)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
votes = []uint64{2}
f.ProcessAttestation(ctx, votes, d2, fEpoch)
// Ensure D becomes the head thanks to boosting.
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, d, r, "Expected D to become the head")
assert.Equal(t, d2, r, "Expected D to become the head")
})
}
func TestForkChoice_BoostProposerRoot(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.SecondsPerSlot = 6
cfg.IntervalsPerSlot = 3
params.OverrideBeaconConfig(cfg)
ctx := context.Background()
root := [32]byte{'A'}
zeroHash := [32]byte{}
t.Run("does not boost block from different slot", func(t *testing.T) {
f := &ForkChoice{
store: &Store{},
}
blockRoot := [32]byte{'A'}
// Trying to boost a block from slot 0 should not work.
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: blockRoot,
BlockSlot: types.Slot(0),
CurrentSlot: types.Slot(1),
SecondsIntoSlot: 0,
}
err := f.BoostProposerRoot(ctx, args)
f := setup(0, 0)
slot := types.Slot(0)
currentSlot := types.Slot(1)
driftGenesisTime(f, currentSlot, 0)
state, blkRoot, err := prepareForkchoiceState(ctx, slot, root, zeroHash, zeroHash, 0, 0)
require.NoError(t, err)
require.DeepEqual(t, [32]byte{}, f.store.proposerBoostRoot)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.Equal(t, [32]byte{}, f.store.proposerBoostRoot)
})
t.Run("does not boost untimely block from same slot", func(t *testing.T) {
f := &ForkChoice{
store: &Store{},
}
// Genesis set to 1 slot ago + X where X > attesting interval.
attestingInterval := time.Duration(cfg.SecondsPerSlot/cfg.IntervalsPerSlot) * time.Second
greaterThanAttestingInterval := attestingInterval + time.Second
// Trying to boost a block from slot 1 that is untimely should not work.
blockRoot := [32]byte{'A'}
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: blockRoot,
BlockSlot: types.Slot(1),
CurrentSlot: 1,
SecondsIntoSlot: uint64(greaterThanAttestingInterval.Seconds()),
}
err := f.BoostProposerRoot(ctx, args)
f := setup(0, 0)
slot := types.Slot(1)
currentSlot := types.Slot(1)
driftGenesisTime(f, currentSlot, uint64(params.BeaconConfig().SecondsPerSlot-1))
state, blkRoot, err := prepareForkchoiceState(ctx, slot, root, zeroHash, zeroHash, 0, 0)
require.NoError(t, err)
require.DeepEqual(t, [32]byte{}, f.store.proposerBoostRoot)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.Equal(t, [32]byte{}, f.store.proposerBoostRoot)
})
t.Run("boosts perfectly timely block from same slot", func(t *testing.T) {
f := &ForkChoice{
store: &Store{},
}
// Genesis set to 1 slot ago + 0 seconds into the attesting interval.
blockRoot := [32]byte{'A'}
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: blockRoot,
BlockSlot: types.Slot(1),
CurrentSlot: types.Slot(1),
SecondsIntoSlot: 0,
}
err := f.BoostProposerRoot(ctx, args)
f := setup(0, 0)
slot := types.Slot(1)
currentSlot := types.Slot(1)
driftGenesisTime(f, currentSlot, 0)
state, blkRoot, err := prepareForkchoiceState(ctx, slot, root, zeroHash, zeroHash, 0, 0)
require.NoError(t, err)
require.DeepEqual(t, [32]byte{'A'}, f.store.proposerBoostRoot)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.Equal(t, root, f.store.proposerBoostRoot)
})
t.Run("boosts timely block from same slot", func(t *testing.T) {
f := &ForkChoice{
store: &Store{},
}
blockRoot := [32]byte{'A'}
halfAttestingInterval := time.Second
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: blockRoot,
BlockSlot: types.Slot(1),
CurrentSlot: types.Slot(1),
SecondsIntoSlot: uint64(halfAttestingInterval.Seconds()),
}
err := f.BoostProposerRoot(ctx, args)
f := setup(0, 0)
slot := types.Slot(1)
currentSlot := types.Slot(1)
driftGenesisTime(f, currentSlot, 1)
state, blkRoot, err := prepareForkchoiceState(ctx, slot, root, zeroHash, zeroHash, 0, 0)
require.NoError(t, err)
require.DeepEqual(t, [32]byte{'A'}, f.store.proposerBoostRoot)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.Equal(t, root, f.store.proposerBoostRoot)
})
}
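To sanity-check the genesis drifts used in these tests: with the overridden config (SecondsPerSlot = 6, IntervalsPerSlot = 3) a block is boosted only while fewer than 6/3 = 2 seconds of its slot have elapsed, so a drift of 1 second is timely and a drift of SecondsPerSlot-1 = 5 seconds is not. A minimal standalone sketch of that check follows; the constants and the isTimely helper are illustrative, not part of the forkchoice package.
package main

import "fmt"

// Illustrative constants mirroring the test's overridden config.
const (
	secondsPerSlot   = 6
	intervalsPerSlot = 3
)

// isTimely reports whether a block arriving secondsIntoSlot seconds into its
// own slot would receive the proposer boost under the rule exercised above.
func isTimely(secondsIntoSlot uint64) bool {
	boostThreshold := uint64(secondsPerSlot / intervalsPerSlot) // 2s here, 4s on mainnet (12/3)
	return secondsIntoSlot < boostThreshold
}

func main() {
	fmt.Println(isTimely(0)) // true  - perfectly timely block
	fmt.Println(isTimely(1)) // true  - still within the attesting interval
	fmt.Println(isTimely(5)) // false - untimely, no boost
}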

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"fmt"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
@@ -15,8 +16,9 @@ import (
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
pmath "github.com/prysmaticlabs/prysm/math"
pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -31,15 +33,16 @@ var lastHeadRoot [32]byte
// New initializes a new fork choice store.
func New() *ForkChoice {
s := &Store{
justifiedCheckpoint: &forkchoicetypes.Checkpoint{},
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
proposerBoostRoot: [32]byte{},
nodes: make([]*Node, 0),
nodesIndices: make(map[[32]byte]uint64),
payloadIndices: make(map[[32]byte]uint64),
canonicalNodes: make(map[[32]byte]bool),
slashedIndices: make(map[types.ValidatorIndex]bool),
pruneThreshold: defaultPruneThreshold,
justifiedCheckpoint: &forkchoicetypes.Checkpoint{},
bestJustifiedCheckpoint: &forkchoicetypes.Checkpoint{},
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
proposerBoostRoot: [32]byte{},
nodes: make([]*Node, 0),
nodesIndices: make(map[[32]byte]uint64),
payloadIndices: make(map[[32]byte]uint64),
canonicalNodes: make(map[[32]byte]bool),
slashedIndices: make(map[types.ValidatorIndex]bool),
pruneThreshold: defaultPruneThreshold,
}
b := make([]uint64, 0)
@@ -146,13 +149,60 @@ func (f *ForkChoice) InsertNode(ctx context.Context, state state.ReadOnlyBeaconS
return errInvalidNilCheckpoint
}
finalizedEpoch := fc.Epoch
return f.store.insert(ctx, slot, root, parentRoot, payloadHash, justifiedEpoch, finalizedEpoch)
err := f.store.insert(ctx, slot, root, parentRoot, payloadHash, justifiedEpoch, finalizedEpoch)
if err != nil {
return err
}
return f.updateCheckpoints(ctx, jc, fc)
}
// Prune prunes the fork choice store with the new finalized root. The store is only pruned if the input
// root is different from the current store finalized root, and the number of nodes in the store has met the prune threshold.
func (f *ForkChoice) Prune(ctx context.Context, finalizedRoot [32]byte) error {
return f.store.prune(ctx, finalizedRoot)
// updateCheckpoints updates the checkpoints when inserting a new node.
func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkpoint) error {
f.store.checkpointsLock.Lock()
if jc.Epoch > f.store.justifiedCheckpoint.Epoch {
bj := f.store.bestJustifiedCheckpoint
if bj == nil || jc.Epoch > bj.Epoch {
f.store.bestJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
Root: bytesutil.ToBytes32(jc.Root)}
}
currentSlot := slots.CurrentSlot(f.store.genesisTime)
if slots.SinceEpochStarts(currentSlot) < params.BeaconConfig().SafeSlotsToUpdateJustified {
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
Root: bytesutil.ToBytes32(jc.Root)}
} else {
currentJcp := f.store.justifiedCheckpoint
currentRoot := currentJcp.Root
if currentRoot == params.BeaconConfig().ZeroHash {
currentRoot = f.store.originRoot
}
jSlot, err := slots.EpochStart(currentJcp.Epoch)
if err != nil {
f.store.checkpointsLock.Unlock()
return err
}
jcRoot := bytesutil.ToBytes32(jc.Root)
root, err := f.AncestorRoot(ctx, jcRoot, jSlot)
if err != nil {
f.store.checkpointsLock.Unlock()
return err
}
if root == currentRoot {
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
Root: jcRoot}
}
}
}
// Update finalization
if fc.Epoch <= f.store.finalizedCheckpoint.Epoch {
f.store.checkpointsLock.Unlock()
return nil
}
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: fc.Epoch,
Root: bytesutil.ToBytes32(fc.Root)}
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
Root: bytesutil.ToBytes32(jc.Root)}
f.store.checkpointsLock.Unlock()
return f.store.prune(ctx)
}
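Condensed, the rule this function encodes is: always track the best justified checkpoint seen; accept a higher justified checkpoint immediately while still early in the current epoch (within SafeSlotsToUpdateJustified slots), otherwise only if the store's current justified root is an ancestor of the incoming one; and a higher finalized checkpoint overrides both and triggers a prune. A self-contained sketch of that decision logic follows, with simplified types and an injected isDescendant check standing in for the AncestorRoot walk; all names and the constant value are illustrative, not the package's API.
package main

import "fmt"

type checkpoint struct {
	epoch uint64
	root  [32]byte
}

type store struct {
	justified, bestJustified, finalized checkpoint
}

// Illustrative stand-in for the spec parameter SAFE_SLOTS_TO_UPDATE_JUSTIFIED.
const safeSlotsToUpdateJustified = 8

// updateCheckpoints mirrors the decision structure of the diff above.
func (s *store) updateCheckpoints(jc, fc checkpoint, slotsIntoEpoch uint64, isDescendant func(ancestor, root [32]byte) bool) {
	if jc.epoch > s.justified.epoch {
		if jc.epoch > s.bestJustified.epoch {
			s.bestJustified = jc
		}
		// Early in the epoch the new justified checkpoint is accepted outright;
		// later it is accepted only if it descends from the current one.
		if slotsIntoEpoch < safeSlotsToUpdateJustified || isDescendant(s.justified.root, jc.root) {
			s.justified = jc
		}
	}
	// A higher finalized checkpoint overrides both; the real code prunes the store here.
	if fc.epoch > s.finalized.epoch {
		s.finalized = fc
		s.justified = jc
	}
}

func main() {
	s := &store{
		justified:     checkpoint{epoch: 2},
		bestJustified: checkpoint{epoch: 2},
		finalized:     checkpoint{epoch: 1},
	}
	always := func(_, _ [32]byte) bool { return true }
	s.updateCheckpoints(checkpoint{epoch: 3}, checkpoint{epoch: 1}, 10, always)
	fmt.Println(s.justified.epoch, s.bestJustified.epoch, s.finalized.epoch) // 3 3 1
}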
// HasNode returns true if the node exists in fork choice store,
@@ -188,7 +238,7 @@ func (f *ForkChoice) IsCanonical(root [32]byte) bool {
}
// AncestorRoot returns the ancestor root of input block root at a given slot.
func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types.Slot) ([]byte, error) {
func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types.Slot) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "protoArray.AncestorRoot")
defer span.End()
@@ -197,25 +247,25 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types
i, ok := f.store.nodesIndices[root]
if !ok {
return nil, errors.New("node does not exist")
return [32]byte{}, errors.New("node does not exist")
}
if i >= uint64(len(f.store.nodes)) {
return nil, errors.New("node index out of range")
return [32]byte{}, errors.New("node index out of range")
}
for f.store.nodes[i].slot > slot {
if ctx.Err() != nil {
return nil, ctx.Err()
return [32]byte{}, ctx.Err()
}
i = f.store.nodes[i].parent
if i >= uint64(len(f.store.nodes)) {
return nil, errors.New("node index out of range")
return [32]byte{}, errors.New("node index out of range")
}
}
return f.store.nodes[i].root[:], nil
return f.store.nodes[i].root, nil
}
// CommonAncestorRoot returns the common ancestor root between the two block roots r1 and r2.
@@ -296,6 +346,9 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.head")
defer span.End()
s.checkpointsLock.RLock()
defer s.checkpointsLock.RUnlock()
// Justified index has to be valid in the node indices map and cannot be out of bounds.
if s.justifiedCheckpoint == nil {
return [32]byte{}, errInvalidNilCheckpoint
@@ -303,7 +356,14 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
justifiedIndex, ok := s.nodesIndices[s.justifiedCheckpoint.Root]
if !ok {
return [32]byte{}, errUnknownJustifiedRoot
// If the justifiedCheckpoint is from genesis, then the root is
// zeroHash. In this case it should be the root of the forkchoice
// tree.
if s.justifiedCheckpoint.Epoch == params.BeaconConfig().GenesisEpoch {
justifiedIndex = uint64(0)
} else {
return [32]byte{}, errUnknownJustifiedRoot
}
}
if justifiedIndex >= uint64(len(s.nodes)) {
return [32]byte{}, errInvalidJustifiedIndex
@@ -425,6 +485,20 @@ func (s *Store) insert(ctx context.Context,
s.payloadIndices[payloadHash] = index
s.nodes = append(s.nodes, n)
// Apply proposer boost
timeNow := uint64(time.Now().Unix())
if timeNow < s.genesisTime {
return nil
}
secondsIntoSlot := (timeNow - s.genesisTime) % params.BeaconConfig().SecondsPerSlot
currentSlot := slots.CurrentSlot(s.genesisTime)
boostThreshold := params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().IntervalsPerSlot
if currentSlot == slot && secondsIntoSlot < boostThreshold {
s.proposerBoostLock.Lock()
s.proposerBoostRoot = root
s.proposerBoostLock.Unlock()
}
// Update parent with the best child and descendant only if it's available.
if n.parent != NonExistentNode {
if err := s.updateBestChildAndDescendant(parentIndex, index); err != nil {
@@ -649,17 +723,18 @@ func (s *Store) updateBestChildAndDescendant(parentIndex, childIndex uint64) err
}
// prune prunes the store with the new finalized root. The tree is only
// pruned if the input finalized root is different from the stored one and
// the number of nodes in the store has met the prune threshold.
func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte) error {
// pruned if the number of nodes in the store has met the prune threshold.
func (s *Store) prune(ctx context.Context) error {
_, span := trace.StartSpan(ctx, "protoArrayForkChoice.prune")
defer span.End()
s.nodesLock.Lock()
defer s.nodesLock.Unlock()
s.checkpointsLock.RLock()
finalizedRoot := s.finalizedCheckpoint.Root
s.checkpointsLock.RUnlock()
// The node would have seen finalized root or else it
// wouldn't be able to prune it.
// Protection against invalid checkpoint
finalizedIndex, ok := s.nodesIndices[finalizedRoot]
if !ok {
return errUnknownFinalizedRoot
@@ -776,10 +851,10 @@ func (f *ForkChoice) Tips() ([][32]byte, []types.Slot) {
return headsRoots, headsSlots
}
func (f *ForkChoice) ForkChoiceNodes() []*pbrpc.ForkChoiceNode {
func (f *ForkChoice) ForkChoiceNodes() []*ethpb.ForkChoiceNode {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
ret := make([]*pbrpc.ForkChoiceNode, len(f.store.nodes))
ret := make([]*ethpb.ForkChoiceNode, len(f.store.nodes))
var parentRoot [32]byte
for i, node := range f.store.nodes {
root := node.Root()
@@ -799,7 +874,7 @@ func (f *ForkChoice) ForkChoiceNodes() []*pbrpc.ForkChoiceNode {
bestDescendantRoot = bestDescendantNode.Root()
}
ret[i] = &pbrpc.ForkChoiceNode{
ret[i] = &ethpb.ForkChoiceNode{
Slot: node.Slot(),
Root: root[:],
Parent: parentRoot[:],
@@ -869,6 +944,10 @@ func (f *ForkChoice) UpdateJustifiedCheckpoint(jc *forkchoicetypes.Checkpoint) e
f.store.checkpointsLock.Lock()
defer f.store.checkpointsLock.Unlock()
f.store.justifiedCheckpoint = jc
bj := f.store.bestJustifiedCheckpoint
if bj == nil || jc.Epoch > bj.Epoch {
f.store.bestJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch, Root: jc.Root}
}
return nil
}
@@ -905,3 +984,13 @@ func (f *ForkChoice) InsertOptimisticChain(ctx context.Context, chain []*forkcho
}
return nil
}
// SetGenesisTime sets the genesisTime tracked by forkchoice
func (f *ForkChoice) SetGenesisTime(genesisTime uint64) {
f.store.genesisTime = genesisTime
}
// SetOriginRoot sets the genesis block root
func (f *ForkChoice) SetOriginRoot(root [32]byte) {
f.store.originRoot = root
}

View File

@@ -3,13 +3,13 @@ package protoarray
import (
"context"
"testing"
"time"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
@@ -51,7 +51,7 @@ func TestForkChoice_HasNode(t *testing.T) {
func TestStore_Head_UnknownJustifiedRoot(t *testing.T) {
s := &Store{nodesIndices: make(map[[32]byte]uint64)}
s.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 0, Root: [32]byte{'a'}}
s.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'a'}}
_, err := s.head(context.Background())
assert.ErrorContains(t, errUnknownJustifiedRoot.Error(), err)
@@ -367,7 +367,9 @@ func TestStore_Prune_LessThanThreshold(t *testing.T) {
// Finalized root is at index 99 so everything before 99 should be pruned,
// but PruneThreshold is at 100 so nothing will be pruned.
require.NoError(t, s.prune(context.Background(), indexToHash(99)))
fc := &forkchoicetypes.Checkpoint{Epoch: 3, Root: indexToHash(99)}
s.finalizedCheckpoint = fc
require.NoError(t, s.prune(context.Background()))
assert.Equal(t, 100, len(s.nodes), "Incorrect nodes count")
assert.Equal(t, 100, len(s.nodesIndices), "Incorrect node indices count")
}
@@ -406,7 +408,9 @@ func TestStore_Prune_MoreThanThreshold(t *testing.T) {
s := &Store{nodes: nodes, nodesIndices: indices, canonicalNodes: map[[32]byte]bool{}, payloadIndices: map[[32]byte]uint64{}}
// Finalized root is at index 99 so everything before 99 should be pruned.
require.NoError(t, s.prune(context.Background(), indexToHash(99)))
fc := &forkchoicetypes.Checkpoint{Epoch: 3, Root: indexToHash(99)}
s.finalizedCheckpoint = fc
require.NoError(t, s.prune(context.Background()))
assert.Equal(t, 1, len(s.nodes), "Incorrect nodes count")
assert.Equal(t, 1, len(s.nodesIndices), "Incorrect node indices count")
}
@@ -444,12 +448,15 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
s := &Store{nodes: nodes, nodesIndices: indices, canonicalNodes: map[[32]byte]bool{}, payloadIndices: map[[32]byte]uint64{}}
// Finalized root is at index 11 so everything before 11 should be pruned.
require.NoError(t, s.prune(context.Background(), indexToHash(10)))
fc := &forkchoicetypes.Checkpoint{Epoch: 1, Root: indexToHash(10)}
s.finalizedCheckpoint = fc
require.NoError(t, s.prune(context.Background()))
assert.Equal(t, 90, len(s.nodes), "Incorrect nodes count")
assert.Equal(t, 90, len(s.nodesIndices), "Incorrect node indices count")
// One more time.
require.NoError(t, s.prune(context.Background(), indexToHash(20)))
s.finalizedCheckpoint.Root = indexToHash(20)
require.NoError(t, s.prune(context.Background()))
assert.Equal(t, 80, len(s.nodes), "Incorrect nodes count")
assert.Equal(t, 80, len(s.nodesIndices), "Incorrect node indices count")
}
@@ -507,7 +514,9 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
[32]byte{'C'}: 2,
},
}
require.NoError(t, s.prune(context.Background(), indexToHash(uint64(1))))
fc := &forkchoicetypes.Checkpoint{Epoch: 1, Root: indexToHash(1)}
s.finalizedCheckpoint = fc
require.NoError(t, s.prune(context.Background()))
require.Equal(t, 1, len(s.nodes))
require.Equal(t, 1, len(s.nodesIndices))
require.Equal(t, 1, len(s.canonicalNodes))
@@ -610,7 +619,8 @@ func TestStore_PruneBranched(t *testing.T) {
require.Equal(t, true, f.IsCanonical([32]byte{'a'}))
require.Equal(t, true, f.IsCanonical([32]byte{'f'}))
require.NoError(t, f.Prune(ctx, tc.finalizedRoot))
f.store.finalizedCheckpoint.Root = tc.finalizedRoot
require.NoError(t, f.store.prune(ctx))
require.Equal(t, tc.canonicalCount, len(f.store.canonicalNodes))
require.Equal(t, true, f.IsCanonical(tc.wantedCanonical))
require.Equal(t, false, f.IsCanonical(tc.wantedNonCanonical))
@@ -899,10 +909,10 @@ func TestStore_AncestorRoot(t *testing.T) {
r, err := f.AncestorRoot(ctx, [32]byte{'c'}, 1)
require.NoError(t, err)
assert.Equal(t, bytesutil.ToBytes32(r), [32]byte{'a'})
assert.Equal(t, r, [32]byte{'a'})
r, err = f.AncestorRoot(ctx, [32]byte{'c'}, 2)
require.NoError(t, err)
assert.Equal(t, bytesutil.ToBytes32(r), [32]byte{'b'})
assert.Equal(t, r, [32]byte{'b'})
}
func TestStore_AncestorRootOutOfBound(t *testing.T) {
@@ -1114,3 +1124,150 @@ func TestStore_InsertOptimisticChain(t *testing.T) {
f = setup(1, 1)
require.NoError(t, f.InsertOptimisticChain(context.Background(), args[2:]))
}
func TestForkChoice_UpdateCheckpoints(t *testing.T) {
ctx := context.Background()
tests := []struct {
name string
justified *forkchoicetypes.Checkpoint
bestJustified *forkchoicetypes.Checkpoint
finalized *forkchoicetypes.Checkpoint
newJustified *forkchoicetypes.Checkpoint
newFinalized *forkchoicetypes.Checkpoint
wantedJustified *forkchoicetypes.Checkpoint
wantedBestJustified *forkchoicetypes.Checkpoint
wantedFinalized *forkchoicetypes.Checkpoint
currentSlot types.Slot
wantedErr string
}{
{
name: "lower than store justified and finalized",
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
newJustified: &forkchoicetypes.Checkpoint{Epoch: 1},
newFinalized: &forkchoicetypes.Checkpoint{Epoch: 0},
wantedJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
wantedBestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
wantedFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
},
{
name: "higher than store justified, early slot, direct descendant",
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
newJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'b'}},
newFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'g'}},
wantedJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'b'}},
wantedBestJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'b'}},
wantedFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
},
{
name: "higher than store justified, early slot, not a descendant",
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
newJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
newFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'g'}},
wantedJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
wantedBestJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
wantedFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
},
{
name: "higher than store justified, late slot, descendant",
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
newJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'b'}},
newFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'g'}},
wantedJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'b'}},
wantedFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
wantedBestJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'b'}},
currentSlot: params.BeaconConfig().SafeSlotsToUpdateJustified.Add(1),
},
{
name: "higher than store justified, late slot, not descendant",
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
newJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
newFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'g'}},
wantedJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
wantedFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
wantedBestJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
currentSlot: params.BeaconConfig().SafeSlotsToUpdateJustified.Add(1),
},
{
name: "higher than store finalized, late slot, not descendant",
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
newJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
newFinalized: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'h'}},
wantedJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
wantedFinalized: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'h'}},
wantedBestJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
currentSlot: params.BeaconConfig().SafeSlotsToUpdateJustified.Add(1),
},
{
name: "Unknown checkpoint root, late slot",
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
newJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'d'}},
newFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'h'}},
currentSlot: params.BeaconConfig().SafeSlotsToUpdateJustified.Add(1),
wantedErr: "node does not exist",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fcs := setup(tt.justified.Epoch, tt.finalized.Epoch)
fcs.store.justifiedCheckpoint = tt.justified
fcs.store.finalizedCheckpoint = tt.finalized
fcs.store.bestJustifiedCheckpoint = tt.bestJustified
fcs.store.genesisTime = uint64(time.Now().Unix()) - uint64(tt.currentSlot)*params.BeaconConfig().SecondsPerSlot
state, blkRoot, err := prepareForkchoiceState(ctx, 32, [32]byte{'f'},
[32]byte{}, [32]byte{}, tt.finalized.Epoch, tt.finalized.Epoch)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 64, [32]byte{'j'},
[32]byte{'f'}, [32]byte{}, tt.justified.Epoch, tt.finalized.Epoch)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 96, [32]byte{'b'},
[32]byte{'j'}, [32]byte{}, tt.newJustified.Epoch, tt.newFinalized.Epoch)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 96, [32]byte{'c'},
[32]byte{'f'}, [32]byte{}, tt.newJustified.Epoch, tt.newFinalized.Epoch)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 65, [32]byte{'h'},
[32]byte{'f'}, [32]byte{}, tt.newFinalized.Epoch, tt.newFinalized.Epoch)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
// Reset the justification checkpoints because the insertions above modified them.
fcs.store.justifiedCheckpoint = tt.justified
fcs.store.finalizedCheckpoint = tt.finalized
fcs.store.bestJustifiedCheckpoint = tt.bestJustified
jc := &ethpb.Checkpoint{Epoch: tt.newJustified.Epoch, Root: tt.newJustified.Root[:]}
fc := &ethpb.Checkpoint{Epoch: tt.newFinalized.Epoch, Root: tt.newFinalized.Root[:]}
err = fcs.updateCheckpoints(ctx, jc, fc)
if len(tt.wantedErr) > 0 {
require.ErrorContains(t, tt.wantedErr, err)
} else {
require.NoError(t, err)
require.Equal(t, tt.wantedJustified.Epoch, fcs.store.justifiedCheckpoint.Epoch)
require.Equal(t, tt.wantedFinalized.Epoch, fcs.store.finalizedCheckpoint.Epoch)
require.Equal(t, tt.wantedJustified.Root, fcs.store.justifiedCheckpoint.Root)
require.Equal(t, tt.wantedFinalized.Root, fcs.store.finalizedCheckpoint.Root)
require.Equal(t, tt.wantedBestJustified.Epoch, fcs.store.bestJustifiedCheckpoint.Epoch)
require.Equal(t, tt.wantedBestJustified.Root, fcs.store.bestJustifiedCheckpoint.Root)
}
})
}
}

View File

@@ -20,6 +20,7 @@ type ForkChoice struct {
type Store struct {
pruneThreshold uint64 // do not prune tree unless threshold is reached.
justifiedCheckpoint *forkchoicetypes.Checkpoint // latest justified checkpoint in store.
bestJustifiedCheckpoint *forkchoicetypes.Checkpoint // best justified checkpoint in store.
finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized checkpoint in store.
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
@@ -29,9 +30,11 @@ type Store struct {
canonicalNodes map[[fieldparams.RootLength]byte]bool // the canonical block nodes.
payloadIndices map[[fieldparams.RootLength]byte]uint64 // the payload hash of block node and the index in the list
slashedIndices map[types.ValidatorIndex]bool // The list of equivocating validators
originRoot [fieldparams.RootLength]byte // The genesis block root
nodesLock sync.RWMutex
proposerBoostLock sync.RWMutex
checkpointsLock sync.RWMutex
genesisTime uint64
}
// Node defines the individual block which includes its block parent, ancestor and how much weight accounted for it.

View File

@@ -4,7 +4,6 @@ import (
"context"
"testing"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
@@ -114,7 +113,7 @@ func TestVotes_CanFindHead(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
// Insert block 5 with justified epoch 2, it should be filtered out:
// Insert block 5 with justified epoch 2, it becomes head
// 0
// / \
// 2 1
@@ -130,9 +129,9 @@ func TestVotes_CanFindHead(t *testing.T) {
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
assert.Equal(t, indexToHash(5), r, "Incorrect head for with justified epoch at 1")
// Insert block 6 with justified epoch 0:
// Insert block 6 with justified epoch 3: verify it's the head
// 0
// / \
// 2 1
@@ -141,28 +140,18 @@ func TestVotes_CanFindHead(t *testing.T) {
// |
// 4 <- head
// / \
// 5 6 <- justified epoch = 0
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(6), indexToHash(4), params.BeaconConfig().ZeroHash, 1, 1)
// 5 6 <- justified epoch = 3
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(6), indexToHash(4), params.BeaconConfig().ZeroHash, 3, 2)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 1")
// Moved 2 votes to block 5:
// 0
// / \
// 2 1
// |
// 3
// |
// 4
// / \
// 2 votes-> 5 6
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(6), indexToHash(4), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(5), 4)
// Insert blocks 7, 8 and 9:
// Insert blocks 7 and 8
// 6 should still be the head, even though 5 has all the votes.
// 0
// / \
@@ -177,23 +166,17 @@ func TestVotes_CanFindHead(t *testing.T) {
// 7
// |
// 8
// |
// 9
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(7), indexToHash(5), params.BeaconConfig().ZeroHash, 2, 2)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(8), indexToHash(7), params.BeaconConfig().ZeroHash, 2, 2)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(9), indexToHash(8), params.BeaconConfig().ZeroHash, 2, 2)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 1")
// Update fork choice justified epoch to 1 and start block to 5.
// Insert block 10 with justified epoch 3, it becomes head
// Verify 9 is the head:
// 0
// / \
@@ -209,17 +192,15 @@ func TestVotes_CanFindHead(t *testing.T) {
// |
// 8
// |
// 9 <- head
jc := &forkchoicetypes.Checkpoint{Epoch: 2, Root: indexToHash(5)}
fc := &forkchoicetypes.Checkpoint{Epoch: 2, Root: indexToHash(5)}
f.store.justifiedCheckpoint = jc
f.store.finalizedCheckpoint = fc
// 10 <- head
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(10), indexToHash(8), params.BeaconConfig().ZeroHash, 3, 2)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 2")
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3")
// Insert block 10 and 2 validators updated their vote to 9.
// Verify 9 is the head:
// Insert block 9 forking 10, verify it's the head (lexicographic order)
// 0
// / \
// 2 1
@@ -234,54 +215,63 @@ func TestVotes_CanFindHead(t *testing.T) {
// |
// 8
// / \
// 2 votes->9 10
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(9), 5)
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(10), indexToHash(8), params.BeaconConfig().ZeroHash, 2, 2)
// 9 10
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(9), indexToHash(8), params.BeaconConfig().ZeroHash, 3, 2)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 2")
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 3")
// Move two votes for 10, verify it's head
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(10), 5)
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3")
// Add 3 more validators to the system.
balances = []uint64{1, 1, 1, 1, 1}
// The new validators voted for 10.
f.ProcessAttestation(context.Background(), []uint64{2, 3, 4}, indexToHash(10), 5)
// The new head should be 10.
// The new validators voted for 9
f.ProcessAttestation(context.Background(), []uint64{2, 3, 4}, indexToHash(9), 5)
// The new head should be 9.
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 2")
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 3")
// Set the balances of the last 2 validators to 0.
balances = []uint64{1, 1, 1, 0, 0}
// The head should be back to 9.
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 1")
// Set the balances back to normal.
balances = []uint64{1, 1, 1, 1, 1}
// The head should be back to 10.
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 2")
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3")
// Remove the last 2 validators.
balances = []uint64{1, 1, 1}
// Set the balances back to normal.
balances = []uint64{1, 1, 1, 1, 1}
// The head should be back to 9.
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 1")
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 3")
// Remove the last 2 validators.
balances = []uint64{1, 1, 1}
// The head should be back to 10.
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3")
// Verify pruning below the prune threshold does not affect head.
f.store.pruneThreshold = 1000
require.NoError(t, f.store.prune(context.Background(), indexToHash(5)))
prevRoot := f.store.finalizedCheckpoint.Root
f.store.finalizedCheckpoint.Root = indexToHash(5)
require.NoError(t, f.store.prune(context.Background()))
assert.Equal(t, 11, len(f.store.nodes), "Incorrect nodes length after prune")
f.store.finalizedCheckpoint.Root = prevRoot
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 2")
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3")
// Verify pruning above the prune threshold does prune:
// 0
@@ -300,12 +290,16 @@ func TestVotes_CanFindHead(t *testing.T) {
// / \
// 9 10
f.store.pruneThreshold = 1
require.NoError(t, f.store.prune(context.Background(), indexToHash(5)))
f.store.finalizedCheckpoint.Root = indexToHash(5)
require.NoError(t, f.store.prune(context.Background()))
assert.Equal(t, 5, len(f.store.nodes), "Incorrect nodes length after prune")
// We artificially pruned the justified root.
f.store.justifiedCheckpoint.Root = indexToHash(5)
f.store.finalizedCheckpoint.Root = prevRoot
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 2")
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 2")
// Insert new block 11 and verify head is at 11.
// 5 6
@@ -314,14 +308,14 @@ func TestVotes_CanFindHead(t *testing.T) {
// |
// 8
// / \
// 9 10
// 10 9
// |
// head-> 11
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(11), indexToHash(9), params.BeaconConfig().ZeroHash, 2, 2)
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(11), indexToHash(10), params.BeaconConfig().ZeroHash, 3, 2)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(11), r, "Incorrect head for with justified epoch at 2")
assert.Equal(t, indexToHash(11), r, "Incorrect head for with justified epoch at 3")
}

View File

@@ -62,6 +62,7 @@ go_library(
"//runtime/prereqs:go_default_library",
"//runtime/version:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ferranbt_fastssz//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",

View File

@@ -4,9 +4,11 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/common"
fastssz "github.com/ferranbt/fastssz"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/cmd"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
tracing2 "github.com/prysmaticlabs/prysm/monitoring/tracing"
@@ -171,3 +173,9 @@ func configureExecutionSetting(cliCtx *cli.Context) error {
c.DefaultFeeRecipient = checksumAddress
return params.SetActive(c)
}
func configureFastSSZHashingAlgorithm() {
if features.Get().EnableVectorizedHTR {
fastssz.EnableVectorizedHTR = true
}
}

View File

@@ -149,6 +149,7 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
if err := configureExecutionSetting(cliCtx); err != nil {
return nil, err
}
configureFastSSZHashingAlgorithm()
// Initializes any forks here.
params.BeaconConfig().InitializeForkSchedule()
@@ -248,6 +249,11 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
return nil, err
}
log.Debugln("Registering builder service")
if err := beacon.registerBuilderService(); err != nil {
return nil, err
}
log.Debugln("Registering RPC Service")
if err := beacon.registerRPCService(); err != nil {
return nil, err
@@ -568,6 +574,14 @@ func (b *BeaconNode) fetchP2P() p2p.P2P {
return p
}
func (b *BeaconNode) fetchBuilderService() *builder.Service {
var s *builder.Service
if err := b.services.FetchService(&s); err != nil {
panic(err)
}
return s
}
func (b *BeaconNode) registerAttestationPool() error {
s, err := attestations.NewService(b.ctx, &attestations.Config{
Pool: b.attestationPool,
@@ -970,3 +984,19 @@ func (b *BeaconNode) registerValidatorMonitorService() error {
}
return b.services.RegisterService(svc)
}
func (b *BeaconNode) registerBuilderService() error {
var chainService *blockchain.Service
if err := b.services.FetchService(&chainService); err != nil {
return err
}
opts := append(b.serviceFlagOpts.builderOpts,
builder.WithHeadFetcher(chainService),
builder.WithDatabase(b.db))
svc, err := builder.NewService(b.ctx, opts...)
if err != nil {
return err
}
return b.services.RegisterService(svc)
}

View File

@@ -293,20 +293,6 @@ func (s *Service) processPastLogs(ctx context.Context) error {
}
logCount := binary.LittleEndian.Uint64(rawLogCount)
// Batch request the desired headers and store them in a
// map for quick access.
requestHeaders := func(startBlk uint64, endBlk uint64) error {
headers, err := s.batchRequestHeaders(startBlk, endBlk)
if err != nil {
return err
}
for _, h := range headers {
if h != nil && h.Number != nil {
headersMap[h.Number.Uint64()] = h
}
}
return nil
}
latestFollowHeight, err := s.followedBlockHeight(ctx)
if err != nil {
return err
@@ -316,77 +302,10 @@ func (s *Service) processPastLogs(ctx context.Context) error {
additiveFactor := uint64(float64(batchSize) * additiveFactorMultiplier)
for currentBlockNum < latestFollowHeight {
start := currentBlockNum
end := currentBlockNum + batchSize
// Appropriately bound the request, as we do not
// want to request blocks beyond the current follow distance.
if end > latestFollowHeight {
end = latestFollowHeight
}
query := ethereum.FilterQuery{
Addresses: []common.Address{
s.cfg.depositContractAddr,
},
FromBlock: big.NewInt(0).SetUint64(start),
ToBlock: big.NewInt(0).SetUint64(end),
}
remainingLogs := logCount - uint64(s.lastReceivedMerkleIndex+1)
// only change the end block if the remaining logs are below the required log limit.
// reset our query and end block in this case.
withinLimit := remainingLogs < depositLogRequestLimit
aboveFollowHeight := end >= latestFollowHeight
if withinLimit && aboveFollowHeight {
query.ToBlock = big.NewInt(0).SetUint64(latestFollowHeight)
end = latestFollowHeight
}
logs, err := s.httpLogger.FilterLogs(ctx, query)
currentBlockNum, batchSize, err = s.processBlockInBatch(ctx, currentBlockNum, latestFollowHeight, batchSize, additiveFactor, logCount, headersMap)
if err != nil {
if tooMuchDataRequestedError(err) {
if batchSize == 0 {
return errors.New("batch size is zero")
}
// multiplicative decrease
batchSize /= multiplicativeDecreaseDivisor
continue
}
return err
}
// Only request headers before chainstart to correctly determine
// genesis.
if !s.chainStartData.Chainstarted {
if err := requestHeaders(start, end); err != nil {
return err
}
}
for _, filterLog := range logs {
if filterLog.BlockNumber > currentBlockNum {
if err := s.checkHeaderRange(ctx, currentBlockNum, filterLog.BlockNumber-1, headersMap, requestHeaders); err != nil {
return err
}
// set new block number after checking for chainstart for previous block.
s.latestEth1DataLock.Lock()
s.latestEth1Data.LastRequestedBlock = currentBlockNum
s.latestEth1DataLock.Unlock()
currentBlockNum = filterLog.BlockNumber
}
if err := s.ProcessLog(ctx, filterLog); err != nil {
return err
}
}
if err := s.checkHeaderRange(ctx, currentBlockNum, end, headersMap, requestHeaders); err != nil {
return err
}
currentBlockNum = end
if batchSize < s.cfg.eth1HeaderReqLimit {
// update the batchSize with additive increase
batchSize += additiveFactor
if batchSize > s.cfg.eth1HeaderReqLimit {
batchSize = s.cfg.eth1HeaderReqLimit
}
}
}
s.latestEth1DataLock.Lock()
@@ -424,6 +343,96 @@ func (s *Service) processPastLogs(ctx context.Context) error {
return nil
}
func (s *Service) processBlockInBatch(ctx context.Context, currentBlockNum uint64, latestFollowHeight uint64, batchSize uint64, additiveFactor uint64, logCount uint64, headersMap map[uint64]*gethTypes.Header) (uint64, uint64, error) {
// Batch request the desired headers and store them in a
// map for quick access.
requestHeaders := func(startBlk uint64, endBlk uint64) error {
headers, err := s.batchRequestHeaders(startBlk, endBlk)
if err != nil {
return err
}
for _, h := range headers {
if h != nil && h.Number != nil {
headersMap[h.Number.Uint64()] = h
}
}
return nil
}
start := currentBlockNum
end := currentBlockNum + batchSize
// Appropriately bound the request, as we do not
// want to request blocks beyond the current follow distance.
if end > latestFollowHeight {
end = latestFollowHeight
}
query := ethereum.FilterQuery{
Addresses: []common.Address{
s.cfg.depositContractAddr,
},
FromBlock: big.NewInt(0).SetUint64(start),
ToBlock: big.NewInt(0).SetUint64(end),
}
remainingLogs := logCount - uint64(s.lastReceivedMerkleIndex+1)
// only change the end block if the remaining logs are below the required log limit.
// reset our query and end block in this case.
withinLimit := remainingLogs < depositLogRequestLimit
aboveFollowHeight := end >= latestFollowHeight
if withinLimit && aboveFollowHeight {
query.ToBlock = big.NewInt(0).SetUint64(latestFollowHeight)
end = latestFollowHeight
}
logs, err := s.httpLogger.FilterLogs(ctx, query)
if err != nil {
if tooMuchDataRequestedError(err) {
if batchSize == 0 {
return 0, 0, errors.New("batch size is zero")
}
// multiplicative decrease
batchSize /= multiplicativeDecreaseDivisor
return currentBlockNum, batchSize, nil
}
return 0, 0, err
}
// Only request headers before chainstart to correctly determine
// genesis.
if !s.chainStartData.Chainstarted {
if err := requestHeaders(start, end); err != nil {
return 0, 0, err
}
}
for _, filterLog := range logs {
if filterLog.BlockNumber > currentBlockNum {
if err := s.checkHeaderRange(ctx, currentBlockNum, filterLog.BlockNumber-1, headersMap, requestHeaders); err != nil {
return 0, 0, err
}
// set new block number after checking for chainstart for previous block.
s.latestEth1DataLock.Lock()
s.latestEth1Data.LastRequestedBlock = currentBlockNum
s.latestEth1DataLock.Unlock()
currentBlockNum = filterLog.BlockNumber
}
if err := s.ProcessLog(ctx, filterLog); err != nil {
return 0, 0, err
}
}
if err := s.checkHeaderRange(ctx, currentBlockNum, end, headersMap, requestHeaders); err != nil {
return 0, 0, err
}
currentBlockNum = end
if batchSize < s.cfg.eth1HeaderReqLimit {
// update the batchSize with additive increase
batchSize += additiveFactor
if batchSize > s.cfg.eth1HeaderReqLimit {
batchSize = s.cfg.eth1HeaderReqLimit
}
}
return currentBlockNum, batchSize, nil
}
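The refactor keeps the original additive-increase / multiplicative-decrease behaviour: when the provider rejects a range for returning too much data, the batch size is divided by multiplicativeDecreaseDivisor and the same range is retried; otherwise the window advances and the batch grows by additiveFactor up to eth1HeaderReqLimit. A standalone sketch of that adaptation policy follows; the fetchRange function and the concrete constant values are stand-ins, not the powchain API.
package main

import (
	"errors"
	"fmt"
)

var errTooMuchData = errors.New("query returned more than 10000 results")

// Stand-in values; the real service reads these from its config.
const (
	headerReqLimit                = 1000
	multiplicativeDecreaseDivisor = 2
	additiveFactor                = 100
)

// fetchRange stands in for the FilterLogs call; it fails when the window is too wide.
func fetchRange(start, end uint64) error {
	if end-start > 400 {
		return errTooMuchData
	}
	return nil
}

func main() {
	current, target := uint64(0), uint64(2000)
	batch := uint64(headerReqLimit)
	for current < target {
		end := current + batch
		if end > target {
			end = target
		}
		if err := fetchRange(current, end); err != nil {
			if errors.Is(err, errTooMuchData) {
				if batch == 0 {
					fmt.Println("batch size is zero")
					return
				}
				// Multiplicative decrease: shrink the window and retry the same range.
				batch /= multiplicativeDecreaseDivisor
				continue
			}
			fmt.Println("unrecoverable error:", err)
			return
		}
		current = end
		// Additive increase, capped at the request limit.
		if batch < headerReqLimit {
			batch += additiveFactor
			if batch > headerReqLimit {
				batch = headerReqLimit
			}
		}
	}
	fmt.Println("done, final batch size:", batch)
}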
// requestBatchedHeadersAndLogs requests and processes all the headers and
// logs from the period last polled to now.
func (s *Service) requestBatchedHeadersAndLogs(ctx context.Context) error {

View File

@@ -131,10 +131,10 @@ func handleGetSSZ(
respHasError, errJson := apimiddleware.HandleGrpcResponseError(endpoint.Err, grpcResponse, grpcResponseBody, w)
if errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return
return true
}
if respHasError {
return
return true
}
if errJson := apimiddleware.DeserializeGrpcResponseBodyIntoContainer(grpcResponseBody, config.responseJson); errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
@@ -191,10 +191,10 @@ func handlePostSSZ(
respHasError, errJson := apimiddleware.HandleGrpcResponseError(endpoint.Err, grpcResponse, grpcResponseBody, w)
if errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return
return true
}
if respHasError {
return
return true
}
if errJson := apimiddleware.Cleanup(grpcResponse.Body); errJson != nil {
apimiddleware.WriteError(w, errJson, nil)

View File

@@ -12,12 +12,10 @@ import (
// Requests and responses.
//----------------
// genesisResponseJson is used in /beacon/genesis API endpoint.
type genesisResponseJson struct {
Data *genesisResponse_GenesisJson `json:"data"`
}
// genesisResponse_GenesisJson is used in /beacon/genesis API endpoint.
type genesisResponse_GenesisJson struct {
GenesisTime string `json:"genesis_time" time:"true"`
GenesisValidatorsRoot string `json:"genesis_validators_root" hex:"true"`
@@ -33,158 +31,130 @@ type WeakSubjectivityResponse struct {
} `json:"data"`
}
// feeRecipientsRequestJson is used in /validator/prepare_beacon_proposers API endpoint.
type feeRecipientsRequestJSON struct {
Recipients []*feeRecipientJson `json:"recipients"`
}
// stateRootResponseJson is used in /beacon/states/{state_id}/root API endpoint.
type stateRootResponseJson struct {
Data *stateRootResponse_StateRootJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
}
// stateRootResponse_StateRootJson is used in /beacon/states/{state_id}/root API endpoint.
type stateRootResponse_StateRootJson struct {
StateRoot string `json:"root" hex:"true"`
}
// stateForkResponseJson is used in /beacon/states/{state_id}/fork API endpoint.
type stateForkResponseJson struct {
Data *forkJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
}
// stateFinalityCheckpointResponseJson is used in /beacon/states/{state_id}/finality_checkpoints API endpoint.
type stateFinalityCheckpointResponseJson struct {
Data *stateFinalityCheckpointResponse_StateFinalityCheckpointJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
}
// stateFinalityCheckpointResponse_StateFinalityCheckpointJson is used in /beacon/states/{state_id}/finality_checkpoints API endpoint.
type stateFinalityCheckpointResponse_StateFinalityCheckpointJson struct {
PreviousJustified *checkpointJson `json:"previous_justified"`
CurrentJustified *checkpointJson `json:"current_justified"`
Finalized *checkpointJson `json:"finalized"`
}
// stateValidatorResponseJson is used in /beacon/states/{state_id}/validators API endpoint.
type stateValidatorsResponseJson struct {
Data []*validatorContainerJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
}
// stateValidatorResponseJson is used in /beacon/states/{state_id}/validators/{validator_id} API endpoint.
type stateValidatorResponseJson struct {
Data *validatorContainerJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
}
// validatorBalancesResponseJson is used in /beacon/states/{state_id}/validator_balances API endpoint.
type validatorBalancesResponseJson struct {
Data []*validatorBalanceJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
}
// stateCommitteesResponseJson is used in /beacon/states/{state_id}/committees API endpoint.
type stateCommitteesResponseJson struct {
Data []*committeeJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
}
// syncCommitteesResponseJson is used in /beacon/states/{state_id}/sync_committees API endpoint.
type syncCommitteesResponseJson struct {
Data *syncCommitteeValidatorsJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
}
// blockHeadersResponseJson is used in /beacon/headers API endpoint.
type blockHeadersResponseJson struct {
Data []*blockHeaderContainerJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
}
// blockHeaderResponseJson is used in /beacon/headers/{block_id} API endpoint.
type blockHeaderResponseJson struct {
Data *blockHeaderContainerJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
}
// blockResponseJson is used in /beacon/blocks/{block_id} API endpoint.
type blockResponseJson struct {
Data *signedBeaconBlockContainerJson `json:"data"`
}
// blockV2ResponseJson is used in /v2/beacon/blocks/{block_id} API endpoint.
type blockV2ResponseJson struct {
Version string `json:"version" enum:"true"`
Data *signedBeaconBlockContainerV2Json `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
}
// blockRootResponseJson is used in /beacon/blocks/{block_id}/root API endpoint.
type blockRootResponseJson struct {
Data *blockRootContainerJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
}
// blockAttestationsResponseJson is used in /beacon/blocks/{block_id}/attestations API endpoint.
type blockAttestationsResponseJson struct {
Data []*attestationJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
}
// attestationsPoolResponseJson is used in /beacon/pool/attestations GET API endpoint.
type attestationsPoolResponseJson struct {
Data []*attestationJson `json:"data"`
}
// submitAttestationRequestJson is used in /beacon/pool/attestations POST API endpoint.
type submitAttestationRequestJson struct {
Data []*attestationJson `json:"data"`
}
// attesterSlashingsPoolResponseJson is used in /beacon/pool/attester_slashings API endpoint.
type attesterSlashingsPoolResponseJson struct {
Data []*attesterSlashingJson `json:"data"`
}
// proposerSlashingsPoolResponseJson is used in /beacon/pool/proposer_slashings API endpoint.
type proposerSlashingsPoolResponseJson struct {
Data []*proposerSlashingJson `json:"data"`
}
// voluntaryExitsPoolResponseJson is used in /beacon/pool/voluntary_exits API endpoint.
type voluntaryExitsPoolResponseJson struct {
Data []*signedVoluntaryExitJson `json:"data"`
}
// submitSyncCommitteeSignaturesRequestJson is used in /beacon/pool/sync_committees API endpoint.
type submitSyncCommitteeSignaturesRequestJson struct {
Data []*syncCommitteeMessageJson `json:"data"`
}
// identityResponseJson is used in /node/identity API endpoint.
type identityResponseJson struct {
Data *identityJson `json:"data"`
}
// peersResponseJson is used in /node/peers API endpoint.
type peersResponseJson struct {
Data []*peerJson `json:"data"`
}
// peerResponseJson is used in /node/peers/{peer_id} API endpoint.
type peerResponseJson struct {
Data *peerJson `json:"data"`
}
// peerCountResponseJson is used in /node/peer_count API endpoint.
type peerCountResponseJson struct {
Data peerCountResponse_PeerCountJson `json:"data"`
}
// peerCountResponse_PeerCountJson is used in /node/peer_count API endpoint.
type peerCountResponse_PeerCountJson struct {
Disconnected string `json:"disconnected"`
Connecting string `json:"connecting"`
@@ -192,111 +162,91 @@ type peerCountResponse_PeerCountJson struct {
Disconnecting string `json:"disconnecting"`
}
// versionResponseJson is used in /node/version API endpoint.
type versionResponseJson struct {
Data *versionJson `json:"data"`
}
// syncingResponseJson is used in /node/syncing API endpoint.
type syncingResponseJson struct {
Data *helpers.SyncDetailsJson `json:"data"`
}
// beaconStateResponseJson is used in /debug/beacon/states/{state_id} API endpoint.
type beaconStateResponseJson struct {
Data *beaconStateJson `json:"data"`
}
// beaconStateV2ResponseJson is used in /v2/debug/beacon/states/{state_id} API endpoint.
type beaconStateV2ResponseJson struct {
Version string `json:"version" enum:"true"`
Data *beaconStateContainerV2Json `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
}
// forkChoiceHeadsResponseJson is used in /v1/debug/beacon/heads API endpoint.
type forkChoiceHeadsResponseJson struct {
Data []*forkChoiceHeadJson `json:"data"`
}
// v2ForkChoiceHeadsResponseJson is used in /v2/debug/beacon/heads API endpoint.
type v2ForkChoiceHeadsResponseJson struct {
Data []*v2ForkChoiceHeadJson `json:"data"`
}
// forkScheduleResponseJson is used in /config/fork_schedule API endpoint.
type forkScheduleResponseJson struct {
Data []*forkJson `json:"data"`
}
// depositContractResponseJson is used in /config/deposit_contract API endpoint.
type depositContractResponseJson struct {
Data *depositContractJson `json:"data"`
}
// specResponseJson is used in /config/spec API endpoint.
type specResponseJson struct {
Data interface{} `json:"data"`
}
// dutiesRequestJson is used in several duties-related API endpoints.
type dutiesRequestJson struct {
Index []string `json:"index"`
}
// attesterDutiesResponseJson is used in /validator/duties/attester/{epoch} API endpoint.
type attesterDutiesResponseJson struct {
DependentRoot string `json:"dependent_root" hex:"true"`
Data []*attesterDutyJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
}
// proposerDutiesResponseJson is used in /validator/duties/proposer/{epoch} API endpoint.
type proposerDutiesResponseJson struct {
DependentRoot string `json:"dependent_root" hex:"true"`
Data []*proposerDutyJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
}
// syncCommitteeDutiesResponseJson is used in /validator/duties/sync/{epoch} API endpoint.
type syncCommitteeDutiesResponseJson struct {
Data []*syncCommitteeDuty `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
}
// produceBlockResponseJson is used in /validator/blocks/{slot} API endpoint.
type produceBlockResponseJson struct {
Data *beaconBlockJson `json:"data"`
}
// produceBlockResponseV2Json is used in /v2/validator/blocks/{slot} API endpoint.
type produceBlockResponseV2Json struct {
Version string `json:"version"`
Data *beaconBlockContainerV2Json `json:"data"`
}
// produceBlindedBlockResponseJson is used in /v1/validator/blinded_blocks/{slot} API endpoint.
type produceBlindedBlockResponseJson struct {
Version string `json:"version"`
Data *blindedBeaconBlockContainerJson `json:"data"`
}
// produceAttestationDataResponseJson is used in /validator/attestation_data API endpoint.
type produceAttestationDataResponseJson struct {
Data *attestationDataJson `json:"data"`
}
// aggregateAttestationResponseJson is used in /validator/aggregate_attestation API endpoint.
type aggregateAttestationResponseJson struct {
Data *attestationJson `json:"data"`
}
// submitBeaconCommitteeSubscriptionsRequestJson is used in /validator/beacon_committee_subscriptions API endpoint.
type submitBeaconCommitteeSubscriptionsRequestJson struct {
Data []*beaconCommitteeSubscribeJson `json:"data"`
}
// beaconCommitteeSubscribeJson is used in /validator/beacon_committee_subscriptions API endpoint.
type beaconCommitteeSubscribeJson struct {
ValidatorIndex string `json:"validator_index"`
CommitteeIndex string `json:"committee_index"`
@@ -305,29 +255,24 @@ type beaconCommitteeSubscribeJson struct {
IsAggregator bool `json:"is_aggregator"`
}
// submitBeaconCommitteeSubscriptionsRequestJson is used in /validator/sync_committee_subscriptions API endpoint.
type submitSyncCommitteeSubscriptionRequestJson struct {
Data []*syncCommitteeSubscriptionJson `json:"data"`
}
// syncCommitteeSubscriptionJson is used in /validator/sync_committee_subscriptions API endpoint.
type syncCommitteeSubscriptionJson struct {
ValidatorIndex string `json:"validator_index"`
SyncCommitteeIndices []string `json:"sync_committee_indices"`
UntilEpoch string `json:"until_epoch"`
}
// submitAggregateAndProofsRequestJson is used in /validator/aggregate_and_proofs API endpoint.
type submitAggregateAndProofsRequestJson struct {
Data []*signedAggregateAttestationAndProofJson `json:"data"`
}
// produceSyncCommitteeContributionResponseJson is used in /validator/sync_committee_contribution API endpoint.
type produceSyncCommitteeContributionResponseJson struct {
Data *syncCommitteeContributionJson `json:"data"`
}
// submitContributionAndProofsRequestJson is used in /validator/contribution_and_proofs API endpoint.
type submitContributionAndProofsRequestJson struct {
Data []*signedContributionAndProofJson `json:"data"`
}

View File

@@ -188,7 +188,7 @@ func (vs *Server) computeStateRoot(ctx context.Context, block interfaces.SignedB
return root[:], nil
}
// SubmitValidatorRegistration submits validator registration.
func (vs *Server) SubmitValidatorRegistration(context.Context, *ethpb.SignedValidatorRegistrationV1) (*emptypb.Empty, error) {
// SubmitValidatorRegistration submits validator registrations.
func (vs *Server) SubmitValidatorRegistration(context.Context, *ethpb.SignedValidatorRegistrationsV1) (*emptypb.Empty, error) {
return &emptypb.Empty{}, nil
}

View File

@@ -47,7 +47,7 @@ func TestServer_getExecutionPayload(t *testing.T) {
}))
transitionSt, _ := util.DeterministicGenesisStateBellatrix(t, 1)
require.NoError(t, transitionSt.SetLatestExecutionPayloadHeader(&ethpb.ExecutionPayloadHeader{BlockNumber: 1}))
require.NoError(t, transitionSt.SetLatestExecutionPayloadHeader(&pb.ExecutionPayloadHeader{BlockNumber: 1}))
b2pb := util.NewBeaconBlockBellatrix()
b2r, err := b2pb.Block.HashTreeRoot()
require.NoError(t, err)
@@ -148,7 +148,7 @@ func TestServer_getExecutionPayload_UnexpectedFeeRecipient(t *testing.T) {
}))
transitionSt, _ := util.DeterministicGenesisStateBellatrix(t, 1)
require.NoError(t, transitionSt.SetLatestExecutionPayloadHeader(&ethpb.ExecutionPayloadHeader{BlockNumber: 1}))
require.NoError(t, transitionSt.SetLatestExecutionPayloadHeader(&pb.ExecutionPayloadHeader{BlockNumber: 1}))
b2pb := util.NewBeaconBlockBellatrix()
b2r, err := b2pb.Block.HashTreeRoot()
require.NoError(t, err)

View File

@@ -26,6 +26,7 @@ go_test(
deps = [
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/state/stategen/mock:go_default_library",
"//config/params:go_default_library",

View File

@@ -19,10 +19,6 @@ import (
"go.opencensus.io/trace"
)
var ErrNoAncestorForBlock = errors.New("could not find an ancestor state for block")
var ErrNoCanonicalBlockForSlot = errors.New("none of the blocks found in the db slot index are canonical")
var ErrInvalidDBBlock = errors.New("invalid block found in database")
// StateIdParseError represents an error scenario where a state ID could not be parsed.
type StateIdParseError struct {
message string
@@ -210,6 +206,13 @@ func (p *StateProvider) StateBySlot(ctx context.Context, target types.Slot) (sta
ctx, span := trace.StartSpan(ctx, "statefetcher.StateBySlot")
defer span.End()
if target > p.GenesisTimeFetcher.CurrentSlot() {
return nil, errors.New("requested slot is in the future")
}
if target > p.ChainInfoFetcher.HeadSlot() {
return nil, errors.New("requested slot number is higher than head slot number")
}
st, err := p.ReplayerBuilder.ReplayerForSlot(target).ReplayBlocks(ctx)
if err != nil {
msg := fmt.Sprintf("error while replaying history to slot=%d", target)
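The two guards above are intentionally separate: the first compares the target against the wall-clock slot, the second against the head slot, which can lag behind the wall clock. A minimal standalone sketch of the same ordering (the validateTargetSlot helper and the concrete slot values are hypothetical, for illustration only):

package main

import (
	"errors"
	"fmt"
)

// validateTargetSlot mirrors the two checks in StateBySlot: reject slots past
// the wall clock first, then reject slots past the current head.
func validateTargetSlot(target, currentSlot, headSlot uint64) error {
	if target > currentSlot {
		return errors.New("requested slot is in the future")
	}
	if target > headSlot {
		return errors.New("requested slot number is higher than head slot number")
	}
	return nil
}

func main() {
	// Wall clock at slot 102, head at slot 100: slot 101 passes the first
	// check but fails the second (compare TestStateBySlot_AfterHeadSlot below).
	fmt.Println(validateTargetSlot(101, 102, 100))
	// Slot 103 is ahead of the wall clock entirely.
	fmt.Println(validateTargetSlot(103, 102, 100))
}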

View File

@@ -7,6 +7,7 @@ import (
"testing"
"time"
statenative "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/ethereum/go-ethereum/common/hexutil"
@@ -78,9 +79,12 @@ func TestGetState(t *testing.T) {
cc := &mockstategen.MockCanonicalChecker{Is: true}
cs := &mockstategen.MockCurrentSlotter{Slot: bs.Slot() + 1}
ch := stategen.NewCanonicalHistory(db, cc, cs)
currentSlot := types.Slot(0)
p := StateProvider{
BeaconDB: db,
ReplayerBuilder: ch,
BeaconDB: db,
ReplayerBuilder: ch,
GenesisTimeFetcher: &chainMock.ChainService{Slot: &currentSlot},
ChainInfoFetcher: &chainMock.ChainService{State: bs},
}
s, err := p.State(ctx, []byte("genesis"))
@@ -165,6 +169,7 @@ func TestGetState(t *testing.T) {
CanonicalRoots: map[[32]byte]bool{
bytesutil.ToBytes32(newBeaconState.LatestBlockHeader().ParentRoot): true,
},
State: newBeaconState,
},
ReplayerBuilder: mockstategen.NewMockReplayerBuilder(mockstategen.WithMockState(newBeaconState)),
}
@@ -176,18 +181,6 @@ func TestGetState(t *testing.T) {
assert.Equal(t, stateRoot, sRoot)
})
rb := mockstategen.NewMockReplayerBuilder(mockstategen.WithStateError(1, stategen.ErrFutureSlotRequested))
t.Run("slot_too_big", func(t *testing.T) {
p := StateProvider{
GenesisTimeFetcher: &chainMock.ChainService{
Genesis: time.Now(),
},
ReplayerBuilder: rb,
}
_, err := p.State(ctx, []byte(strconv.FormatUint(1, 10)))
assert.ErrorContains(t, "cannot replay to future slots", err)
})
t.Run("invalid_state", func(t *testing.T) {
p := StateProvider{}
_, err := p.State(ctx, []byte("foo"))
@@ -394,3 +387,20 @@ func TestNewStateNotFoundError(t *testing.T) {
e := NewStateNotFoundError(100)
assert.Equal(t, "state not found in the last 100 state roots", e.message)
}
func TestStateBySlot_FutureSlot(t *testing.T) {
slot := types.Slot(100)
p := StateProvider{GenesisTimeFetcher: &chainMock.ChainService{Slot: &slot}}
_, err := p.StateBySlot(context.Background(), 101)
assert.ErrorContains(t, "requested slot is in the future", err)
}
func TestStateBySlot_AfterHeadSlot(t *testing.T) {
st, err := statenative.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 100})
require.NoError(t, err)
currentSlot := types.Slot(102)
mock := &chainMock.ChainService{State: st, Slot: &currentSlot}
p := StateProvider{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
_, err = p.StateBySlot(context.Background(), 101)
assert.ErrorContains(t, "requested slot number is higher than head slot number", err)
}

View File

@@ -12,6 +12,7 @@ go_library(
deps = [
"//config/fieldparams:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",

View File

@@ -19,12 +19,13 @@ var (
// trie of the particular field.
type FieldTrie struct {
*sync.RWMutex
reference *stateutil.Reference
fieldLayers [][]*[32]byte
field types.BeaconStateField
dataType types.DataType
length uint64
numOfElems int
reference *stateutil.Reference
fieldLayers [][]*[32]byte
field types.BeaconStateField
dataType types.DataType
length uint64
numOfElems int
isTransferred bool
}
// NewFieldTrie is the constructor for the field trie data structure. It creates the corresponding
@@ -191,6 +192,43 @@ func (f *FieldTrie) CopyTrie() *FieldTrie {
}
}
// Length returns the length of the whole field trie.
func (f *FieldTrie) Length() uint64 {
return f.length
}
// TransferTrie transfers all the trie-related data to a new trie.
// This is done when we know that other states holding references to
// this trie are unlikely to need it for recomputation, which lets us
// save on a copy. Callers must take care not to invoke this method
// on an empty trie.
func (f *FieldTrie) TransferTrie() *FieldTrie {
if f.fieldLayers == nil {
return &FieldTrie{
field: f.field,
dataType: f.dataType,
reference: stateutil.NewRef(1),
RWMutex: new(sync.RWMutex),
length: f.length,
numOfElems: f.numOfElems,
}
}
f.isTransferred = true
nTrie := &FieldTrie{
fieldLayers: f.fieldLayers,
field: f.field,
dataType: f.dataType,
reference: stateutil.NewRef(1),
RWMutex: new(sync.RWMutex),
length: f.length,
numOfElems: f.numOfElems,
}
// Zero out field layers here.
f.fieldLayers = nil
return nTrie
}
// TrieRoot returns the corresponding root of the trie.
func (f *FieldTrie) TrieRoot() ([32]byte, error) {
if f.Empty() {
@@ -222,7 +260,7 @@ func (f *FieldTrie) FieldReference() *stateutil.Reference {
// Empty checks whether the underlying field trie is
// empty or not.
func (f *FieldTrie) Empty() bool {
return f == nil || len(f.fieldLayers) == 0
return f == nil || len(f.fieldLayers) == 0 || f.isTransferred
}
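A simplified model of the transfer-versus-copy distinction introduced here; the miniTrie type below is a hypothetical stand-in for FieldTrie that keeps only the ownership mechanics (layers, a reference count, and the transferred flag):

package main

import "fmt"

// miniTrie is a toy stand-in for FieldTrie: layered data, a reference count
// (kept only for illustration), and a flag marking that ownership has moved.
type miniTrie struct {
	layers        [][]byte
	refs          int
	isTransferred bool
}

func (t *miniTrie) Empty() bool {
	return t == nil || len(t.layers) == 0 || t.isTransferred
}

// Transfer hands the layers to a fresh trie instead of deep-copying them.
// The original is left empty, so any state still referencing it has to
// rebuild its trie before the next recomputation.
func (t *miniTrie) Transfer() *miniTrie {
	t.isTransferred = true
	moved := &miniTrie{layers: t.layers, refs: 1}
	t.layers = nil
	return moved
}

func main() {
	orig := &miniTrie{layers: [][]byte{{0x01}, {0x02}}, refs: 2}
	moved := orig.Transfer()
	fmt.Println(orig.Empty(), moved.Empty()) // true false
}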
// InsertFieldLayer manually inserts a field layer. This method

View File

@@ -9,6 +9,7 @@ import (
"github.com/prysmaticlabs/go-bitfield"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
@@ -60,7 +61,7 @@ type ReadOnlyBeaconState interface {
MarshalSSZ() ([]byte, error)
IsNil() bool
Version() int
LatestExecutionPayloadHeader() (*ethpb.ExecutionPayloadHeader, error)
LatestExecutionPayloadHeader() (*enginev1.ExecutionPayloadHeader, error)
}
// WriteOnlyBeaconState defines a struct which only has write access to beacon state methods.
@@ -82,7 +83,7 @@ type WriteOnlyBeaconState interface {
SetSlashings(val []uint64) error
UpdateSlashingsAtIndex(idx, val uint64) error
AppendHistoricalRoots(root [32]byte) error
SetLatestExecutionPayloadHeader(payload *ethpb.ExecutionPayloadHeader) error
SetLatestExecutionPayloadHeader(payload *enginev1.ExecutionPayloadHeader) error
}
// ReadOnlyValidator defines a struct which only has read access to validator methods.
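With this consolidation, code written against these interfaces receives the engine-API header type. A hedged sketch of a caller inside the repository (payloadBlockNumber is a hypothetical helper, not part of this change; it assumes the beacon-chain/state package path used elsewhere in this diff and uses only methods visible in the interface above):

package example

import (
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
)

// payloadBlockNumber reads the latest execution payload header through the
// read-only interface and returns its block number. After this change the
// header is an *enginev1.ExecutionPayloadHeader rather than the ethpb type.
func payloadBlockNumber(st state.ReadOnlyBeaconState) (uint64, error) {
	header, err := st.LatestExecutionPayloadHeader()
	if err != nil {
		// Phase 0 and Altair states return an error here, as shown in the
		// v1 and v2 getters further down this diff.
		return 0, err
	}
	return header.BlockNumber, nil
}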

View File

@@ -69,6 +69,7 @@ go_library(
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_ferranbt_fastssz//:go_default_library",
@@ -114,6 +115,7 @@ go_test(
"//consensus-types/primitives:go_default_library",
"//container/trie:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/interop:go_default_library",
"//testing/assert:go_default_library",

View File

@@ -11,6 +11,7 @@ import (
nativetypes "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
eth2types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
@@ -18,33 +19,33 @@ import (
// getters and setters for its respective values and helpful functions such as HashTreeRoot().
type BeaconState struct {
version int
genesisTime uint64 `ssz-gen:"true"`
genesisValidatorsRoot customtypes.Byte32 `ssz-gen:"true" ssz-size:"32"`
slot eth2types.Slot `ssz-gen:"true"`
fork *ethpb.Fork `ssz-gen:"true"`
latestBlockHeader *ethpb.BeaconBlockHeader `ssz-gen:"true"`
blockRoots *customtypes.BlockRoots `ssz-gen:"true" ssz-size:"8192,32"`
stateRoots *customtypes.StateRoots `ssz-gen:"true" ssz-size:"8192,32"`
historicalRoots customtypes.HistoricalRoots `ssz-gen:"true" ssz-size:"?,32" ssz-max:"16777216"`
eth1Data *ethpb.Eth1Data `ssz-gen:"true"`
eth1DataVotes []*ethpb.Eth1Data `ssz-gen:"true" ssz-max:"2048"`
eth1DepositIndex uint64 `ssz-gen:"true"`
validators []*ethpb.Validator `ssz-gen:"true" ssz-max:"1099511627776"`
balances []uint64 `ssz-gen:"true" ssz-max:"1099511627776"`
randaoMixes *customtypes.RandaoMixes `ssz-gen:"true" ssz-size:"65536,32"`
slashings []uint64 `ssz-gen:"true" ssz-size:"8192"`
previousEpochAttestations []*ethpb.PendingAttestation `ssz-gen:"true" ssz-max:"4096"`
currentEpochAttestations []*ethpb.PendingAttestation `ssz-gen:"true" ssz-max:"4096"`
previousEpochParticipation []byte `ssz-gen:"true" ssz-max:"1099511627776"`
currentEpochParticipation []byte `ssz-gen:"true" ssz-max:"1099511627776"`
justificationBits bitfield.Bitvector4 `ssz-gen:"true" ssz-size:"1"`
previousJustifiedCheckpoint *ethpb.Checkpoint `ssz-gen:"true"`
currentJustifiedCheckpoint *ethpb.Checkpoint `ssz-gen:"true"`
finalizedCheckpoint *ethpb.Checkpoint `ssz-gen:"true"`
inactivityScores []uint64 `ssz-gen:"true" ssz-max:"1099511627776"`
currentSyncCommittee *ethpb.SyncCommittee `ssz-gen:"true"`
nextSyncCommittee *ethpb.SyncCommittee `ssz-gen:"true"`
latestExecutionPayloadHeader *ethpb.ExecutionPayloadHeader `ssz-gen:"true"`
genesisTime uint64 `ssz-gen:"true"`
genesisValidatorsRoot customtypes.Byte32 `ssz-gen:"true" ssz-size:"32"`
slot eth2types.Slot `ssz-gen:"true"`
fork *ethpb.Fork `ssz-gen:"true"`
latestBlockHeader *ethpb.BeaconBlockHeader `ssz-gen:"true"`
blockRoots *customtypes.BlockRoots `ssz-gen:"true" ssz-size:"8192,32"`
stateRoots *customtypes.StateRoots `ssz-gen:"true" ssz-size:"8192,32"`
historicalRoots customtypes.HistoricalRoots `ssz-gen:"true" ssz-size:"?,32" ssz-max:"16777216"`
eth1Data *ethpb.Eth1Data `ssz-gen:"true"`
eth1DataVotes []*ethpb.Eth1Data `ssz-gen:"true" ssz-max:"2048"`
eth1DepositIndex uint64 `ssz-gen:"true"`
validators []*ethpb.Validator `ssz-gen:"true" ssz-max:"1099511627776"`
balances []uint64 `ssz-gen:"true" ssz-max:"1099511627776"`
randaoMixes *customtypes.RandaoMixes `ssz-gen:"true" ssz-size:"65536,32"`
slashings []uint64 `ssz-gen:"true" ssz-size:"8192"`
previousEpochAttestations []*ethpb.PendingAttestation `ssz-gen:"true" ssz-max:"4096"`
currentEpochAttestations []*ethpb.PendingAttestation `ssz-gen:"true" ssz-max:"4096"`
previousEpochParticipation []byte `ssz-gen:"true" ssz-max:"1099511627776"`
currentEpochParticipation []byte `ssz-gen:"true" ssz-max:"1099511627776"`
justificationBits bitfield.Bitvector4 `ssz-gen:"true" ssz-size:"1"`
previousJustifiedCheckpoint *ethpb.Checkpoint `ssz-gen:"true"`
currentJustifiedCheckpoint *ethpb.Checkpoint `ssz-gen:"true"`
finalizedCheckpoint *ethpb.Checkpoint `ssz-gen:"true"`
inactivityScores []uint64 `ssz-gen:"true" ssz-max:"1099511627776"`
currentSyncCommittee *ethpb.SyncCommittee `ssz-gen:"true"`
nextSyncCommittee *ethpb.SyncCommittee `ssz-gen:"true"`
latestExecutionPayloadHeader *enginev1.ExecutionPayloadHeader `ssz-gen:"true"`
lock sync.RWMutex
dirtyFields map[nativetypes.FieldIndex]bool

View File

@@ -11,6 +11,7 @@ import (
nativetypes "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
eth2types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
@@ -18,33 +19,33 @@ import (
// getters and setters for its respective values and helpful functions such as HashTreeRoot().
type BeaconState struct {
version int
genesisTime uint64 `ssz-gen:"true"`
genesisValidatorsRoot customtypes.Byte32 `ssz-gen:"true" ssz-size:"32"`
slot eth2types.Slot `ssz-gen:"true"`
fork *ethpb.Fork `ssz-gen:"true"`
latestBlockHeader *ethpb.BeaconBlockHeader `ssz-gen:"true"`
blockRoots *customtypes.BlockRoots `ssz-gen:"true" ssz-size:"64,32"`
stateRoots *customtypes.StateRoots `ssz-gen:"true" ssz-size:"64,32"`
historicalRoots customtypes.HistoricalRoots `ssz-gen:"true" ssz-size:"?,32" ssz-max:"16777216"`
eth1Data *ethpb.Eth1Data `ssz-gen:"true"`
eth1DataVotes []*ethpb.Eth1Data `ssz-gen:"true" ssz-max:"32"`
eth1DepositIndex uint64 `ssz-gen:"true"`
validators []*ethpb.Validator `ssz-gen:"true" ssz-max:"1099511627776"`
balances []uint64 `ssz-gen:"true" ssz-max:"1099511627776"`
randaoMixes *customtypes.RandaoMixes `ssz-gen:"true" ssz-size:"64,32"`
slashings []uint64 `ssz-gen:"true" ssz-size:"64"`
previousEpochAttestations []*ethpb.PendingAttestation `ssz-gen:"true" ssz-max:"1024"`
currentEpochAttestations []*ethpb.PendingAttestation `ssz-gen:"true" ssz-max:"1024"`
previousEpochParticipation []byte `ssz-gen:"true" ssz-max:"1099511627776"`
currentEpochParticipation []byte `ssz-gen:"true" ssz-max:"1099511627776"`
justificationBits bitfield.Bitvector4 `ssz-gen:"true" ssz-size:"1"`
previousJustifiedCheckpoint *ethpb.Checkpoint `ssz-gen:"true"`
currentJustifiedCheckpoint *ethpb.Checkpoint `ssz-gen:"true"`
finalizedCheckpoint *ethpb.Checkpoint `ssz-gen:"true"`
inactivityScores []uint64 `ssz-gen:"true" ssz-max:"1099511627776"`
currentSyncCommittee *ethpb.SyncCommittee `ssz-gen:"true"`
nextSyncCommittee *ethpb.SyncCommittee `ssz-gen:"true"`
latestExecutionPayloadHeader *ethpb.ExecutionPayloadHeader `ssz-gen:"true"`
genesisTime uint64 `ssz-gen:"true"`
genesisValidatorsRoot customtypes.Byte32 `ssz-gen:"true" ssz-size:"32"`
slot eth2types.Slot `ssz-gen:"true"`
fork *ethpb.Fork `ssz-gen:"true"`
latestBlockHeader *ethpb.BeaconBlockHeader `ssz-gen:"true"`
blockRoots *customtypes.BlockRoots `ssz-gen:"true" ssz-size:"64,32"`
stateRoots *customtypes.StateRoots `ssz-gen:"true" ssz-size:"64,32"`
historicalRoots customtypes.HistoricalRoots `ssz-gen:"true" ssz-size:"?,32" ssz-max:"16777216"`
eth1Data *ethpb.Eth1Data `ssz-gen:"true"`
eth1DataVotes []*ethpb.Eth1Data `ssz-gen:"true" ssz-max:"32"`
eth1DepositIndex uint64 `ssz-gen:"true"`
validators []*ethpb.Validator `ssz-gen:"true" ssz-max:"1099511627776"`
balances []uint64 `ssz-gen:"true" ssz-max:"1099511627776"`
randaoMixes *customtypes.RandaoMixes `ssz-gen:"true" ssz-size:"64,32"`
slashings []uint64 `ssz-gen:"true" ssz-size:"64"`
previousEpochAttestations []*ethpb.PendingAttestation `ssz-gen:"true" ssz-max:"1024"`
currentEpochAttestations []*ethpb.PendingAttestation `ssz-gen:"true" ssz-max:"1024"`
previousEpochParticipation []byte `ssz-gen:"true" ssz-max:"1099511627776"`
currentEpochParticipation []byte `ssz-gen:"true" ssz-max:"1099511627776"`
justificationBits bitfield.Bitvector4 `ssz-gen:"true" ssz-size:"1"`
previousJustifiedCheckpoint *ethpb.Checkpoint `ssz-gen:"true"`
currentJustifiedCheckpoint *ethpb.Checkpoint `ssz-gen:"true"`
finalizedCheckpoint *ethpb.Checkpoint `ssz-gen:"true"`
inactivityScores []uint64 `ssz-gen:"true" ssz-max:"1099511627776"`
currentSyncCommittee *ethpb.SyncCommittee `ssz-gen:"true"`
nextSyncCommittee *ethpb.SyncCommittee `ssz-gen:"true"`
latestExecutionPayloadHeader *enginev1.ExecutionPayloadHeader `ssz-gen:"true"`
lock sync.RWMutex
dirtyFields map[nativetypes.FieldIndex]bool

View File

@@ -2,4 +2,4 @@ package state_native
import "errors"
var ErrNilParticipation = errors.New("Nil epoch participation in state")
var ErrNilParticipation = errors.New("nil epoch participation in state")

View File

@@ -1,12 +1,13 @@
package state_native
import (
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/runtime/version"
)
// LatestExecutionPayloadHeader of the beacon state.
func (b *BeaconState) LatestExecutionPayloadHeader() (*ethpb.ExecutionPayloadHeader, error) {
func (b *BeaconState) LatestExecutionPayloadHeader() (*enginev1.ExecutionPayloadHeader, error) {
if b.version == version.Phase0 || b.version == version.Altair {
return nil, errNotSupported("LatestExecutionPayloadHeader", b.version)
}
@@ -23,6 +24,6 @@ func (b *BeaconState) LatestExecutionPayloadHeader() (*ethpb.ExecutionPayloadHea
// latestExecutionPayloadHeaderVal of the beacon state.
// This assumes that a lock is already held on BeaconState.
func (b *BeaconState) latestExecutionPayloadHeaderVal() *ethpb.ExecutionPayloadHeader {
func (b *BeaconState) latestExecutionPayloadHeaderVal() *enginev1.ExecutionPayloadHeader {
return ethpb.CopyExecutionPayloadHeader(b.latestExecutionPayloadHeader)
}

View File

@@ -11,6 +11,7 @@ import (
v3 "github.com/prysmaticlabs/prysm/beacon-chain/state/v3"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
@@ -325,7 +326,7 @@ func syncCommittee(prefix string) *ethpb.SyncCommittee {
}
}
func executionPayloadHeader() *ethpb.ExecutionPayloadHeader {
func executionPayloadHeader() *enginev1.ExecutionPayloadHeader {
ph := bytesutil.ToBytes32([]byte("parent"))
fr := bytesutil.PadTo([]byte("fee"), 20)
sr := bytesutil.ToBytes32([]byte("state"))
@@ -336,7 +337,7 @@ func executionPayloadHeader() *ethpb.ExecutionPayloadHeader {
bf := bytesutil.ToBytes32([]byte("base"))
bh := bytesutil.ToBytes32([]byte("block"))
tr := bytesutil.ToBytes32([]byte("transactions"))
return &ethpb.ExecutionPayloadHeader{
return &enginev1.ExecutionPayloadHeader{
ParentHash: ph[:],
FeeRecipient: fr,
StateRoot: sr[:],

View File

@@ -2,12 +2,13 @@ package state_native
import (
nativetypes "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native/types"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
_ "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/runtime/version"
)
// SetLatestExecutionPayloadHeader for the beacon state.
func (b *BeaconState) SetLatestExecutionPayloadHeader(val *ethpb.ExecutionPayloadHeader) error {
func (b *BeaconState) SetLatestExecutionPayloadHeader(val *enginev1.ExecutionPayloadHeader) error {
b.lock.Lock()
defer b.lock.Unlock()

View File

@@ -740,17 +740,31 @@ func (b *BeaconState) rootSelector(ctx context.Context, field nativetypes.FieldI
func (b *BeaconState) recomputeFieldTrie(index nativetypes.FieldIndex, elements interface{}) ([32]byte, error) {
fTrie := b.stateFieldLeaves[index]
fTrieMutex := fTrie.RWMutex
// We can't lock the trie directly because the trie's variable gets reassigned,
// and therefore we would call Unlock() on a different object.
fTrieMutex := fTrie.RWMutex
if fTrie.FieldReference().Refs() > 1 {
fTrieMutex.Lock()
fTrieMutex.Lock()
if fTrie.Empty() {
err := b.resetFieldTrie(index, elements, fTrie.Length())
if err != nil {
fTrieMutex.Unlock()
return [32]byte{}, err
}
// Reduce reference count as we are instantiating a new trie.
fTrie.FieldReference().MinusRef()
newTrie := fTrie.CopyTrie()
fTrieMutex.Unlock()
return b.stateFieldLeaves[index].TrieRoot()
}
if fTrie.FieldReference().Refs() > 1 {
fTrie.FieldReference().MinusRef()
newTrie := fTrie.TransferTrie()
b.stateFieldLeaves[index] = newTrie
fTrie = newTrie
fTrieMutex.Unlock()
}
fTrieMutex.Unlock()
// remove duplicate indexes
b.dirtyIndices[index] = slice.SetUint64(b.dirtyIndices[index])
// sort indexes again
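The comment about not locking the trie directly is the crux of this hunk: b.stateFieldLeaves[index] is swapped for a new trie partway through, so the mutex has to be captured once up front, and the unlock must go through that captured value. A standalone sketch of the pattern (the holder and registry types are hypothetical):

package main

import (
	"fmt"
	"sync"
)

// holder embeds a mutex pointer the same way FieldTrie embeds *sync.RWMutex.
type holder struct {
	*sync.RWMutex
	data []int
}

type registry struct {
	tries []*holder
}

// recompute swaps in a fresh holder while holding the original's lock.
func (r *registry) recompute(idx int) {
	h := r.tries[idx]
	// Capture the mutex before the slot is reassigned; re-reading
	// r.tries[idx].RWMutex afterwards would unlock a different mutex
	// than the one we locked.
	mu := h.RWMutex
	mu.Lock()
	defer mu.Unlock()
	r.tries[idx] = &holder{RWMutex: new(sync.RWMutex), data: append([]int(nil), h.data...)}
}

func main() {
	r := &registry{tries: []*holder{{RWMutex: new(sync.RWMutex), data: []int{1, 2, 3}}}}
	r.recompute(0)
	fmt.Println(r.tries[0].data) // [1 2 3], now held behind a brand-new mutex
}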

View File

@@ -383,3 +383,204 @@ func TestBeaconState_AppendValidator_DoesntMutateCopy(t *testing.T) {
_, ok := st1.ValidatorIndexByPubkey(bytesutil.ToBytes48(val.PublicKey))
assert.Equal(t, false, ok, "Expected no validator index to be present in st1 for the newly inserted pubkey")
}
func TestBeaconState_ValidatorMutation_Phase0(t *testing.T) {
testState, _ := util.DeterministicGenesisState(t, 400)
pbState, err := statenative.ProtobufBeaconStatePhase0(testState.InnerStateUnsafe())
require.NoError(t, err)
testState, err = statenative.InitializeFromProtoPhase0(pbState)
require.NoError(t, err)
_, err = testState.HashTreeRoot(context.Background())
require.NoError(t, err)
// Reset tries
require.NoError(t, testState.UpdateValidatorAtIndex(200, new(ethpb.Validator)))
_, err = testState.HashTreeRoot(context.Background())
require.NoError(t, err)
newState1 := testState.Copy()
_ = testState.Copy()
require.NoError(t, testState.UpdateValidatorAtIndex(15, &ethpb.Validator{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
EffectiveBalance: 1111,
Slashed: false,
ActivationEligibilityEpoch: 1112,
ActivationEpoch: 1114,
ExitEpoch: 1116,
WithdrawableEpoch: 1117,
}))
rt, err := testState.HashTreeRoot(context.Background())
require.NoError(t, err)
pbState, err = statenative.ProtobufBeaconStatePhase0(testState.InnerStateUnsafe())
require.NoError(t, err)
copiedTestState, err := statenative.InitializeFromProtoPhase0(pbState)
require.NoError(t, err)
rt2, err := copiedTestState.HashTreeRoot(context.Background())
require.NoError(t, err)
assert.Equal(t, rt, rt2)
require.NoError(t, newState1.UpdateValidatorAtIndex(150, &ethpb.Validator{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
EffectiveBalance: 2111,
Slashed: false,
ActivationEligibilityEpoch: 2112,
ActivationEpoch: 2114,
ExitEpoch: 2116,
WithdrawableEpoch: 2117,
}))
rt, err = newState1.HashTreeRoot(context.Background())
require.NoError(t, err)
pbState, err = statenative.ProtobufBeaconStatePhase0(newState1.InnerStateUnsafe())
require.NoError(t, err)
copiedTestState, err = statenative.InitializeFromProtoPhase0(pbState)
require.NoError(t, err)
rt2, err = copiedTestState.HashTreeRoot(context.Background())
require.NoError(t, err)
assert.Equal(t, rt, rt2)
}
func TestBeaconState_ValidatorMutation_Altair(t *testing.T) {
testState, _ := util.DeterministicGenesisStateAltair(t, 400)
pbState, err := statenative.ProtobufBeaconStateAltair(testState.InnerStateUnsafe())
require.NoError(t, err)
testState, err = statenative.InitializeFromProtoAltair(pbState)
require.NoError(t, err)
_, err = testState.HashTreeRoot(context.Background())
require.NoError(t, err)
// Reset tries
require.NoError(t, testState.UpdateValidatorAtIndex(200, new(ethpb.Validator)))
_, err = testState.HashTreeRoot(context.Background())
require.NoError(t, err)
newState1 := testState.Copy()
_ = testState.Copy()
require.NoError(t, testState.UpdateValidatorAtIndex(15, &ethpb.Validator{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
EffectiveBalance: 1111,
Slashed: false,
ActivationEligibilityEpoch: 1112,
ActivationEpoch: 1114,
ExitEpoch: 1116,
WithdrawableEpoch: 1117,
}))
rt, err := testState.HashTreeRoot(context.Background())
require.NoError(t, err)
pbState, err = statenative.ProtobufBeaconStateAltair(testState.InnerStateUnsafe())
require.NoError(t, err)
copiedTestState, err := statenative.InitializeFromProtoAltair(pbState)
require.NoError(t, err)
rt2, err := copiedTestState.HashTreeRoot(context.Background())
require.NoError(t, err)
assert.Equal(t, rt, rt2)
require.NoError(t, newState1.UpdateValidatorAtIndex(150, &ethpb.Validator{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
EffectiveBalance: 2111,
Slashed: false,
ActivationEligibilityEpoch: 2112,
ActivationEpoch: 2114,
ExitEpoch: 2116,
WithdrawableEpoch: 2117,
}))
rt, err = newState1.HashTreeRoot(context.Background())
require.NoError(t, err)
pbState, err = statenative.ProtobufBeaconStateAltair(newState1.InnerStateUnsafe())
require.NoError(t, err)
copiedTestState, err = statenative.InitializeFromProtoAltair(pbState)
require.NoError(t, err)
rt2, err = copiedTestState.HashTreeRoot(context.Background())
require.NoError(t, err)
assert.Equal(t, rt, rt2)
}
func TestBeaconState_ValidatorMutation_Bellatrix(t *testing.T) {
testState, _ := util.DeterministicGenesisStateBellatrix(t, 400)
pbState, err := statenative.ProtobufBeaconStateBellatrix(testState.InnerStateUnsafe())
require.NoError(t, err)
testState, err = statenative.InitializeFromProtoBellatrix(pbState)
require.NoError(t, err)
_, err = testState.HashTreeRoot(context.Background())
require.NoError(t, err)
// Reset tries
require.NoError(t, testState.UpdateValidatorAtIndex(200, new(ethpb.Validator)))
_, err = testState.HashTreeRoot(context.Background())
require.NoError(t, err)
newState1 := testState.Copy()
_ = testState.Copy()
require.NoError(t, testState.UpdateValidatorAtIndex(15, &ethpb.Validator{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
EffectiveBalance: 1111,
Slashed: false,
ActivationEligibilityEpoch: 1112,
ActivationEpoch: 1114,
ExitEpoch: 1116,
WithdrawableEpoch: 1117,
}))
rt, err := testState.HashTreeRoot(context.Background())
require.NoError(t, err)
pbState, err = statenative.ProtobufBeaconStateBellatrix(testState.InnerStateUnsafe())
require.NoError(t, err)
copiedTestState, err := statenative.InitializeFromProtoBellatrix(pbState)
require.NoError(t, err)
rt2, err := copiedTestState.HashTreeRoot(context.Background())
require.NoError(t, err)
assert.Equal(t, rt, rt2)
require.NoError(t, newState1.UpdateValidatorAtIndex(150, &ethpb.Validator{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
EffectiveBalance: 2111,
Slashed: false,
ActivationEligibilityEpoch: 2112,
ActivationEpoch: 2114,
ExitEpoch: 2116,
WithdrawableEpoch: 2117,
}))
rt, err = newState1.HashTreeRoot(context.Background())
require.NoError(t, err)
pbState, err = statenative.ProtobufBeaconStateBellatrix(newState1.InnerStateUnsafe())
require.NoError(t, err)
copiedTestState, err = statenative.InitializeFromProtoBellatrix(pbState)
require.NoError(t, err)
rt2, err = copiedTestState.HashTreeRoot(context.Background())
require.NoError(t, err)
assert.Equal(t, rt, rt2)
}

View File

@@ -16,7 +16,6 @@ import (
var ErrFutureSlotRequested = errors.New("cannot replay to future slots")
var ErrNoCanonicalBlockForSlot = errors.New("none of the blocks found in the db slot index are canonical")
var ErrNoBlocksBelowSlot = errors.New("no blocks found in db below slot")
var ErrInvalidDBBlock = errors.New("invalid block found in database")
var ErrReplayTargetSlotExceeded = errors.New("desired replay slot is less than state's slot")
type retrievalMethod int

View File

@@ -45,6 +45,7 @@ go_library(
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_pkg_errors//:go_default_library",

View File

@@ -390,17 +390,31 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex)
func (b *BeaconState) recomputeFieldTrie(index types.FieldIndex, elements interface{}) ([32]byte, error) {
fTrie := b.stateFieldLeaves[index]
fTrieMutex := fTrie.RWMutex
// We can't lock the trie directly because the trie's variable gets reassigned,
// and therefore we would call Unlock() on a different object.
fTrieMutex := fTrie.RWMutex
if fTrie.FieldReference().Refs() > 1 {
fTrieMutex.Lock()
fTrieMutex.Lock()
if fTrie.Empty() {
err := b.resetFieldTrie(index, elements, fTrie.Length())
if err != nil {
fTrieMutex.Unlock()
return [32]byte{}, err
}
// Reduce reference count as we are instantiating a new trie.
fTrie.FieldReference().MinusRef()
newTrie := fTrie.CopyTrie()
fTrieMutex.Unlock()
return b.stateFieldLeaves[index].TrieRoot()
}
if fTrie.FieldReference().Refs() > 1 {
fTrie.FieldReference().MinusRef()
newTrie := fTrie.TransferTrie()
b.stateFieldLeaves[index] = newTrie
fTrie = newTrie
fTrieMutex.Unlock()
}
fTrieMutex.Unlock()
// remove duplicate indexes
b.dirtyIndices[index] = slice.SetUint64(b.dirtyIndices[index])
// sort indexes again

View File

@@ -269,3 +269,70 @@ func TestBeaconState_AppendValidator_DoesntMutateCopy(t *testing.T) {
_, ok := st1.ValidatorIndexByPubkey(bytesutil.ToBytes48(val.PublicKey))
assert.Equal(t, false, ok, "Expected no validator index to be present in st1 for the newly inserted pubkey")
}
func TestBeaconState_ValidatorMutation_Phase0(t *testing.T) {
testState, _ := util.DeterministicGenesisState(t, 400)
pbState, err := v1.ProtobufBeaconState(testState.InnerStateUnsafe())
require.NoError(t, err)
testState, err = v1.InitializeFromProto(pbState)
require.NoError(t, err)
_, err = testState.HashTreeRoot(context.Background())
require.NoError(t, err)
// Reset tries
require.NoError(t, testState.UpdateValidatorAtIndex(200, new(ethpb.Validator)))
_, err = testState.HashTreeRoot(context.Background())
require.NoError(t, err)
newState1 := testState.Copy()
_ = testState.Copy()
require.NoError(t, testState.UpdateValidatorAtIndex(15, &ethpb.Validator{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
EffectiveBalance: 1111,
Slashed: false,
ActivationEligibilityEpoch: 1112,
ActivationEpoch: 1114,
ExitEpoch: 1116,
WithdrawableEpoch: 1117,
}))
rt, err := testState.HashTreeRoot(context.Background())
require.NoError(t, err)
pbState, err = v1.ProtobufBeaconState(testState.InnerStateUnsafe())
require.NoError(t, err)
copiedTestState, err := v1.InitializeFromProto(pbState)
require.NoError(t, err)
rt2, err := copiedTestState.HashTreeRoot(context.Background())
require.NoError(t, err)
assert.Equal(t, rt, rt2)
require.NoError(t, newState1.UpdateValidatorAtIndex(150, &ethpb.Validator{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
EffectiveBalance: 2111,
Slashed: false,
ActivationEligibilityEpoch: 2112,
ActivationEpoch: 2114,
ExitEpoch: 2116,
WithdrawableEpoch: 2117,
}))
rt, err = newState1.HashTreeRoot(context.Background())
require.NoError(t, err)
pbState, err = v1.ProtobufBeaconState(newState1.InnerStateUnsafe())
require.NoError(t, err)
copiedTestState, err = v1.InitializeFromProto(pbState)
require.NoError(t, err)
rt2, err = copiedTestState.HashTreeRoot(context.Background())
require.NoError(t, err)
assert.Equal(t, rt, rt2)
}

View File

@@ -2,6 +2,7 @@ package v1
import (
"github.com/pkg/errors"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
@@ -36,6 +37,6 @@ func (*BeaconState) NextSyncCommittee() (*ethpb.SyncCommittee, error) {
}
// LatestExecutionPayloadHeader is not supported for phase 0 beacon state.
func (*BeaconState) LatestExecutionPayloadHeader() (*ethpb.ExecutionPayloadHeader, error) {
func (*BeaconState) LatestExecutionPayloadHeader() (*enginev1.ExecutionPayloadHeader, error) {
return nil, errors.New("LatestExecutionPayloadHeader is not supported for phase 0 beacon state")
}

View File

@@ -2,6 +2,7 @@ package v1
import (
"github.com/pkg/errors"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
@@ -56,6 +57,6 @@ func (*BeaconState) SetInactivityScores(_ []uint64) error {
}
// SetLatestExecutionPayloadHeader is not supported for phase 0 beacon state.
func (*BeaconState) SetLatestExecutionPayloadHeader(val *ethpb.ExecutionPayloadHeader) error {
func (*BeaconState) SetLatestExecutionPayloadHeader(val *enginev1.ExecutionPayloadHeader) error {
return errors.New("SetLatestExecutionPayloadHeader is not supported for phase 0 beacon state")
}

View File

@@ -48,6 +48,7 @@ go_library(
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_pkg_errors//:go_default_library",
@@ -71,6 +72,7 @@ go_test(
"references_test.go",
"setters_test.go",
"state_fuzz_test.go",
"state_test.go",
"state_trie_test.go",
],
data = glob(["testdata/**"]),

View File

@@ -2,6 +2,7 @@ package v2
import (
"github.com/pkg/errors"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
@@ -16,6 +17,6 @@ func (*BeaconState) CurrentEpochAttestations() ([]*ethpb.PendingAttestation, err
}
// LatestExecutionPayloadHeader is not supported for hard fork 1 beacon state.
func (*BeaconState) LatestExecutionPayloadHeader() (*ethpb.ExecutionPayloadHeader, error) {
func (*BeaconState) LatestExecutionPayloadHeader() (*enginev1.ExecutionPayloadHeader, error) {
return nil, errors.New("LatestExecutionPayloadHeader is not supported for hard fork 1 beacon state")
}

View File

@@ -2,6 +2,7 @@ package v2
import (
"github.com/pkg/errors"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
@@ -21,6 +22,6 @@ func (*BeaconState) RotateAttestations() error {
}
// SetLatestExecutionPayloadHeader is not supported for hard fork 1 beacon state.
func (*BeaconState) SetLatestExecutionPayloadHeader(_ *ethpb.ExecutionPayloadHeader) error {
func (*BeaconState) SetLatestExecutionPayloadHeader(_ *enginev1.ExecutionPayloadHeader) error {
return errors.New("SetLatestExecutionPayloadHeader is not supported for hard fork 1 beacon state")
}

View File

@@ -2,4 +2,4 @@ package v2
import "errors"
var ErrNilParticipation = errors.New("Nil epoch participation in state")
var ErrNilParticipation = errors.New("nil epoch participation in state")

View File

@@ -0,0 +1,79 @@
package v2_test
import (
"context"
"testing"
v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
)
func TestBeaconState_ValidatorMutation_Altair(t *testing.T) {
testState, _ := util.DeterministicGenesisStateAltair(t, 400)
pbState, err := v2.ProtobufBeaconState(testState.InnerStateUnsafe())
require.NoError(t, err)
testState, err = v2.InitializeFromProto(pbState)
require.NoError(t, err)
_, err = testState.HashTreeRoot(context.Background())
require.NoError(t, err)
// Reset tries
require.NoError(t, testState.UpdateValidatorAtIndex(200, new(ethpb.Validator)))
_, err = testState.HashTreeRoot(context.Background())
require.NoError(t, err)
newState1 := testState.Copy()
_ = testState.Copy()
require.NoError(t, testState.UpdateValidatorAtIndex(15, &ethpb.Validator{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
EffectiveBalance: 1111,
Slashed: false,
ActivationEligibilityEpoch: 1112,
ActivationEpoch: 1114,
ExitEpoch: 1116,
WithdrawableEpoch: 1117,
}))
rt, err := testState.HashTreeRoot(context.Background())
require.NoError(t, err)
pbState, err = v2.ProtobufBeaconState(testState.InnerStateUnsafe())
require.NoError(t, err)
copiedTestState, err := v2.InitializeFromProto(pbState)
require.NoError(t, err)
rt2, err := copiedTestState.HashTreeRoot(context.Background())
require.NoError(t, err)
assert.Equal(t, rt, rt2)
require.NoError(t, newState1.UpdateValidatorAtIndex(150, &ethpb.Validator{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
EffectiveBalance: 2111,
Slashed: false,
ActivationEligibilityEpoch: 2112,
ActivationEpoch: 2114,
ExitEpoch: 2116,
WithdrawableEpoch: 2117,
}))
rt, err = newState1.HashTreeRoot(context.Background())
require.NoError(t, err)
pbState, err = v2.ProtobufBeaconState(newState1.InnerStateUnsafe())
require.NoError(t, err)
copiedTestState, err = v2.InitializeFromProto(pbState)
require.NoError(t, err)
rt2, err = copiedTestState.HashTreeRoot(context.Background())
require.NoError(t, err)
assert.Equal(t, rt, rt2)
}

View File

@@ -378,17 +378,31 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex)
func (b *BeaconState) recomputeFieldTrie(index types.FieldIndex, elements interface{}) ([32]byte, error) {
fTrie := b.stateFieldLeaves[index]
fTrieMutex := fTrie.RWMutex
// We can't lock the trie directly because the trie's variable gets reassigned,
// and therefore we would call Unlock() on a different object.
fTrieMutex := fTrie.RWMutex
if fTrie.FieldReference().Refs() > 1 {
fTrieMutex.Lock()
fTrieMutex.Lock()
if fTrie.Empty() {
err := b.resetFieldTrie(index, elements, fTrie.Length())
if err != nil {
fTrieMutex.Unlock()
return [32]byte{}, err
}
// Reduce reference count as we are instantiating a new trie.
fTrie.FieldReference().MinusRef()
newTrie := fTrie.CopyTrie()
fTrieMutex.Unlock()
return b.stateFieldLeaves[index].TrieRoot()
}
if fTrie.FieldReference().Refs() > 1 {
fTrie.FieldReference().MinusRef()
newTrie := fTrie.TransferTrie()
b.stateFieldLeaves[index] = newTrie
fTrie = newTrie
fTrieMutex.Unlock()
}
fTrieMutex.Unlock()
// remove duplicate indexes
b.dirtyIndices[index] = slice.SetUint64(b.dirtyIndices[index])
// sort indexes again

View File

@@ -50,6 +50,7 @@ go_library(
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_pkg_errors//:go_default_library",
@@ -73,6 +74,7 @@ go_test(
"references_test.go",
"setters_test.go",
"state_fuzz_test.go",
"state_test.go",
"state_trie_test.go",
],
embed = [":go_default_library"],
@@ -90,6 +92,7 @@ go_test(
"//crypto/bls:go_default_library",
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",

View File

@@ -2,4 +2,4 @@ package v3
import "errors"
var ErrNilParticipation = errors.New("Nil epoch participation in state")
var ErrNilParticipation = errors.New("nil epoch participation in state")

Some files were not shown because too many files have changed in this diff.