Compare commits

35 Commits

Author SHA1 Message Date
Potuz
4d24dfaaa7 typo 2022-02-24 18:42:26 -03:00
Potuz
1cd60e1606 gofmt 2022-02-24 18:40:50 -03:00
Potuz
97e4131e27 godoc 2022-02-24 18:36:00 -03:00
Potuz
0eb845153b safe block PoC 2022-02-24 18:32:28 -03:00
Potuz
37e35d5931 don't track head Root 2022-02-23 06:41:12 -03:00
terence tsao
5cf74ac498 Move zero hash 2022-02-21 17:42:17 -08:00
terence tsao
f5a0de1564 Clean ups 2022-02-21 17:41:49 -08:00
terence tsao
1c07d053e3 Radek's suggestions
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-02-21 17:00:36 -08:00
Potuz
36e4875444 fix receive_block_test 2022-02-19 09:24:58 -03:00
Potuz
2192f36f56 remove unused finalizedRoot 2022-02-19 09:08:38 -03:00
Potuz
e5758025bc Radek's and Terence's review #2 2022-02-18 21:17:44 -03:00
Potuz
a020475758 Merge branch 'develop' into forkchoice_double_tree 2022-02-18 11:36:27 -03:00
Potuz
69d9db30e0 fix spec tests 2022-02-18 08:27:24 -03:00
Potuz
b977e0cbca fix proposer Boost 2022-02-17 19:18:09 -03:00
Potuz
e901d2bf8a Radek and Terence's review 2022-02-17 10:59:11 -03:00
Potuz
2a6cf3fa4e Merge branch 'develop' into forkchoice_double_tree 2022-02-16 13:01:33 -03:00
Potuz
9a30442fc6 Merge branch 'develop' into forkchoice_double_tree 2022-02-15 18:11:49 -03:00
Potuz
2d2de83667 dead tests lintint 2022-02-15 17:34:53 -03:00
Potuz
c159646656 fix mock chainservice 2022-02-15 17:11:08 -03:00
Potuz
186eaf2c6e unused parameters in interface 2022-02-15 16:52:15 -03:00
Potuz
ea10c35825 fix rpc tests 2022-02-15 16:36:57 -03:00
Potuz
738f4a6fc3 remove move conflicts 2022-02-15 15:57:41 -03:00
Potuz
21af707e82 add metrics 2022-02-15 15:54:07 -03:00
Potuz
67d3427d49 Merge branch 'develop' into forkchoice_double_tree 2022-02-15 14:42:17 -03:00
Potuz
f28d87b4d6 Passing unit tests and spec tests 2022-02-15 14:07:47 -03:00
Potuz
7807a92a0f passing protoarray unit tests 2022-02-15 12:26:08 -03:00
Potuz
0293362e02 Passing Store Tests 2022-02-15 08:33:23 -03:00
Potuz
acb21807c5 Fix some tests 2022-02-14 17:27:52 -03:00
Potuz
ce0aeb0fbb compiling blockchain tests 2022-02-12 19:35:31 -03:00
Potuz
31503b59b8 compiling version 2022-02-12 18:54:20 -03:00
Potuz
b5dddb0550 update proto 2022-02-12 18:27:15 -03:00
Potuz
d2666cc869 add forkchoice.go 2022-02-12 18:18:09 -03:00
Potuz
caec9ac555 double tree structure initial commit 2022-02-12 18:17:43 -03:00
Potuz
71af44bc16 Fix integer overflow (#10222)
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2022-02-11 15:42:49 -03:00
Potuz
515cfa2260 move to tree structure 2022-02-10 22:51:02 -03:00
459 changed files with 7078 additions and 20699 deletions
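For orientation before the file diffs: the two sides of this comparison use different calls to insert blocks into the forkchoice store. The compile-only Go sketch below mirrors the call sites visible in the test diffs further down; the argument names and plain integer types are guesses, since the real interface lives in beacon-chain/forkchoice and uses eth2-types.

package sketch

import "context"

type root = [32]byte

// One side inserts a block together with an explicit optimistic-status flag.
type processBlockStore interface {
	ProcessBlock(ctx context.Context, slot uint64, blockRoot, parentRoot root,
		justifiedEpoch, finalizedEpoch uint64, optimistic bool) error
}

// The other side always inserts optimistically and flips validity afterwards
// (see the SetOptimisticToValid call in the updateHead diff below).
type insertOptimisticStore interface {
	InsertOptimisticBlock(ctx context.Context, slot uint64, blockRoot, parentRoot root,
		justifiedEpoch, finalizedEpoch uint64) error
}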

View File

@@ -217,6 +217,3 @@ build:remote --remote_local_fallback
# Ignore GoStdLib with remote caching
build --modify_execution_info='GoStdlib.*=+no-remote-cache'
# Set bazel gotag
build --define gotags=bazel

View File

@@ -34,7 +34,7 @@ build --sandbox_tmpfs_path=/tmp
build --verbose_failures
build --announce_rc
build --show_progress_rate_limit=5
build --curses=no --color=no
build --curses=yes --color=no
build --keep_going
build --test_output=errors
build --flaky_test_attempts=5

View File

@@ -16,8 +16,6 @@ exports_files([
# gazelle:map_kind go_library go_library @prysm//tools/go:def.bzl
# gazelle:map_kind go_test go_test @prysm//tools/go:def.bzl
# gazelle:map_kind go_repository go_repository @prysm//tools/go:def.bzl
# gazelle:build_tags bazel
# gazelle:exclude tools/analyzers/**/testdata/**
gazelle(
name = "gazelle",
prefix = prefix,
@@ -127,7 +125,6 @@ nogo(
"//tools/analyzers/ineffassign:go_default_library",
"//tools/analyzers/properpermissions:go_default_library",
"//tools/analyzers/recursivelock:go_default_library",
"//tools/analyzers/uintcast:go_default_library",
] + select({
# nogo checks that fail with coverage enabled.
":coverage_enabled": [],

View File

@@ -183,7 +183,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()
go_register_toolchains(
go_version = "1.17.6",
go_version = "1.16.4",
nogo = "@//:nogo",
)
@@ -349,9 +349,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "4797a7e594a5b1f4c1c8080701613f3ee451b01ec0861499ea7d9b60877a6b23",
sha256 = "f196fe4367c2d2d01d36565c0dc6eecfa4f03adba1fc03a61d62953fce606e1f",
urls = [
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.3/prysm-web-ui.tar.gz",
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.2/prysm-web-ui.tar.gz",
],
)

View File

@@ -3,7 +3,6 @@ package apimiddleware
import (
"net/http"
"reflect"
"time"
"github.com/gorilla/mux"
)
@@ -15,7 +14,6 @@ import (
type ApiProxyMiddleware struct {
GatewayAddress string
EndpointCreator EndpointFactory
Timeout time.Duration
router *mux.Router
}
@@ -122,7 +120,7 @@ func (m *ApiProxyMiddleware) WithMiddleware(path string) http.HandlerFunc {
WriteError(w, errJson, nil)
return
}
grpcResp, errJson := m.ProxyRequest(req)
grpcResp, errJson := ProxyRequest(req)
if errJson != nil {
WriteError(w, errJson, nil)
return

View File

@@ -5,10 +5,10 @@ import (
"encoding/json"
"io"
"io/ioutil"
"net"
"net/http"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/api/grpc"
@@ -75,14 +75,11 @@ func (m *ApiProxyMiddleware) PrepareRequestForProxying(endpoint Endpoint, req *h
}
// ProxyRequest proxies the request to grpc-gateway.
func (m *ApiProxyMiddleware) ProxyRequest(req *http.Request) (*http.Response, ErrorJson) {
func ProxyRequest(req *http.Request) (*http.Response, ErrorJson) {
// We do not use http.DefaultClient because it does not have any timeout.
netClient := &http.Client{Timeout: m.Timeout}
netClient := &http.Client{Timeout: time.Minute * 2}
grpcResp, err := netClient.Do(req)
if err != nil {
if err, ok := err.(net.Error); ok && err.Timeout() {
return nil, TimeoutError()
}
return nil, InternalServerErrorWithMessage(err, "could not proxy request")
}
if grpcResp == nil {
@@ -114,14 +111,9 @@ func HandleGrpcResponseError(errJson ErrorJson, resp *http.Response, respBody []
w.Header().Set(h, v)
}
}
// Handle gRPC timeout.
if resp.StatusCode == http.StatusGatewayTimeout {
WriteError(w, TimeoutError(), resp.Header)
} else {
// Set code to HTTP code because unmarshalled body contained gRPC code.
errJson.SetCode(resp.StatusCode)
WriteError(w, errJson, resp.Header)
}
// Set code to HTTP code because unmarshalled body contained gRPC code.
errJson.SetCode(resp.StatusCode)
WriteError(w, errJson, resp.Header)
}
return responseHasError, nil
}
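A note on the hunk above: one side distinguishes client timeouts from other transport failures with a net.Error type assertion and a configurable deadline, while the other hard-codes a two-minute client timeout. A minimal standalone sketch of that pattern, with illustrative names and error messages (only http.Client, the net.Error check, and the 408-style mapping come from the diff):

package main

import (
	"fmt"
	"net"
	"net/http"
	"time"
)

// doProxy forwards a prepared request with a hard deadline. http.DefaultClient
// is avoided because it has no timeout at all.
func doProxy(req *http.Request, timeout time.Duration) (*http.Response, error) {
	client := &http.Client{Timeout: timeout}
	resp, err := client.Do(req)
	if err != nil {
		if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
			// The middleware maps this case to TimeoutError (HTTP 408).
			return nil, fmt.Errorf("request timed out after %s", timeout)
		}
		return nil, fmt.Errorf("could not proxy request: %w", err)
	}
	return resp, nil
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "http://example.invalid/", nil)
	if _, err := doProxy(req, 2*time.Minute); err != nil {
		fmt.Println(err)
	}
}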

View File

@@ -41,13 +41,6 @@ func InternalServerError(err error) *DefaultErrorJson {
}
}
func TimeoutError() *DefaultErrorJson {
return &DefaultErrorJson{
Message: "Request timeout",
Code: http.StatusRequestTimeout,
}
}
// StatusCode returns the error's underlying error code.
func (e *DefaultErrorJson) StatusCode() int {
return e.Code

View File

@@ -52,7 +52,6 @@ type config struct {
muxHandler MuxHandler
pbHandlers []*PbMux
router *mux.Router
timeout time.Duration
}
// Gateway is the gRPC gateway to serve HTTP JSON traffic as a proxy and forward it to the gRPC server.
@@ -249,7 +248,6 @@ func (g *Gateway) registerApiMiddleware() {
g.proxy = &apimiddleware.ApiProxyMiddleware{
GatewayAddress: g.cfg.gatewayAddr,
EndpointCreator: g.cfg.apiMiddlewareEndpointFactory,
Timeout: g.cfg.timeout,
}
log.Info("Starting API middleware")
g.proxy.Run(g.cfg.router)

View File

@@ -1,10 +1,7 @@
package gateway
import (
"time"
"github.com/gorilla/mux"
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/prysmaticlabs/prysm/api/gateway/apimiddleware"
)
@@ -82,12 +79,3 @@ func WithApiMiddleware(endpointFactory apimiddleware.EndpointFactory) Option {
return nil
}
}
// WithTimeout allows changing the timeout value for API calls.
func WithTimeout(seconds uint64) Option {
return func(g *Gateway) error {
g.cfg.timeout = time.Second * time.Duration(seconds)
gwruntime.DefaultContextTimeout = time.Second * time.Duration(seconds)
return nil
}
}
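The WithTimeout option in the hunk above follows the gateway's functional-options pattern. A self-contained sketch of that pattern, using a simplified stand-in Gateway and config rather than the real gateway package (the WithTimeout body mirrors the diff, minus the gwruntime.DefaultContextTimeout side effect):

package main

import (
	"fmt"
	"time"
)

type config struct {
	gatewayAddr string
	timeout     time.Duration
}

type Gateway struct{ cfg *config }

// Option mutates the gateway configuration and may fail.
type Option func(g *Gateway) error

// WithTimeout allows changing the timeout value for API calls.
func WithTimeout(seconds uint64) Option {
	return func(g *Gateway) error {
		g.cfg.timeout = time.Second * time.Duration(seconds)
		return nil
	}
}

// New applies each option in order, aborting on the first error.
func New(opts ...Option) (*Gateway, error) {
	g := &Gateway{cfg: &config{}}
	for _, o := range opts {
		if err := o(g); err != nil {
			return nil, err
		}
	}
	return g, nil
}

func main() {
	g, _ := New(WithTimeout(120))
	fmt.Println(g.cfg.timeout) // 2m0s
}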

View File

@@ -110,8 +110,9 @@ func TestLockUnlock_CleansUnused(t *testing.T) {
lock := NewMultilock("dog", "cat", "owl")
lock.Lock()
assert.Equal(t, 3, len(locks.list))
lock.Unlock()
defer lock.Unlock()
<-time.After(100 * time.Millisecond)
wg.Done()
}()
wg.Wait()

View File

@@ -49,14 +49,12 @@ go_library(
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/powchain/engine-api-client/v1:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
@@ -66,7 +64,6 @@ go_library(
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//monitoring/tracing:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
@@ -74,8 +71,6 @@ go_library(
"//runtime/version:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
@@ -106,7 +101,6 @@ go_test(
"init_test.go",
"log_test.go",
"metrics_test.go",
"mock_engine_test.go",
"mock_test.go",
"optimistic_sync_test.go",
"pow_block_test.go",

View File

@@ -6,14 +6,13 @@ import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/time/slots"
"go.opencensus.io/trace"
)
@@ -53,12 +52,12 @@ type HeadFetcher interface {
HeadETH1Data() *ethpb.Eth1Data
HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (types.ValidatorIndex, bool)
HeadValidatorIndexToPublicKey(ctx context.Context, index types.ValidatorIndex) ([fieldparams.BLSPubkeyLength]byte, error)
ProtoArrayStore() *protoarray.Store
ChainHeads() ([][32]byte, []types.Slot)
IsOptimistic(ctx context.Context) (bool, error)
IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error)
IsOptimistic() (bool, error)
IsOptimisticForRoot(root [32]byte) (bool, error)
HeadSyncCommitteeFetcher
HeadDomainFetcher
ForkChoicer() forkchoice.ForkChoicer
}
// ForkFetcher retrieves the current fork information of the Ethereum beacon chain.
@@ -238,6 +237,11 @@ func (s *Service) HeadETH1Data() *ethpb.Eth1Data {
return s.head.state.Eth1Data()
}
// ProtoArrayStore returns the proto array store object.
func (s *Service) ProtoArrayStore() *protoarray.Store {
return s.cfg.ForkChoiceStore.Store()
}
// GenesisTime returns the genesis time of beacon chain.
func (s *Service) GenesisTime() time.Time {
return s.genesisTime
@@ -310,34 +314,20 @@ func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index types.V
return v.PublicKey(), nil
}
// ForkChoicer returns the forkchoice interface
func (s *Service) ForkChoicer() forkchoice.ForkChoicer {
return s.cfg.ForkChoiceStore
}
// IsOptimistic returns true if the current head is optimistic.
func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
func (s *Service) IsOptimistic() (bool, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()
if slots.ToEpoch(s.CurrentSlot()) < params.BeaconConfig().BellatrixForkEpoch {
return false, nil
}
return s.cfg.ForkChoiceStore.IsOptimistic(ctx, s.head.root)
return s.cfg.ForkChoiceStore.IsOptimistic(s.head.root)
}
// IsOptimisticForRoot takes the root and slot as arguments instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
return s.cfg.ForkChoiceStore.IsOptimistic(ctx, root)
func (s *Service) IsOptimisticForRoot(root [32]byte) (bool, error) {
return s.cfg.ForkChoiceStore.IsOptimistic(root)
}
// SetGenesisTime sets the genesis time of beacon chain.
func (s *Service) SetGenesisTime(t time.Time) {
s.genesisTime = t
}
// ForkChoiceStore returns the fork choice store in the service
func (s *Service) ForkChoiceStore() forkchoice.ForkChoicer {
return s.cfg.ForkChoiceStore
}
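The pre-Bellatrix short-circuit in IsOptimistic above hinges on slot-to-epoch arithmetic: an epoch is SLOTS_PER_EPOCH consecutive slots (32 on mainnet), so slots.ToEpoch is integer division. A minimal sketch assuming the mainnet constant; the real code works with types.Slot and types.Epoch from eth2-types:

package main

import "fmt"

const slotsPerEpoch = 32 // mainnet SLOTS_PER_EPOCH

func toEpoch(slot uint64) uint64 { return slot / slotsPerEpoch }

func main() {
	// With an illustrative BellatrixForkEpoch of 5, a head at slot 101 sits in
	// epoch 3, so IsOptimistic can return false without consulting forkchoice.
	bellatrixForkEpoch := uint64(5)
	fmt.Println(toEpoch(101) < bellatrixForkEpoch) // true
}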

View File

@@ -8,7 +8,6 @@ import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
@@ -41,12 +40,6 @@ func TestHeadRoot_Nil(t *testing.T) {
assert.DeepEqual(t, params.BeaconConfig().ZeroHash[:], headRoot, "Incorrect pre chain start value")
}
func TestService_ForkChoiceStore(t *testing.T) {
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}}
p := c.ForkChoiceStore()
require.Equal(t, 0, int(p.FinalizedEpoch()))
}
func TestFinalizedCheckpt_CanRetrieve(t *testing.T) {
beaconDB := testDB.SetupDB(t)
@@ -284,40 +277,27 @@ func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
root = c.HeadGenesisValidatorsRoot()
require.DeepEqual(t, root[:], s.GenesisValidatorsRoot())
}
func TestService_ChainHeads_ProtoArray(t *testing.T) {
func TestService_ProtoArrayStore(t *testing.T) {
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0)}}
p := c.ProtoArrayStore()
require.Equal(t, 0, int(p.FinalizedEpoch()))
}
func TestService_ChainHeads(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0,
params.BeaconConfig().ZeroHash)}}
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, 0, 0))
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0)}}
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, false))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, false))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 0, 0, false))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, 0, 0, false))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, 0, 0, false))
roots, slots := c.ChainHeads()
require.DeepEqual(t, [][32]byte{{'c'}, {'d'}, {'e'}}, roots)
require.DeepEqual(t, []types.Slot{102, 103, 104}, slots)
}
func TestService_ChainHeads_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}}
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, 0, 0))
roots, slots := c.ChainHeads()
require.Equal(t, 3, len(roots))
rootMap := map[[32]byte]types.Slot{[32]byte{'c'}: 102, [32]byte{'d'}: 103, [32]byte{'e'}: 104}
for i, root := range roots {
slot, ok := rootMap[root]
require.Equal(t, true, ok)
require.Equal(t, slot, slots[i])
}
}
func TestService_HeadPublicKeyToValidatorIndex(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 10)
c := &Service{}
@@ -376,64 +356,35 @@ func TestService_HeadValidatorIndexToPublicKeyNil(t *testing.T) {
require.Equal(t, [fieldparams.BLSPubkeyLength]byte{}, p)
}
func TestService_IsOptimistic_ProtoArray(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.BellatrixForkEpoch = 0
params.OverrideBeaconConfig(cfg)
func TestService_IsOptimistic_NotOptimistic(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}, head: &head{slot: 101, root: [32]byte{'b'}}}
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0))
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, false))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, false))
opt, err := c.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, true, opt)
}
func TestService_IsOptimistic_DoublyLinkedTree(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.BellatrixForkEpoch = 0
params.OverrideBeaconConfig(cfg)
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0))
opt, err := c.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, true, opt)
}
func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {
ctx := context.Background()
c := &Service{genesisTime: time.Now()}
opt, err := c.IsOptimistic(ctx)
opt, err := c.IsOptimistic()
require.NoError(t, err)
require.Equal(t, false, opt)
}
func TestService_IsOptimisticForRoot_ProtoArray(t *testing.T) {
func TestService_IsOptimistic(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}, head: &head{slot: 101, root: [32]byte{'b'}}}
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0))
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, true))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, true))
opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
opt, err := c.IsOptimistic()
require.NoError(t, err)
require.Equal(t, true, opt)
}
func TestService_IsOptimisticForRoot_DoublyLinkedTree(t *testing.T) {
func TestService_IsOptimisticForRoot(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0))
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, true))
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, true))
opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
opt, err := c.IsOptimisticForRoot([32]byte{'a'})
require.NoError(t, err)
require.Equal(t, true, opt)
}

View File

@@ -10,7 +10,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/features"
@@ -78,17 +77,9 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
if err != nil {
return err
}
if features.Get().EnableForkChoiceDoublyLinkedTree {
s.cfg.ForkChoiceStore = doublylinkedtree.New(j.Epoch, f.Epoch)
} else {
s.cfg.ForkChoiceStore = protoarray.New(j.Epoch, f.Epoch, bytesutil.ToBytes32(f.Root))
}
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block(), headStartRoot, f, j); err != nil {
return err
}
s.cfg.ForkChoiceStore = protoarray.New(j.Epoch, f.Epoch)
// TODO(10261) send optimistic status
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, headStartRoot); err != nil {
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block(), headStartRoot, f, j, false /* optimistic status */); err != nil {
return err
}
}

View File

@@ -1,49 +0,0 @@
package blockchain
import (
"context"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
)
type mockEngineService struct {
newPayloadError error
forkchoiceError error
blks map[[32]byte]*enginev1.ExecutionBlock
}
func (m *mockEngineService) NewPayload(context.Context, *enginev1.ExecutionPayload) ([]byte, error) {
return nil, m.newPayloadError
}
func (m *mockEngineService) ForkchoiceUpdated(context.Context, *enginev1.ForkchoiceState, *enginev1.PayloadAttributes) (*enginev1.PayloadIDBytes, []byte, error) {
return nil, nil, m.forkchoiceError
}
func (*mockEngineService) GetPayloadV1(
_ context.Context, _ enginev1.PayloadIDBytes,
) *enginev1.ExecutionPayload {
return nil
}
func (*mockEngineService) GetPayload(context.Context, [8]byte) (*enginev1.ExecutionPayload, error) {
return nil, nil
}
func (*mockEngineService) ExchangeTransitionConfiguration(context.Context, *enginev1.TransitionConfiguration) error {
return nil
}
func (*mockEngineService) LatestExecutionBlock(context.Context) (*enginev1.ExecutionBlock, error) {
return nil, nil
}
func (m *mockEngineService) ExecutionBlockByHash(_ context.Context, hash common.Hash) (*enginev1.ExecutionBlock, error) {
blk, ok := m.blks[common.BytesToHash(hash.Bytes())]
if !ok {
return nil, errors.New("block not found")
}
return blk, nil
}

View File

@@ -13,7 +13,7 @@ import (
func testServiceOptsWithDB(t *testing.T) []Option {
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
fcs := protoarray.New(0, 0)
return []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),

View File

@@ -17,7 +17,7 @@ import (
func TestService_newSlot(t *testing.T) {
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{})
fcs := protoarray.New(0, 0)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -25,11 +25,11 @@ func TestService_newSlot(t *testing.T) {
}
ctx := context.Background()
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, 0, 0)) // genesis
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 32, [32]byte{'a'}, [32]byte{}, 0, 0)) // finalized
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 64, [32]byte{'b'}, [32]byte{'a'}, 0, 0)) // justified
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 96, [32]byte{'c'}, [32]byte{'a'}, 0, 0)) // best justified
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 97, [32]byte{'d'}, [32]byte{}, 0, 0)) // bad
require.NoError(t, fcs.ProcessBlock(ctx, 0, [32]byte{}, [32]byte{}, 0, 0, true)) // genesis
require.NoError(t, fcs.ProcessBlock(ctx, 32, [32]byte{'a'}, [32]byte{}, 0, 0, true)) // finalized
require.NoError(t, fcs.ProcessBlock(ctx, 64, [32]byte{'b'}, [32]byte{'a'}, 0, 0, true)) // justified
require.NoError(t, fcs.ProcessBlock(ctx, 96, [32]byte{'c'}, [32]byte{'a'}, 0, 0, true)) // best justified
require.NoError(t, fcs.ProcessBlock(ctx, 97, [32]byte{'d'}, [32]byte{}, 0, 0, true)) // bad
type args struct {
slot types.Slot

View File

@@ -2,150 +2,13 @@ package blockchain
import (
"context"
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/sirupsen/logrus"
)
// notifyForkchoiceUpdate signals execution engine the fork choice updates. Execution engine should:
// 1. Re-organizes the execution payload chain and corresponding state to make head_block_hash the head.
// 2. Applies finality to the execution state: it irreversibly persists the chain of all execution payloads and corresponding state, up to and including finalized_block_hash.
func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.BeaconBlock, finalizedRoot [32]byte) (*enginev1.PayloadIDBytes, error) {
if headBlk == nil || headBlk.IsNil() || headBlk.Body().IsNil() {
return nil, errors.New("nil head block")
}
// Must not call fork choice updated until the transition conditions are met on the Pow network.
if isPreBellatrix(headBlk.Version()) {
return nil, nil
}
isExecutionBlk, err := blocks.ExecutionBlock(headBlk.Body())
if err != nil {
return nil, errors.Wrap(err, "could not determine if block is execution block")
}
if !isExecutionBlk {
return nil, nil
}
headPayload, err := headBlk.Body().ExecutionPayload()
if err != nil {
return nil, errors.Wrap(err, "could not get execution payload")
}
finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, s.ensureRootNotZeros(finalizedRoot))
if err != nil {
return nil, errors.Wrap(err, "could not get finalized block")
}
var finalizedHash []byte
if isPreBellatrix(finalizedBlock.Block().Version()) {
finalizedHash = params.BeaconConfig().ZeroHash[:]
} else {
payload, err := finalizedBlock.Block().Body().ExecutionPayload()
if err != nil {
return nil, errors.Wrap(err, "could not get finalized block execution payload")
}
finalizedHash = payload.BlockHash
}
fcs := &enginev1.ForkchoiceState{
HeadBlockHash: headPayload.BlockHash,
SafeBlockHash: headPayload.BlockHash,
FinalizedBlockHash: finalizedHash,
}
// payload attribute is only required when requesting payload, here we are just updating fork choice, so it is nil.
payloadID, validHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, nil /*payload attribute*/)
if err != nil {
switch err {
case v1.ErrAcceptedSyncingPayloadStatus:
log.WithFields(logrus.Fields{
"headSlot": headBlk.Slot(),
"headHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
"finalizedHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash)),
}).Info("Called fork choice updated with optimistic block")
return payloadID, nil
case v1.ErrInvalidPayloadStatus:
if err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, bytesutil.ToBytes32(validHash)); err != nil {
return nil, err
}
return nil, errors.Wrap(err, "Received invalid hash from execution engine")
default:
return nil, errors.Wrap(err, "could not notify forkchoice update from execution engine")
}
}
return payloadID, nil
}
// notifyNewPayload signals execution engine on a new payload
func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int, header *ethpb.ExecutionPayloadHeader, postState state.BeaconState, blk block.SignedBeaconBlock) error {
if postState == nil {
return errors.New("pre and post states must not be nil")
}
// Execution payload is only supported in Bellatrix and beyond.
if isPreBellatrix(postState.Version()) {
return nil
}
if err := helpers.BeaconBlockIsNil(blk); err != nil {
return err
}
body := blk.Block().Body()
enabled, err := blocks.ExecutionEnabled(postState, blk.Block().Body())
if err != nil {
return errors.Wrap(err, "could not determine if execution is enabled")
}
if !enabled {
return nil
}
payload, err := body.ExecutionPayload()
if err != nil {
return errors.Wrap(err, "could not get execution payload")
}
validHash, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
if err != nil {
switch err {
case v1.ErrAcceptedSyncingPayloadStatus:
log.WithFields(logrus.Fields{
"slot": postState.Slot(),
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
}).Info("Called new payload with optimistic block")
return nil
case v1.ErrInvalidPayloadStatus:
if err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, bytesutil.ToBytes32(validHash)); err != nil {
return err
}
return errors.Wrap(err, "Received invalid hash from execution engine")
default:
return errors.Wrap(err, "could not validate execution payload from execution engine")
}
}
// During the transition event, the transition block should be verified for sanity.
if isPreBellatrix(preStateVersion) {
return nil
}
atTransition, err := blocks.IsMergeTransitionBlockUsingPayloadHeader(header, body)
if err != nil {
return errors.Wrap(err, "could not check if merge block is terminal")
}
if !atTransition {
return nil
}
return s.validateMergeBlock(ctx, blk)
}
// isPreBellatrix returns true if input version is before bellatrix fork.
func isPreBellatrix(v int) bool {
return v == version.Phase0 || v == version.Altair
}
// optimisticCandidateBlock returns true if this block can be optimistically synced.
//
// Spec pseudocode definition:

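Summarizing the engine interaction in notifyForkchoiceUpdate and notifyNewPayload above: forkchoiceUpdated carries the head, safe, and finalized block hashes (with safe currently pinned to head), and the engine's error is folded into one of three outcomes: valid, optimistic (syncing/accepted), or invalid. A compact sketch with illustrative stand-ins for the enginev1 types and the engine-api-client errors:

package main

import (
	"errors"
	"fmt"
)

// ForkchoiceState mirrors the three hashes sent to the execution engine.
type ForkchoiceState struct {
	HeadBlockHash      []byte
	SafeBlockHash      []byte // the diff sets this to the head payload's hash
	FinalizedBlockHash []byte
}

var (
	errSyncing = errors.New("payload status is SYNCING or ACCEPTED")
	errInvalid = errors.New("payload status is INVALID")
)

// classify mirrors the error switch in the diff: syncing is tolerated (the
// block is imported optimistically), invalid marks the branch invalid, and
// anything else bubbles up as a hard failure.
func classify(err error) string {
	switch {
	case err == nil:
		return "valid"
	case errors.Is(err, errSyncing):
		return "optimistic"
	case errors.Is(err, errInvalid):
		return "invalid"
	default:
		return "error: " + err.Error()
	}
}

func main() {
	head := []byte{0xaa}
	fcs := ForkchoiceState{HeadBlockHash: head, SafeBlockHash: head, FinalizedBlockHash: []byte{0xff}}
	fmt.Printf("%x -> %s\n", fcs.HeadBlockHash, classify(errSyncing)) // aa -> optimistic
}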
View File

@@ -7,334 +7,25 @@ import (
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
engine "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time/slots"
)
func Test_NotifyForkchoiceUpdate(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
altairBlk, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlockAltair())
require.NoError(t, err)
altairBlkRoot, err := altairBlk.Block().HashTreeRoot()
require.NoError(t, err)
bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlockBellatrix())
require.NoError(t, err)
bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, altairBlk))
require.NoError(t, beaconDB.SaveBlock(ctx, bellatrixBlk))
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
tests := []struct {
name string
blk block.BeaconBlock
finalizedRoot [32]byte
newForkchoiceErr error
errString string
}{
{
name: "nil block",
errString: "nil head block",
},
{
name: "phase0 block",
blk: func() block.BeaconBlock {
b, err := wrapper.WrappedBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}})
require.NoError(t, err)
return b
}(),
},
{
name: "altair block",
blk: func() block.BeaconBlock {
b, err := wrapper.WrappedBeaconBlock(&ethpb.BeaconBlockAltair{Body: &ethpb.BeaconBlockBodyAltair{}})
require.NoError(t, err)
return b
}(),
},
{
name: "not execution block",
blk: func() block.BeaconBlock {
b, err := wrapper.WrappedBeaconBlock(&ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
},
},
})
require.NoError(t, err)
return b
}(),
},
{
name: "happy case: finalized root is altair block",
blk: func() block.BeaconBlock {
b, err := wrapper.WrappedBeaconBlock(&ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
},
})
require.NoError(t, err)
return b
}(),
finalizedRoot: altairBlkRoot,
},
{
name: "happy case: finalized root is bellatrix block",
blk: func() block.BeaconBlock {
b, err := wrapper.WrappedBeaconBlock(&ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
},
})
require.NoError(t, err)
return b
}(),
finalizedRoot: bellatrixBlkRoot,
},
{
name: "forkchoice updated with optimistic block",
blk: func() block.BeaconBlock {
b, err := wrapper.WrappedBeaconBlock(&ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
},
})
require.NoError(t, err)
return b
}(),
newForkchoiceErr: engine.ErrAcceptedSyncingPayloadStatus,
finalizedRoot: bellatrixBlkRoot,
},
{
name: "forkchoice updated with invalid block",
blk: func() block.BeaconBlock {
b, err := wrapper.WrappedBeaconBlock(&ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
},
})
require.NoError(t, err)
return b
}(),
newForkchoiceErr: engine.ErrInvalidPayloadStatus,
finalizedRoot: bellatrixBlkRoot,
errString: "could not notify forkchoice update from execution engine: payload status is INVALID",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
engine := &mockEngineService{forkchoiceError: tt.newForkchoiceErr}
service.cfg.ExecutionEngineCaller = engine
_, err := service.notifyForkchoiceUpdate(ctx, tt.blk, tt.finalizedRoot)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
require.NoError(t, err)
}
})
}
}
func Test_NotifyNewPayload(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = "2"
params.OverrideBeaconConfig(cfg)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
phase0State, _ := util.DeterministicGenesisState(t, 1)
altairState, _ := util.DeterministicGenesisStateAltair(t, 1)
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
},
},
}
bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
tests := []struct {
name string
preState state.BeaconState
postState state.BeaconState
blk block.SignedBeaconBlock
newPayloadErr error
errString string
}{
{
name: "phase 0 post state",
postState: phase0State,
preState: phase0State,
},
{
name: "altair post state",
postState: altairState,
preState: altairState,
},
{
name: "nil post state",
preState: phase0State,
errString: "pre and post states must not be nil",
},
{
name: "nil beacon block",
postState: bellatrixState,
preState: bellatrixState,
errString: "signed beacon block can't be nil",
},
{
name: "new payload with optimistic block",
postState: bellatrixState,
preState: bellatrixState,
blk: bellatrixBlk,
newPayloadErr: engine.ErrAcceptedSyncingPayloadStatus,
},
{
name: "new payload with invalid block",
postState: bellatrixState,
preState: bellatrixState,
blk: bellatrixBlk,
newPayloadErr: engine.ErrInvalidPayloadStatus,
errString: "could not validate execution payload from execution engine: payload status is INVALID",
},
{
name: "altair pre state",
postState: bellatrixState,
preState: altairState,
blk: bellatrixBlk,
},
{
name: "could not get merge block",
postState: bellatrixState,
preState: bellatrixState,
blk: bellatrixBlk,
errString: "could not get merge block parent hash and total difficulty",
},
{
name: "not at merge transition",
postState: bellatrixState,
preState: bellatrixState,
blk: func() block.SignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
},
},
},
}
b, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
return b
}(),
},
{
name: "could not get merge block",
postState: bellatrixState,
preState: bellatrixState,
blk: bellatrixBlk,
errString: "could not get merge block parent hash and total difficulty",
},
{
name: "happy case",
postState: bellatrixState,
preState: bellatrixState,
blk: func() block.SignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
b, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
return b
}(),
},
}
for _, tt := range tests {
engine := &mockEngineService{newPayloadError: tt.newPayloadErr, blks: map[[32]byte]*v1.ExecutionBlock{}}
engine.blks[[32]byte{'a'}] = &v1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
TotalDifficulty: "0x2",
}
engine.blks[[32]byte{'b'}] = &v1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
TotalDifficulty: "0x1",
}
service.cfg.ExecutionEngineCaller = engine
var payload *ethpb.ExecutionPayloadHeader
if tt.preState.Version() == version.Bellatrix {
payload, err = tt.preState.LatestExecutionPayloadHeader()
require.NoError(t, err)
}
err := service.notifyNewPayload(ctx, tt.preState.Version(), payload, tt.postState, tt.blk)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
require.NoError(t, err)
}
}
}
func Test_IsOptimisticCandidateBlock(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
fcs := protoarray.New(0, 0)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -424,7 +115,7 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
blk.Block.Body.ExecutionPayload.StateRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk.Block.Body.ExecutionPayload.ReceiptsRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk.Block.Body.ExecutionPayload.LogsBloom = bytesutil.PadTo([]byte{'a'}, fieldparams.LogsBloomLength)
blk.Block.Body.ExecutionPayload.PrevRandao = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk.Block.Body.ExecutionPayload.Random = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk.Block.Body.ExecutionPayload.BaseFeePerGas = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
blk.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
wr, err := wrapper.WrappedBellatrixSignedBeaconBlock(blk)

View File

@@ -11,7 +11,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
enginev1 "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
@@ -51,14 +50,6 @@ func WithChainStartFetcher(f powchain.ChainStartFetcher) Option {
}
}
// WithExecutionEngineCaller to call execution engine.
func WithExecutionEngineCaller(c enginev1.Caller) Option {
return func(s *Service) error {
s.cfg.ExecutionEngineCaller = c
return nil
}
}
// WithDepositCache for deposit lifecycle after chain inclusion.
func WithDepositCache(c *depositcache.DepositCache) Option {
return func(s *Service) error {

View File

@@ -1,132 +1,20 @@
package blockchain
import (
"bytes"
"context"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/holiman/uint256"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/sirupsen/logrus"
)
// validateMergeBlock validates terminal block hash in the event of manual overrides before checking for total difficulty.
//
// def validate_merge_block(block: BeaconBlock) -> None:
// if TERMINAL_BLOCK_HASH != Hash32():
// # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached.
// assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
// assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH
// return
//
// pow_block = get_pow_block(block.body.execution_payload.parent_hash)
// # Check if `pow_block` is available
// assert pow_block is not None
// pow_parent = get_pow_block(pow_block.parent_hash)
// # Check if `pow_parent` is available
// assert pow_parent is not None
// # Check if `pow_block` is a valid terminal PoW block
// assert is_valid_terminal_pow_block(pow_block, pow_parent)
func (s *Service) validateMergeBlock(ctx context.Context, b block.SignedBeaconBlock) error {
if err := helpers.BeaconBlockIsNil(b); err != nil {
return err
}
payload, err := b.Block().Body().ExecutionPayload()
if err != nil {
return err
}
if payload == nil {
return errors.New("nil execution payload")
}
if err := validateTerminalBlockHash(b.Block().Slot(), payload); err != nil {
return errors.Wrap(err, "could not validate terminal block hash")
}
mergeBlockParentHash, mergeBlockTD, err := s.getBlkParentHashAndTD(ctx, payload.ParentHash)
if err != nil {
return errors.Wrap(err, "could not get merge block parent hash and total difficulty")
}
_, mergeBlockParentTD, err := s.getBlkParentHashAndTD(ctx, mergeBlockParentHash)
if err != nil {
return errors.Wrap(err, "could not get merge parent block total difficulty")
}
valid, err := validateTerminalBlockDifficulties(mergeBlockTD, mergeBlockParentTD)
if err != nil {
return err
}
if !valid {
return fmt.Errorf("invalid TTD, configTTD: %s, currentTTD: %s, parentTTD: %s",
params.BeaconConfig().TerminalTotalDifficulty, mergeBlockTD, mergeBlockParentTD)
}
log.WithFields(logrus.Fields{
"slot": b.Block().Slot(),
"mergeBlockHash": common.BytesToHash(payload.ParentHash).String(),
"mergeBlockParentHash": common.BytesToHash(mergeBlockParentHash).String(),
"terminalTotalDifficulty": params.BeaconConfig().TerminalTotalDifficulty,
"mergeBlockTotalDifficulty": mergeBlockTD,
"mergeBlockParentTotalDifficulty": mergeBlockParentTD,
}).Info("Validated terminal block")
return nil
}
// getBlkParentHashAndTD retrieves the parent hash and total difficulty of the given block.
func (s *Service) getBlkParentHashAndTD(ctx context.Context, blkHash []byte) ([]byte, *uint256.Int, error) {
blk, err := s.cfg.ExecutionEngineCaller.ExecutionBlockByHash(ctx, common.BytesToHash(blkHash))
if err != nil {
return nil, nil, errors.Wrap(err, "could not get pow block")
}
if blk == nil {
return nil, nil, errors.New("pow block is nil")
}
blkTDBig, err := hexutil.DecodeBig(blk.TotalDifficulty)
if err != nil {
return nil, nil, errors.Wrap(err, "could not decode merge block total difficulty")
}
blkTDUint256, overflows := uint256.FromBig(blkTDBig)
if overflows {
return nil, nil, errors.New("total difficulty overflows")
}
return blk.ParentHash, blkTDUint256, nil
}
// validateTerminalBlockHash validates if the merge block is a valid terminal PoW block.
// spec code:
// if TERMINAL_BLOCK_HASH != Hash32():
// # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached.
// assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
// assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH
// return
func validateTerminalBlockHash(blkSlot types.Slot, payload *enginev1.ExecutionPayload) error {
if bytesutil.ToBytes32(params.BeaconConfig().TerminalBlockHash.Bytes()) == [32]byte{} {
return nil
}
if params.BeaconConfig().TerminalBlockHashActivationEpoch > slots.ToEpoch(blkSlot) {
return errors.New("terminal block hash activation epoch not reached")
}
if !bytes.Equal(payload.ParentHash, params.BeaconConfig().TerminalBlockHash.Bytes()) {
return errors.New("parent hash does not match terminal block hash")
}
return nil
}
// validateTerminalBlockDifficulties validates terminal pow block by comparing own total difficulty with parent's total difficulty.
// validates terminal pow block by comparing own total difficulty with parent's total difficulty.
//
// def is_valid_terminal_pow_block(block: PowBlock, parent: PowBlock) -> bool:
// is_total_difficulty_reached = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
// is_parent_total_difficulty_valid = parent.total_difficulty < TERMINAL_TOTAL_DIFFICULTY
// return is_total_difficulty_reached and is_parent_total_difficulty_valid
func validateTerminalBlockDifficulties(currentDifficulty *uint256.Int, parentDifficulty *uint256.Int) (bool, error) {
func validTerminalPowBlock(currentDifficulty *uint256.Int, parentDifficulty *uint256.Int) (bool, error) {
b, ok := new(big.Int).SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
if !ok {
return false, errors.New("failed to parse terminal total difficulty")

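The is_valid_terminal_pow_block predicate quoted above reduces to two total-difficulty comparisons. A minimal sketch using holiman/uint256 (already imported by this package), with an illustrative TTD constant instead of parsing params.BeaconConfig().TerminalTotalDifficulty:

package main

import (
	"fmt"

	"github.com/holiman/uint256"
)

// isValidTerminalPowBlock: the block has reached TTD but its parent has not.
func isValidTerminalPowBlock(blockTD, parentTD, ttd *uint256.Int) bool {
	reached := blockTD.Cmp(ttd) >= 0     // block.total_difficulty >= TTD
	parentBelow := parentTD.Cmp(ttd) < 0 // parent.total_difficulty < TTD
	return reached && parentBelow
}

func main() {
	ttd := uint256.NewInt(2)
	fmt.Println(isValidTerminalPowBlock(uint256.NewInt(2), uint256.NewInt(1), ttd)) // true
	fmt.Println(isValidTerminalPowBlock(uint256.NewInt(3), uint256.NewInt(2), ttd)) // false: parent already reached TTD
}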
View File

@@ -1,21 +1,12 @@
package blockchain
import (
"context"
"fmt"
"math/big"
"testing"
"github.com/holiman/uint256"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
)
@@ -75,10 +66,10 @@ func Test_validTerminalPowBlock(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = fmt.Sprint(tt.ttd)
params.OverrideBeaconConfig(cfg)
got, err := validateTerminalBlockDifficulties(tt.currentDifficulty, tt.parentDifficulty)
got, err := validTerminalPowBlock(tt.currentDifficulty, tt.parentDifficulty)
require.NoError(t, err)
if got != tt.want {
t.Errorf("validateTerminalBlockDifficulties() = %v, want %v", got, tt.want)
t.Errorf("validTerminalPowBlock() = %v, want %v", got, tt.want)
}
})
}
@@ -96,115 +87,7 @@ func Test_validTerminalPowBlockSpecConfig(t *testing.T) {
parent, of := uint256.FromBig(i)
require.Equal(t, of, false)
got, err := validateTerminalBlockDifficulties(current, parent)
got, err := validTerminalPowBlock(current, parent)
require.NoError(t, err)
require.Equal(t, true, got)
}
func Test_validateMergeBlock(t *testing.T) {
cfg := params.BeaconConfig()
cfg.TerminalTotalDifficulty = "2"
params.OverrideBeaconConfig(cfg)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
engine := &mockEngineService{blks: map[[32]byte]*enginev1.ExecutionBlock{}}
service.cfg.ExecutionEngineCaller = engine
engine.blks[[32]byte{'a'}] = &enginev1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
TotalDifficulty: "0x2",
}
engine.blks[[32]byte{'b'}] = &enginev1.ExecutionBlock{
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
TotalDifficulty: "0x1",
}
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Slot: 1,
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &enginev1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
b, err := wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, service.validateMergeBlock(ctx, b))
cfg.TerminalTotalDifficulty = "1"
params.OverrideBeaconConfig(cfg)
require.ErrorContains(t, "invalid TTD, configTTD: 1, currentTTD: 2, parentTTD: 1", service.validateMergeBlock(ctx, b))
}
func Test_getBlkParentHashAndTD(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
engine := &mockEngineService{blks: map[[32]byte]*enginev1.ExecutionBlock{}}
service.cfg.ExecutionEngineCaller = engine
h := [32]byte{'a'}
p := [32]byte{'b'}
td := "0x1"
engine.blks[h] = &enginev1.ExecutionBlock{
ParentHash: p[:],
TotalDifficulty: td,
}
parentHash, totalDifficulty, err := service.getBlkParentHashAndTD(ctx, h[:])
require.NoError(t, err)
require.Equal(t, p, bytesutil.ToBytes32(parentHash))
require.Equal(t, td, totalDifficulty.String())
_, _, err = service.getBlkParentHashAndTD(ctx, []byte{'c'})
require.ErrorContains(t, "could not get pow block: block not found", err)
engine.blks[h] = nil
_, _, err = service.getBlkParentHashAndTD(ctx, h[:])
require.ErrorContains(t, "pow block is nil", err)
engine.blks[h] = &enginev1.ExecutionBlock{
ParentHash: p[:],
TotalDifficulty: "1",
}
_, _, err = service.getBlkParentHashAndTD(ctx, h[:])
require.ErrorContains(t, "could not decode merge block total difficulty: hex string without 0x prefix", err)
engine.blks[h] = &enginev1.ExecutionBlock{
ParentHash: p[:],
TotalDifficulty: "0XFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
}
_, _, err = service.getBlkParentHashAndTD(ctx, h[:])
require.ErrorContains(t, "could not decode merge block total difficulty: hex number > 256 bits", err)
}
func Test_validateTerminalBlockHash(t *testing.T) {
require.NoError(t, validateTerminalBlockHash(1, &enginev1.ExecutionPayload{}))
cfg := params.BeaconConfig()
cfg.TerminalBlockHash = [32]byte{0x01}
params.OverrideBeaconConfig(cfg)
require.ErrorContains(t, "terminal block hash activation epoch not reached", validateTerminalBlockHash(1, &enginev1.ExecutionPayload{}))
cfg.TerminalBlockHashActivationEpoch = 0
params.OverrideBeaconConfig(cfg)
require.ErrorContains(t, "parent hash does not match terminal block hash", validateTerminalBlockHash(1, &enginev1.ExecutionPayload{}))
require.NoError(t, validateTerminalBlockHash(1, &enginev1.ExecutionPayload{ParentHash: cfg.TerminalBlockHash.Bytes()}))
}

View File

@@ -8,7 +8,6 @@ import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
@@ -22,13 +21,13 @@ import (
"github.com/prysmaticlabs/prysm/time/slots"
)
func TestStore_OnAttestation_ErrorConditions_ProtoArray(t *testing.T) {
func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(protoarray.New(0, 0, [32]byte{})),
WithForkChoiceStore(protoarray.New(0, 0)),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
@@ -128,117 +127,11 @@ func TestStore_OnAttestation_ErrorConditions_ProtoArray(t *testing.T) {
}
}
func TestStore_OnAttestation_ErrorConditions_DoublyLinkedTree(t *testing.T) {
func TestStore_OnAttestation_Ok(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(doublylinkedtree.New(0, 0)),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
_, err = blockTree1(t, beaconDB, []byte{'g'})
require.NoError(t, err)
BlkWithOutState := util.NewBeaconBlock()
BlkWithOutState.Block.Slot = 0
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(BlkWithOutState)))
BlkWithOutStateRoot, err := BlkWithOutState.Block.HashTreeRoot()
require.NoError(t, err)
BlkWithStateBadAtt := util.NewBeaconBlock()
BlkWithStateBadAtt.Block.Slot = 1
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(BlkWithStateBadAtt)))
BlkWithStateBadAttRoot, err := BlkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
s, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetSlot(100*params.BeaconConfig().SlotsPerEpoch))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot))
BlkWithValidState := util.NewBeaconBlock()
BlkWithValidState.Block.Slot = 2
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(BlkWithValidState)))
BlkWithValidStateRoot, err := BlkWithValidState.Block.HashTreeRoot()
require.NoError(t, err)
s, err = util.NewBeaconState()
require.NoError(t, err)
err = s.SetFork(&ethpb.Fork{
Epoch: 0,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
})
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, BlkWithValidStateRoot))
tests := []struct {
name string
a *ethpb.Attestation
wantedErr string
}{
{
name: "attestation's data slot not aligned with target vote",
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}),
wantedErr: "slot 32 does not match target epoch 0",
},
{
name: "no pre state for attestations's target block",
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}}),
wantedErr: "could not get pre state for epoch 0",
},
{
name: "process attestation doesn't match current epoch",
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Epoch: 100,
Root: BlkWithStateBadAttRoot[:]}}}),
wantedErr: "target epoch 100 does not match current epoch",
},
{
name: "process nil attestation",
a: nil,
wantedErr: "attestation can't be nil",
},
{
name: "process nil field (a.Data) in attestation",
a: &ethpb.Attestation{},
wantedErr: "attestation's data can't be nil",
},
{
name: "process nil field (a.Target) in attestation",
a: &ethpb.Attestation{
Data: &ethpb.AttestationData{
BeaconBlockRoot: make([]byte, fieldparams.RootLength),
Target: nil,
Source: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
},
AggregationBits: make([]byte, 1),
Signature: make([]byte, 96),
},
wantedErr: "attestation's target can't be nil",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := service.OnAttestation(ctx, tt.a)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
} else {
assert.NoError(t, err)
}
})
}
}
func TestStore_OnAttestation_Ok_ProtoArray(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
fcs := protoarray.New(0, 0)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -256,33 +149,7 @@ func TestStore_OnAttestation_Ok_ProtoArray(t *testing.T) {
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, 1, 1))
require.NoError(t, service.OnAttestation(ctx, att[0]))
}
func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New(0, 0)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
genesisState, pks := util.DeterministicGenesisState(t, 64)
service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
att, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
require.NoError(t, err)
tRoot := bytesutil.ToBytes32(att[0].Data.Target.Root)
copied := genesisState.Copy()
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, 1, 1))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, 1, 1, false))
require.NoError(t, service.OnAttestation(ctx, att[0]))
}
@@ -463,42 +330,11 @@ func TestVerifyBeaconBlock_OK(t *testing.T) {
assert.NoError(t, service.verifyBeaconBlock(ctx, d), "Did not receive the wanted error")
}
func TestVerifyFinalizedConsistency_InconsistentRoot_ProtoArray(t *testing.T) {
func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
b32 := util.NewBeaconBlock()
b32.Block.Slot = 32
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b32)))
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 1})
b33 := util.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b33)))
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
require.ErrorContains(t, "Root and finalized store are not consistent", err)
}
func TestVerifyFinalizedConsistency_InconsistentRoot_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New(0, 0)
fcs := protoarray.New(0, 0)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -571,8 +407,8 @@ func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, b32.Block.Slot, r32, [32]byte{}, 0, 0))
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, b33.Block.Slot, r33, r32, 0, 0))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, b32.Block.Slot, r32, [32]byte{}, 0, 0, false))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, b33.Block.Slot, r33, r32, 0, 0, false))
_, err = service.cfg.ForkChoiceStore.Head(ctx, 0, r32, []uint64{}, 0)
require.NoError(t, err)


@@ -21,7 +21,6 @@ import (
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/prysmaticlabs/prysm/time/slots"
"go.opencensus.io/trace"
)
@@ -99,22 +98,10 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
return err
}
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
if err != nil {
return err
}
postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
if err != nil {
return err
}
if err := s.notifyNewPayload(ctx, preStateVersion, preStateHeader, postState, signed); err != nil {
return errors.Wrap(err, "could not verify new payload")
}
// TODO(10261) Check optimistic status
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
return err
}
// We add a proposer score boost to fork choice for the block root if applicable, right after
// running a successful state transition for the block.
@@ -124,6 +111,10 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
return err
}
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */, false /* optimistic */); err != nil {
return err
}
// If slasher is configured, forward the attestations in the block via
// an event feed for processing.
if features.Get().EnableSlasher {
@@ -183,9 +174,6 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
if err := s.updateHead(ctx, balances); err != nil {
log.WithError(err).Warn("Could not update head")
}
if _, err := s.notifyForkchoiceUpdate(ctx, s.headBlock().Block(), bytesutil.ToBytes32(finalized.Root)); err != nil {
return err
}
if err := s.pruneCanonicalAttsFromPool(ctx, blockRoot, signed); err != nil {
return err
@@ -258,21 +246,6 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
return s.handleEpochBoundary(ctx, postState)
}
func getStateVersionAndPayload(preState state.BeaconState) (int, *ethpb.ExecutionPayloadHeader, error) {
var preStateHeader *ethpb.ExecutionPayloadHeader
var err error
preStateVersion := preState.Version()
switch preStateVersion {
case version.Phase0, version.Altair:
default:
preStateHeader, err = preState.LatestExecutionPayloadHeader()
if err != nil {
return 0, nil, err
}
}
return preStateVersion, preStateHeader, nil
}
func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlock,
blockRoots [][32]byte) ([]*ethpb.Checkpoint, []*ethpb.Checkpoint, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
@@ -350,15 +323,11 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
// handles a block after the block's batch has been verified, where we can save blocks,
// their state summaries, and split them off to relative hot/cold storage.
func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed block.SignedBeaconBlock,
blockRoot [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
blockRoot [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint, optimistic bool) error {
b := signed.Block()
s.saveInitSyncBlock(blockRoot, signed)
if err := s.insertBlockToForkChoiceStore(ctx, b, blockRoot, fCheckpoint, jCheckpoint); err != nil {
return err
}
// TODO(10261) send optimistic status
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil {
if err := s.insertBlockToForkChoiceStore(ctx, b, blockRoot, fCheckpoint, jCheckpoint, optimistic); err != nil {
return err
}
@@ -446,13 +415,13 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
// This feeds the block and the block's attestations into the fork choice store. It allows the fork choice store
// to gain information on the most current chain.
func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Context, blk block.BeaconBlock, root [32]byte,
st state.BeaconState) error {
st state.BeaconState, optimistic bool) error {
ctx, span := trace.StartSpan(ctx, "blockChain.insertBlockAndAttestationsToForkChoiceStore")
defer span.End()
fCheckpoint := st.FinalizedCheckpoint()
jCheckpoint := st.CurrentJustifiedCheckpoint()
if err := s.insertBlockToForkChoiceStore(ctx, blk, root, fCheckpoint, jCheckpoint); err != nil {
if err := s.insertBlockToForkChoiceStore(ctx, blk, root, fCheckpoint, jCheckpoint, optimistic); err != nil {
return err
}
// Feed in block's attestations to fork choice store.
@@ -471,24 +440,23 @@ func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Contex
}
func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk block.BeaconBlock,
root [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
root [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint, optimistic bool) error {
//TODO(10261) check if the blocks are optimistic or not when filling fork choice
if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
return err
}
// Feed in block to fork choice store.
if err := s.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx,
if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
blk.Slot(), root, bytesutil.ToBytes32(blk.ParentRoot()),
jCheckpoint.Epoch,
fCheckpoint.Epoch); err != nil {
jCheckpoint.Epoch, fCheckpoint.Epoch, optimistic); err != nil {
return errors.Wrap(err, "could not process block for proto array fork choice")
}
// TODO(10261) send optimistic status
return s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, root)
return nil
}
// This saves post state info to DB or cache. This also saves post state info to fork choice store.
// Post state info consists of processed block and state. Do not call this method unless the block and state are verified.
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.SignedBeaconBlock, st state.BeaconState, initSync bool) error {
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.SignedBeaconBlock, st state.BeaconState, initSync bool, optimistic bool) error {
ctx, span := trace.StartSpan(ctx, "blockChain.savePostStateInfo")
defer span.End()
if initSync {
@@ -499,7 +467,7 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.Sig
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
return errors.Wrap(err, "could not save state")
}
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block(), r, st); err != nil {
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block(), r, st, optimistic); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", b.Block().Slot())
}
return nil
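
Taken together, the call-site changes in this file replace the old two-step flow (InsertOptimisticBlock followed by SetOptimisticToValid) with a single ProcessBlock call that carries the block's optimistic status. A minimal sketch of the fork choice surface implied by these call sites follows; the method set, parameter names, and the eth2-types import are inferred from the diff, not copied from the forkchoice package:

package forkchoicesketch

import (
	"context"

	types "github.com/prysmaticlabs/eth2-types"
)

// ForkChoicer is a sketch of the interface these call sites assume.
// It mirrors the arguments passed in this diff; the real definition lives
// in beacon-chain/forkchoice and may differ.
type ForkChoicer interface {
	// ProcessBlock inserts a block together with its justified/finalized
	// epochs and whether it was optimistically imported.
	ProcessBlock(ctx context.Context, slot types.Slot, root, parentRoot [32]byte,
		justifiedEpoch, finalizedEpoch types.Epoch, optimistic bool) error
	// Head computes the head root from the justified checkpoint, validator
	// balances, and finalized epoch.
	Head(ctx context.Context, justifiedEpoch types.Epoch, justifiedRoot [32]byte,
		balances []uint64, finalizedEpoch types.Epoch) ([32]byte, error)
	HasNode(root [32]byte) bool
	NodeCount() int
}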


@@ -366,17 +366,14 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk block.B
for i := len(pendingNodes) - 1; i >= 0; i-- {
b := pendingNodes[i]
r := pendingRoots[i]
if err := s.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx,
if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
b.Slot(), r, bytesutil.ToBytes32(b.ParentRoot()),
jCheckpoint.Epoch,
fCheckpoint.Epoch); err != nil {
fCheckpoint.Epoch, false /* optimistic status */); err != nil {
return errors.Wrap(err, "could not process block for proto array fork choice")
}
// TODO(10261) send optimistic status
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, r); err != nil {
return err
}
}
return nil
}


@@ -16,7 +16,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
@@ -28,18 +27,17 @@ import (
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
prysmTime "github.com/prysmaticlabs/prysm/time"
)
func TestStore_OnBlock_ProtoArray(t *testing.T) {
func TestStore_OnBlock(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
fcs := protoarray.New(0, 0)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -132,122 +130,7 @@ func TestStore_OnBlock_ProtoArray(t *testing.T) {
}
}
func TestStore_OnBlock_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New(0, 0)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
require.NoError(t, err)
random := util.NewBeaconBlock()
random.Block.Slot = 1
random.Block.ParentRoot = validGenesisRoot[:]
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(random)))
randomParentRoot, err := random.Block.HashTreeRoot()
assert.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), randomParentRoot))
randomParentRoot2 := roots[1]
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot2}))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), bytesutil.ToBytes32(randomParentRoot2)))
tests := []struct {
name string
blk *ethpb.SignedBeaconBlock
s state.BeaconState
time uint64
wantErrString string
}{
{
name: "parent block root does not have a state",
blk: util.NewBeaconBlock(),
s: st.Copy(),
wantErrString: "could not reconstruct parent state",
},
{
name: "block is from the future",
blk: func() *ethpb.SignedBeaconBlock {
b := util.NewBeaconBlock()
b.Block.ParentRoot = randomParentRoot2
b.Block.Slot = params.BeaconConfig().FarFutureSlot
return b
}(),
s: st.Copy(),
wantErrString: "is in the far distant future",
},
{
name: "could not get finalized block",
blk: func() *ethpb.SignedBeaconBlock {
b := util.NewBeaconBlock()
b.Block.ParentRoot = randomParentRoot[:]
return b
}(),
s: st.Copy(),
wantErrString: "is not a descendant of the current finalized block",
},
{
name: "same slot as finalized block",
blk: func() *ethpb.SignedBeaconBlock {
b := util.NewBeaconBlock()
b.Block.Slot = 0
b.Block.ParentRoot = randomParentRoot2
return b
}(),
s: st.Copy(),
wantErrString: "block is equal or earlier than finalized block, slot 0 < slot 0",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: roots[0]})
service.store.SetPrevFinalizedCheckpt(&ethpb.Checkpoint{Root: validGenesisRoot[:]})
root, err := tt.blk.Block.HashTreeRoot()
assert.NoError(t, err)
err = service.onBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(tt.blk), root)
assert.ErrorContains(t, tt.wantErrString, err)
})
}
}
func TestStore_OnBlock_ProposerBoostEarly(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New(0, 0)
opts := []Option{
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.BoostProposerRoot(ctx, 0, [32]byte{'A'}, time.Now()))
_, err = service.cfg.ForkChoiceStore.Head(ctx, 0,
params.BeaconConfig().ZeroHash, []uint64{}, 0)
require.ErrorContains(t, "could not apply proposer boost score: invalid proposer boost root", err)
}
func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
func TestStore_OnBlockBatch(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
@@ -265,59 +148,7 @@ func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))
st, keys := util.DeterministicGenesisState(t, 64)
bState := st.Copy()
var blks []block.SignedBeaconBlock
var blkRoots [][32]byte
var firstState state.BeaconState
for i := 1; i < 10; i++ {
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), types.Slot(i))
require.NoError(t, err)
bState, err = transition.ExecuteStateTransition(ctx, bState, wrapper.WrappedPhase0SignedBeaconBlock(b))
require.NoError(t, err)
if i == 1 {
firstState = bState.Copy()
}
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
service.saveInitSyncBlock(root, wrapper.WrappedPhase0SignedBeaconBlock(b))
blks = append(blks, wrapper.WrappedPhase0SignedBeaconBlock(b))
blkRoots = append(blkRoots, root)
}
rBlock, err := blks[0].PbPhase0Block()
assert.NoError(t, err)
rBlock.Block.ParentRoot = gRoot[:]
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[0]))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[0], firstState))
_, _, err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
require.NoError(t, err)
}
func TestStore_OnBlockBatch_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))
st, keys := util.DeterministicGenesisState(t, 64)
@@ -384,7 +215,7 @@ func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
assert.Equal(t, true, update, "Should be able to update justified")
}
func TestShouldUpdateJustified_ReturnFalse_ProtoArray(t *testing.T) {
func TestShouldUpdateJustified_ReturnFalse(t *testing.T) {
ctx := context.Background()
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MinimalSpecConfig())
@@ -392,7 +223,7 @@ func TestShouldUpdateJustified_ReturnFalse_ProtoArray(t *testing.T) {
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
lastJustifiedBlk := util.NewBeaconBlock()
lastJustifiedBlk.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, 32)
lastJustifiedRoot, err := lastJustifiedBlk.Block.HashTreeRoot()
@@ -413,36 +244,7 @@ func TestShouldUpdateJustified_ReturnFalse_ProtoArray(t *testing.T) {
assert.Equal(t, false, update, "Should not be able to update justified, received true")
}
func TestShouldUpdateJustified_ReturnFalse_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MinimalSpecConfig())
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
lastJustifiedBlk := util.NewBeaconBlock()
lastJustifiedBlk.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, 32)
lastJustifiedRoot, err := lastJustifiedBlk.Block.HashTreeRoot()
require.NoError(t, err)
newJustifiedBlk := util.NewBeaconBlock()
newJustifiedBlk.Block.ParentRoot = bytesutil.PadTo(lastJustifiedRoot[:], 32)
newJustifiedRoot, err := newJustifiedBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(newJustifiedBlk)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(lastJustifiedBlk)))
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: lastJustifiedRoot[:]})
update, err := service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: newJustifiedRoot[:]})
require.NoError(t, err)
assert.Equal(t, false, update, "Should not be able to update justified, received true")
}
func TestCachedPreState_CanGetFromStateSummary_ProtoArray(t *testing.T) {
func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
@@ -462,38 +264,7 @@ func TestCachedPreState_CanGetFromStateSummary_ProtoArray(t *testing.T) {
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))
b := util.NewBeaconBlock()
b.Block.Slot = 1
b.Block.ParentRoot = gRoot[:]
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: 1, Root: gRoot[:]}))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, gRoot, s))
require.NoError(t, service.verifyBlkPreState(ctx, wrapper.WrappedPhase0BeaconBlock(b.Block)))
}
func TestCachedPreState_CanGetFromStateSummary_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
s, err := v1.InitializeFromProto(&ethpb.BeaconState{Slot: 1, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
require.NoError(t, err)
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))
b := util.NewBeaconBlock()
@@ -521,7 +292,7 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))
b := util.NewBeaconBlock()
@@ -546,7 +317,7 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(protoarray.New(0, 0, [32]byte{})),
WithForkChoiceStore(protoarray.New(0, 0)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
@@ -576,7 +347,7 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
assert.Equal(t, types.Epoch(2), service.store.BestJustifiedCheckpt().Epoch, "Incorrect justified epoch in service")
}
func TestFillForkChoiceMissingBlocks_CanSave_ProtoArray(t *testing.T) {
func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
@@ -586,7 +357,7 @@ func TestFillForkChoiceMissingBlocks_CanSave_ProtoArray(t *testing.T) {
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
genesisStateRoot := [32]byte{}
@@ -617,7 +388,7 @@ func TestFillForkChoiceMissingBlocks_CanSave_ProtoArray(t *testing.T) {
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[8])), "Didn't save node")
}
func TestFillForkChoiceMissingBlocks_CanSave_DoublyLinkedTree(t *testing.T) {
func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
@@ -627,48 +398,7 @@ func TestFillForkChoiceMissingBlocks_CanSave_DoublyLinkedTree(t *testing.T) {
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
require.NoError(t, err)
beaconState, _ := util.DeterministicGenesisState(t, 32)
block := util.NewBeaconBlock()
block.Block.Slot = 9
block.Block.ParentRoot = roots[8]
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
assert.Equal(t, 5, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[4])), "Didn't save node")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[6])), "Didn't save node")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[8])), "Didn't save node")
}
func TestFillForkChoiceMissingBlocks_RootsMatch_ProtoArray(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
genesisStateRoot := [32]byte{}
@@ -702,7 +432,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_ProtoArray(t *testing.T) {
}
}
func TestFillForkChoiceMissingBlocks_RootsMatch_DoublyLinkedTree(t *testing.T) {
func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
@@ -712,104 +442,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_DoublyLinkedTree(t *testing.T) {
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: make([]byte, 32)})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
require.NoError(t, err)
beaconState, _ := util.DeterministicGenesisState(t, 32)
block := util.NewBeaconBlock()
block.Block.Slot = 9
block.Block.ParentRoot = roots[8]
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
assert.Equal(t, 5, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
// Ensure all roots and their respective blocks exist.
wantedRoots := [][]byte{roots[0], roots[3], roots[4], roots[6], roots[8]}
for i, rt := range wantedRoots {
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(rt)), fmt.Sprintf("Didn't save node: %d", i))
assert.Equal(t, true, service.cfg.BeaconDB.HasBlock(context.Background(), bytesutil.ToBytes32(rt)))
}
}
func TestFillForkChoiceMissingBlocks_FilterFinalized_ProtoArray(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
// Set finalized epoch to 1.
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 1})
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
assert.NoError(t, err)
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
// Define a tree branch, slot 63 <- 64 <- 65
b63 := util.NewBeaconBlock()
b63.Block.Slot = 63
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b63)))
r63, err := b63.Block.HashTreeRoot()
require.NoError(t, err)
b64 := util.NewBeaconBlock()
b64.Block.Slot = 64
b64.Block.ParentRoot = r63[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b64)))
r64, err := b64.Block.HashTreeRoot()
require.NoError(t, err)
b65 := util.NewBeaconBlock()
b65.Block.Slot = 65
b65.Block.ParentRoot = r64[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b65)))
beaconState, _ := util.DeterministicGenesisState(t, 32)
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b65).Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)
// There should be 2 nodes, block 65 and block 64.
assert.Equal(t, 2, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
// Block with slot 63 should be in fork choice because it's less than finalized epoch 1.
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r63), "Didn't save node")
}
func TestFillForkChoiceMissingBlocks_FilterFinalized_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
// Set finalized epoch to 1.
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 1})
@@ -962,7 +595,7 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
fcs := protoarray.New(0, 0)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -1035,7 +668,7 @@ func TestAncestor_CanUseForkchoice(t *testing.T) {
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), 0, 0)) // Saves blocks to fork choice store.
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), 0, 0, false)) // Saves blocks to fork choice store.
}
r, err := service.ancestor(context.Background(), r200[:], 150)
@@ -1049,7 +682,7 @@ func TestAncestor_CanUseDB(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
fcs := protoarray.New(0, 0)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -1080,7 +713,7 @@ func TestAncestor_CanUseDB(t *testing.T) {
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(beaconBlock))) // Saves blocks to DB.
}
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(context.Background(), 200, r200, r200, 0, 0))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(context.Background(), 200, r200, r200, 0, 0, false))
r, err := service.ancestor(context.Background(), r200[:], 150)
require.NoError(t, err)
@@ -1107,7 +740,7 @@ func TestVerifyBlkDescendant(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
fcs := protoarray.New(0, 0, [32]byte{'a'})
fcs := protoarray.New(0, 0)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
@@ -1237,7 +870,7 @@ func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
func TestOnBlock_CanFinalize(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
fcs := protoarray.New(0, 0)
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
@@ -1280,48 +913,6 @@ func TestOnBlock_CanFinalize(t *testing.T) {
require.Equal(t, f.Epoch, service.FinalizedCheckpt().Epoch)
}
func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.AltairForkEpoch = 1
config.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(config)
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0, [32]byte{'a'})
depositCache, err := depositcache.New()
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(fcs),
WithDepositCache(depositCache),
WithStateNotifier(&mock.MockStateNotifier{}),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Root: gRoot[:]})
testState := gs.Copy()
for i := types.Slot(1); i < params.BeaconConfig().SlotsPerEpoch; i++ {
blk, err := util.GenerateFullBlock(testState, keys, util.DefaultBlockGenConfig(), i)
require.NoError(t, err)
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk), r))
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
}
func TestInsertFinalizedDeposits(t *testing.T) {
ctx := context.Background()
opts := testServiceOptsWithDB(t)
@@ -1405,53 +996,3 @@ func TestRemoveBlockAttestationsInPool_NonCanonical(t *testing.T) {
require.NoError(t, service.pruneCanonicalAttsFromPool(ctx, r, wrapper.WrappedPhase0SignedBeaconBlock(b)))
require.Equal(t, 1, service.cfg.AttPool.AggregatedAttestationCount())
}
func Test_getStateVersionAndPayload(t *testing.T) {
tests := []struct {
name string
st state.BeaconState
version int
header *ethpb.ExecutionPayloadHeader
}{
{
name: "phase 0 state",
st: func() state.BeaconState {
s, _ := util.DeterministicGenesisState(t, 1)
return s
}(),
version: version.Phase0,
header: (*ethpb.ExecutionPayloadHeader)(nil),
},
{
name: "altair state",
st: func() state.BeaconState {
s, _ := util.DeterministicGenesisStateAltair(t, 1)
return s
}(),
version: version.Altair,
header: (*ethpb.ExecutionPayloadHeader)(nil),
},
{
name: "bellatrix state",
st: func() state.BeaconState {
s, _ := util.DeterministicGenesisStateBellatrix(t, 1)
require.NoError(t, s.SetLatestExecutionPayloadHeader(&ethpb.ExecutionPayloadHeader{
BlockNumber: 1,
}))
return s
}(),
version: version.Bellatrix,
header: &ethpb.ExecutionPayloadHeader{
BlockNumber: 1,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
version, header, err := getStateVersionAndPayload(tt.st)
require.NoError(t, err)
require.Equal(t, tt.version, version)
require.DeepEqual(t, tt.header, header)
})
}
}


@@ -111,7 +111,7 @@ func TestProcessAttestations_Ok(t *testing.T) {
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, 1, 1))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, 1, 1, false))
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
service.processAttestations(ctx)
require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations()))


@@ -86,8 +86,9 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []block.SignedBe
for i, b := range blocks {
blockCopy := b.Copy()
// TODO(10261) check optimistic status
if err = s.handleBlockAfterBatchVerify(ctx, blockCopy, blkRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
if err = s.handleBlockAfterBatchVerify(ctx, blockCopy,
blkRoots[i], fCheckpoints[i],
jCheckpoints[i], false /* optimistic status */); err != nil {
tracing.AnnotateError(span, err)
return err
}


@@ -127,7 +127,7 @@ func TestService_ReceiveBlock(t *testing.T) {
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)),
WithForkChoiceStore(protoarray.New(0, 0)),
WithAttestationPool(attestations.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
@@ -164,7 +164,7 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)),
WithForkChoiceStore(protoarray.New(0, 0)),
WithAttestationPool(attestations.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
@@ -240,11 +240,9 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
beaconDB := testDB.SetupDB(t)
genesisBlockRoot, err := genesis.HashTreeRoot(ctx)
require.NoError(t, err)
opts := []Option{
WithDatabase(beaconDB),
WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)),
WithForkChoiceStore(protoarray.New(0, 0)),
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
WithStateGen(stategen.New(beaconDB)),
}


@@ -21,18 +21,15 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
f "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
enginev1 "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
@@ -85,9 +82,7 @@ type config struct {
StateGen *stategen.State
SlasherAttestationsFeed *event.Feed
WeakSubjectivityCheckpt *ethpb.Checkpoint
BlockFetcher powchain.POWBlockFetcher
FinalizedStateAtStartUp state.BeaconState
ExecutionEngineCaller enginev1.Caller
}
// NewService instantiates a new block service instance that will
@@ -135,7 +130,6 @@ func (s *Service) Start() {
log.Fatal(err)
}
}
s.spawnProcessAttestationsRoutine(s.cfg.StateNotifier.StateFeed())
}
// Stop the blockchain service's main event loop and associated goroutines.
@@ -166,7 +160,7 @@ func (s *Service) Status() error {
func (s *Service) startFromSavedState(saved state.BeaconState) error {
log.Info("Blockchain data already exists in DB, initializing...")
s.genesisTime = time.Unix(int64(saved.GenesisTime()), 0) // lint:ignore uintcast -- Genesis time will not exceed int64 in your lifetime.
s.genesisTime = time.Unix(int64(saved.GenesisTime()), 0)
s.cfg.AttService.SetGenesisTime(saved.GenesisTime())
originRoot, err := s.originRootFromSavedState(s.ctx)
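
The dropped lint:ignore uintcast annotation above concerns converting the saved uint64 genesis time into an int64 for time.Unix. As the original comment notes, the conversion cannot realistically overflow; a quick illustrative check, not part of the diff:

package main

import (
	"fmt"
	"math"
)

func main() {
	// Largest Unix timestamp an int64 can hold, expressed in years.
	years := float64(math.MaxInt64) / (365.25 * 24 * 3600)
	fmt.Printf("int64 seconds overflow after roughly %.1e years\n", years) // ~2.9e+11
}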
@@ -190,12 +184,7 @@ func (s *Service) startFromSavedState(saved state.BeaconState) error {
}
s.store = store.New(justified, finalized)
var store f.ForkChoicer
if features.Get().EnableForkChoiceDoublyLinkedTree {
store = doublylinkedtree.New(justified.Epoch, finalized.Epoch)
} else {
store = protoarray.New(justified.Epoch, finalized.Epoch, bytesutil.ToBytes32(finalized.Root))
}
store := protoarray.New(justified.Epoch, finalized.Epoch)
s.cfg.ForkChoiceStore = store
ss, err := slots.EpochStart(finalized.Epoch)
@@ -228,6 +217,8 @@ func (s *Service) startFromSavedState(saved state.BeaconState) error {
},
})
s.spawnProcessAttestationsRoutine(s.cfg.StateNotifier.StateFeed())
return nil
}
@@ -336,6 +327,7 @@ func (s *Service) startFromPOWChain() error {
stateChannel := make(chan *feed.Event, 1)
stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
s.spawnProcessAttestationsRoutine(s.cfg.StateNotifier.StateFeed())
for {
select {
case event := <-stateChannel:
@@ -449,17 +441,14 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
genesisCheckpoint := genesisState.FinalizedCheckpoint()
s.store = store.New(genesisCheckpoint, genesisCheckpoint)
if err := s.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx,
if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
genesisBlk.Block().Slot(),
genesisBlkRoot,
params.BeaconConfig().ZeroHash,
genesisCheckpoint.Epoch,
genesisCheckpoint.Epoch); err != nil {
genesisCheckpoint.Epoch, false); err != nil {
log.Fatalf("Could not process genesis block for fork choice: %v", err)
}
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, genesisBlkRoot); err != nil {
log.Fatalf("Could not set optimistic status of genesis block to false: %v", err)
}
s.setHead(genesisBlkRoot, genesisBlk, genesisState)
return nil


@@ -19,7 +19,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
@@ -122,7 +121,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
WithAttestationPool(attestations.NewPool()),
WithP2PBroadcaster(&mockBroadcaster{}),
WithStateNotifier(&mockBeaconNode{}),
WithForkChoiceStore(protoarray.New(0, 0, params.BeaconConfig().ZeroHash)),
WithForkChoiceStore(protoarray.New(0, 0)),
WithAttestationService(attService),
WithStateGen(stateGen),
}
@@ -447,11 +446,11 @@ func TestChainService_SaveHeadNoDB(t *testing.T) {
}
}
func TestHasBlock_ForkChoiceAndDB_ProtoArray(t *testing.T) {
func TestHasBlock_ForkChoiceAndDB(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
cfg: &config{ForkChoiceStore: protoarray.New(0, 0), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
@@ -460,26 +459,7 @@ func TestHasBlock_ForkChoiceAndDB_ProtoArray(t *testing.T) {
require.NoError(t, err)
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
}
func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
block := util.NewBeaconBlock()
r, err := block.Block.HashTreeRoot()
require.NoError(t, err)
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState, false))
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
@@ -533,11 +513,11 @@ func BenchmarkHasBlockDB(b *testing.B) {
}
}
func BenchmarkHasBlockForkChoiceStore_ProtoArray(b *testing.B) {
func BenchmarkHasBlockForkChoiceStore(b *testing.B) {
ctx := context.Background()
beaconDB := testDB.SetupDB(b)
s := &Service{
cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
cfg: &config{ForkChoiceStore: protoarray.New(0, 0), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
@@ -547,28 +527,7 @@ func BenchmarkHasBlockForkChoiceStore_ProtoArray(b *testing.B) {
bs := &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
beaconState, err := v1.InitializeFromProto(bs)
require.NoError(b, err)
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))
b.ResetTimer()
for i := 0; i < b.N; i++ {
require.Equal(b, true, s.cfg.ForkChoiceStore.HasNode(r), "Block is not in fork choice store")
}
}
func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
ctx := context.Background()
beaconDB := testDB.SetupDB(b)
s := &Service{
cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0), BeaconDB: beaconDB},
store: &store.Store{},
}
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
block := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := block.Block.HashTreeRoot()
require.NoError(b, err)
bs := &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
beaconState, err := v1.InitializeFromProto(bs)
require.NoError(b, err)
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState, false))
b.ResetTimer()
for i := 0; i < b.N; i++ {


@@ -18,7 +18,7 @@ go_library(
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",


@@ -18,7 +18,7 @@ import (
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
@@ -32,40 +32,34 @@ var ErrNilState = errors.New("nil state")
// ChainService defines the mock interface for testing
type ChainService struct {
Optimistic bool
ValidAttestation bool
ValidatorsRoot [32]byte
PublicKey [fieldparams.BLSPubkeyLength]byte
State state.BeaconState
Root []byte
Block block.SignedBeaconBlock
FinalizedCheckPoint *ethpb.Checkpoint
CurrentJustifiedCheckPoint *ethpb.Checkpoint
PreviousJustifiedCheckPoint *ethpb.Checkpoint
Slot *types.Slot // Pointer because 0 is a useful value, so checking against it can be incorrect.
BlocksReceived []block.SignedBeaconBlock
Balance *precompute.Balance
Genesis time.Time
ValidatorsRoot [32]byte
CanonicalRoots map[[32]byte]bool
Fork *ethpb.Fork
ETH1Data *ethpb.Eth1Data
InitSyncBlockRoots map[[32]byte]bool
DB db.Database
State state.BeaconState
Block block.SignedBeaconBlock
VerifyBlkDescendantErr error
stateNotifier statefeed.Notifier
BlocksReceived []block.SignedBeaconBlock
SyncCommitteeIndices []types.CommitteeIndex
blockNotifier blockfeed.Notifier
opNotifier opfeed.Notifier
Root []byte
ValidAttestation bool
ForkChoiceStore *protoarray.Store
VerifyBlkDescendantErr error
Slot *types.Slot // Pointer because 0 is a useful value, so checking against it can be incorrect.
SyncCommitteeIndices []types.CommitteeIndex
SyncCommitteeDomain []byte
SyncSelectionProofDomain []byte
SyncContributionProofDomain []byte
PublicKey [fieldparams.BLSPubkeyLength]byte
SyncCommitteePubkeys [][]byte
Genesis time.Time
ForkChoiceStore forkchoice.ForkChoicer
}
// ForkChoicer mocks the same method in the chain service
func (s *ChainService) ForkChoicer() forkchoice.ForkChoicer {
return s.ForkChoiceStore
InitSyncBlockRoots map[[32]byte]bool
}
// StateNotifier mocks the same method in the chain service.
@@ -325,6 +319,11 @@ func (s *ChainService) HeadETH1Data() *ethpb.Eth1Data {
return s.ETH1Data
}
// ProtoArrayStore mocks the same method in the chain service.
func (s *ChainService) ProtoArrayStore() *protoarray.Store {
return s.ForkChoiceStore
}
// GenesisTime mocks the same method in the chain service.
func (s *ChainService) GenesisTime() time.Time {
return s.Genesis
@@ -442,11 +441,11 @@ func (s *ChainService) HeadSyncContributionProofDomain(_ context.Context, _ type
}
// IsOptimistic mocks the same method in the chain service.
func (s *ChainService) IsOptimistic(_ context.Context) (bool, error) {
return s.Optimistic, nil
func (s *ChainService) IsOptimistic() (bool, error) {
return false, nil
}
// IsOptimisticForRoot mocks the same method in the chain service.
func (s *ChainService) IsOptimisticForRoot(_ context.Context, _ [32]byte) (bool, error) {
return s.Optimistic, nil
func (s *ChainService) IsOptimisticForRoot(_ [32]byte) (bool, error) {
return false, nil
}


@@ -1,4 +1,3 @@
//go:build !fuzz
// +build !fuzz
package cache
@@ -18,12 +17,10 @@ import (
"github.com/prysmaticlabs/prysm/config/params"
)
const (
// maxBalanceCacheSize defines the max number of active balance entries the cache can hold.
maxBalanceCacheSize = int(4)
)
var (
// maxBalanceCacheSize defines the max number of active balance entries the cache can hold.
maxBalanceCacheSize = uint64(4)
// BalanceCacheMiss tracks the number of balance requests that aren't present in the cache.
balanceCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "total_effective_balance_cache_miss",
@@ -45,7 +42,7 @@ type BalanceCache struct {
// NewEffectiveBalanceCache creates a new effective balance cache for storing/accessing total balance by epoch.
func NewEffectiveBalanceCache() *BalanceCache {
return &BalanceCache{
cache: lruwrpr.New(maxBalanceCacheSize),
cache: lruwrpr.New(int(maxBalanceCacheSize)),
}
}


@@ -1,4 +1,3 @@
//go:build !fuzz
// +build !fuzz
package cache
@@ -20,13 +19,11 @@ import (
mathutil "github.com/prysmaticlabs/prysm/math"
)
const (
var (
// maxCommitteesCacheSize defines the max number of shuffled committees that can be cached on a per-RANDAO basis.
// Due to reorgs and long finality, it's good to keep the old cache around for a quick switch-over.
maxCommitteesCacheSize = int(32)
)
maxCommitteesCacheSize = uint64(32)
var (
// CommitteeCacheMiss tracks the number of committee requests that aren't present in the cache.
CommitteeCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "committee_cache_miss",
@@ -58,7 +55,7 @@ func committeeKeyFn(obj interface{}) (string, error) {
// NewCommitteesCache creates a new committee cache for storing/accessing shuffled indices of a committee.
func NewCommitteesCache() *CommitteeCache {
return &CommitteeCache{
CommitteeCache: lruwrpr.New(maxCommitteesCacheSize),
CommitteeCache: lruwrpr.New(int(maxCommitteesCacheSize)),
inProgress: make(map[string]bool),
}
}


@@ -33,7 +33,7 @@ func TestCommitteeCache_FuzzCommitteesByEpoch(t *testing.T) {
require.NoError(t, err)
}
assert.Equal(t, maxCommitteesCacheSize, len(cache.CommitteeCache.Keys()), "Incorrect key size")
assert.Equal(t, maxCommitteesCacheSize, uint64(len(cache.CommitteeCache.Keys())), "Incorrect key size")
}
func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
@@ -50,5 +50,5 @@ func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
assert.DeepEqual(t, c.SortedIndices, indices)
}
assert.Equal(t, maxCommitteesCacheSize, len(cache.CommitteeCache.Keys()), "Incorrect key size")
assert.Equal(t, maxCommitteesCacheSize, uint64(len(cache.CommitteeCache.Keys())), "Incorrect key size")
}


@@ -102,7 +102,7 @@ func TestCommitteeCache_CanRotate(t *testing.T) {
}
k := cache.CommitteeCache.Keys()
assert.Equal(t, maxCommitteesCacheSize, len(k))
assert.Equal(t, maxCommitteesCacheSize, uint64(len(k)))
sort.Slice(k, func(i, j int) bool {
return k[i].(string) < k[j].(string)


@@ -27,7 +27,7 @@ var SubnetIDs = newSubnetIDs()
func newSubnetIDs() *subnetIDs {
// A node can calculate committee assignments for the current epoch and the next epoch,
// so the max size is set to two epochs' worth of entries.
cacheSize := int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxCommitteesPerSlot * 2)) // lint:ignore uintcast -- constant values that would panic on startup if negative.
cacheSize := int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxCommitteesPerSlot * 2))
attesterCache := lruwrpr.New(cacheSize)
aggregatorCache := lruwrpr.New(cacheSize)
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
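
The comment above sizes the cache at two epochs' worth of committee assignments. Under mainnet parameters (32 slots per epoch, at most 64 committees per slot) that works out to 4096 entries; a quick illustrative check with the mainnet constants hard-coded:

package main

import "fmt"

func main() {
	const slotsPerEpoch = 32        // mainnet SLOTS_PER_EPOCH
	const maxCommitteesPerSlot = 64 // mainnet MAX_COMMITTEES_PER_SLOT
	fmt.Println(slotsPerEpoch * maxCommitteesPerSlot * 2) // 4096 cache entries
}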


@@ -225,7 +225,7 @@ func verifyDeposit(beaconState state.ReadOnlyBeaconState, deposit *ethpb.Deposit
if ok := trie.VerifyMerkleProofWithDepth(
receiptRoot,
leaf[:],
beaconState.Eth1DepositIndex(),
int(beaconState.Eth1DepositIndex()),
deposit.Proof,
params.BeaconConfig().DepositContractTreeDepth,
); !ok {

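The deposit check above boils down to a depth-limited Merkle branch verification against the deposit tree root. A generic, standalone sketch of that technique follows (using sha256 and illustrative names; this is not the prysm trie package implementation, only the standard algorithm it applies):

package main

import (
    "bytes"
    "crypto/sha256"
    "fmt"
)

// verifyMerkleBranch hashes a leaf up through `depth` levels of the proof.
// At level i, bit i of the leaf index decides whether the sibling hash sits
// on the left or on the right. The result must equal the expected root.
func verifyMerkleBranch(root, leaf []byte, index uint64, proof [][]byte, depth int) bool {
    node := leaf
    for i := 0; i < depth; i++ {
        var h [32]byte
        if (index>>uint(i))&1 == 1 {
            h = sha256.Sum256(append(append([]byte{}, proof[i]...), node...))
        } else {
            h = sha256.Sum256(append(append([]byte{}, node...), proof[i]...))
        }
        node = h[:]
    }
    return bytes.Equal(node, root)
}

func main() {
    // Two-leaf toy tree: root = H(leaf0 || leaf1), proving leaf0 at index 0.
    leaf0 := bytes.Repeat([]byte{1}, 32)
    leaf1 := bytes.Repeat([]byte{2}, 32)
    root := sha256.Sum256(append(append([]byte{}, leaf0...), leaf1...))
    fmt.Println(verifyMerkleBranch(root[:], leaf0, 0, [][]byte{leaf1}, 1)) // true
}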
View File

@@ -23,8 +23,6 @@ import (
// Spec code:
// def is_merge_transition_complete(state: BeaconState) -> bool:
// return state.latest_execution_payload_header != ExecutionPayloadHeader()
//
// Deprecated: Use `IsMergeTransitionBlockUsingPayloadHeader` instead.
func MergeTransitionComplete(st state.BeaconState) (bool, error) {
h, err := st.LatestExecutionPayloadHeader()
if err != nil {
@@ -53,16 +51,6 @@ func MergeTransitionBlock(st state.BeaconState, body block.BeaconBlockBody) (boo
return ExecutionBlock(body)
}
// IsMergeTransitionBlockUsingPayloadHeader returns true if the input block is the terminal merge block.
// Terminal merge block must be associated with an empty payload header.
// This is an optimized version of MergeTransitionComplete where beacon state is not required as an argument.
func IsMergeTransitionBlockUsingPayloadHeader(h *ethpb.ExecutionPayloadHeader, body block.BeaconBlockBody) (bool, error) {
if !isEmptyHeader(h) {
return false, nil
}
return ExecutionBlock(body)
}
// ExecutionBlock returns whether the block has a non-empty ExecutionPayload.
//
// Spec code:
@@ -136,8 +124,8 @@ func ValidatePayload(st state.BeaconState, payload *enginev1.ExecutionPayload) e
return err
}
if !bytes.Equal(payload.PrevRandao, random) {
return errors.New("incorrect prev randao")
if !bytes.Equal(payload.Random, random) {
return errors.New("incorrect random")
}
t, err := slots.ToTime(st.GenesisTime(), st.Slot())
if err != nil {
@@ -213,7 +201,7 @@ func PayloadToHeader(payload *enginev1.ExecutionPayload) (*ethpb.ExecutionPayloa
StateRoot: bytesutil.SafeCopyBytes(payload.StateRoot),
ReceiptRoot: bytesutil.SafeCopyBytes(payload.ReceiptsRoot),
LogsBloom: bytesutil.SafeCopyBytes(payload.LogsBloom),
PrevRandao: bytesutil.SafeCopyBytes(payload.PrevRandao),
Random: bytesutil.SafeCopyBytes(payload.Random),
BlockNumber: payload.BlockNumber,
GasLimit: payload.GasLimit,
GasUsed: payload.GasUsed,
@@ -241,7 +229,7 @@ func isEmptyPayload(p *enginev1.ExecutionPayload) bool {
if !bytes.Equal(p.LogsBloom, make([]byte, fieldparams.LogsBloomLength)) {
return false
}
if !bytes.Equal(p.PrevRandao, make([]byte, fieldparams.RootLength)) {
if !bytes.Equal(p.Random, make([]byte, fieldparams.RootLength)) {
return false
}
if !bytes.Equal(p.BaseFeePerGas, make([]byte, fieldparams.RootLength)) {
@@ -287,7 +275,7 @@ func isEmptyHeader(h *ethpb.ExecutionPayloadHeader) bool {
if !bytes.Equal(h.LogsBloom, make([]byte, fieldparams.LogsBloomLength)) {
return false
}
if !bytes.Equal(h.PrevRandao, make([]byte, fieldparams.RootLength)) {
if !bytes.Equal(h.Random, make([]byte, fieldparams.RootLength)) {
return false
}
if !bytes.Equal(h.BaseFeePerGas, make([]byte, fieldparams.RootLength)) {

View File

@@ -78,7 +78,7 @@ func Test_MergeComplete(t *testing.T) {
name: "has random",
payload: func() *ethpb.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.PrevRandao = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
h.Random = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
}(),
want: true,
@@ -246,7 +246,7 @@ func Test_MergeBlock(t *testing.T) {
name: "empty header, payload has random",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.PrevRandao = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
p.Random = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return p
}(),
header: emptyPayloadHeader(),
@@ -350,185 +350,6 @@ func Test_MergeBlock(t *testing.T) {
}
}
func Test_IsMergeTransitionBlockUsingPayloadHeader(t *testing.T) {
tests := []struct {
name string
payload *enginev1.ExecutionPayload
header *ethpb.ExecutionPayloadHeader
want bool
}{
{
name: "empty header, empty payload",
payload: emptyPayload(),
header: emptyPayloadHeader(),
want: false,
},
{
name: "non-empty header, empty payload",
payload: emptyPayload(),
header: func() *ethpb.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
}(),
want: false,
},
{
name: "empty header, payload has parent hash",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has fee recipient",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.FeeRecipient = bytesutil.PadTo([]byte{'a'}, fieldparams.FeeRecipientLength)
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has state root",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.StateRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has receipt root",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.ReceiptsRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has logs bloom",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.LogsBloom = bytesutil.PadTo([]byte{'a'}, fieldparams.LogsBloomLength)
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has random",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.PrevRandao = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has base fee",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.BaseFeePerGas = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has block hash",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.BlockHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has tx",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.Transactions = [][]byte{{'a'}}
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has extra data",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.ExtraData = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has block number",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.BlockNumber = 1
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has gas limit",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.GasLimit = 1
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has gas used",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.GasUsed = 1
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
{
name: "empty header, payload has timestamp",
payload: func() *enginev1.ExecutionPayload {
p := emptyPayload()
p.Timestamp = 1
return p
}(),
header: emptyPayloadHeader(),
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload = tt.payload
body, err := wrapper.WrappedBellatrixBeaconBlockBody(blk.Block.Body)
require.NoError(t, err)
got, err := blocks.IsMergeTransitionBlockUsingPayloadHeader(tt.header, body)
require.NoError(t, err)
if got != tt.want {
t.Errorf("MergeTransitionBlock() got = %v, want %v", got, tt.want)
}
})
}
}
func Test_IsExecutionBlock(t *testing.T) {
tests := []struct {
name string
@@ -699,21 +520,21 @@ func Test_ValidatePayload(t *testing.T) {
name: "validate passes",
payload: func() *enginev1.ExecutionPayload {
h := emptyPayload()
h.PrevRandao = random
h.Random = random
h.Timestamp = uint64(ts.Unix())
return h
}(), err: nil,
},
{
name: "incorrect prev randao",
name: "incorrect random",
payload: emptyPayload(),
err: errors.New("incorrect prev randao"),
err: errors.New("incorrect random"),
},
{
name: "incorrect timestamp",
payload: func() *enginev1.ExecutionPayload {
h := emptyPayload()
h.PrevRandao = random
h.Random = random
h.Timestamp = 1
return h
}(),
@@ -747,21 +568,21 @@ func Test_ProcessPayload(t *testing.T) {
name: "process passes",
payload: func() *enginev1.ExecutionPayload {
h := emptyPayload()
h.PrevRandao = random
h.Random = random
h.Timestamp = uint64(ts.Unix())
return h
}(), err: nil,
},
{
name: "incorrect prev randao",
name: "incorrect random",
payload: emptyPayload(),
err: errors.New("incorrect prev randao"),
err: errors.New("incorrect random"),
},
{
name: "incorrect timestamp",
payload: func() *enginev1.ExecutionPayload {
h := emptyPayload()
h.PrevRandao = random
h.Random = random
h.Timestamp = 1
return h
}(),
@@ -800,7 +621,7 @@ func Test_PayloadToHeader(t *testing.T) {
p.StateRoot = b
p.ReceiptsRoot = b
p.LogsBloom = b
p.PrevRandao = b
p.Random = b
p.ExtraData = b
p.BaseFeePerGas = b
p.BlockHash = b
@@ -814,7 +635,7 @@ func Test_PayloadToHeader(t *testing.T) {
require.DeepSSZEqual(t, h.StateRoot, make([]byte, fieldparams.RootLength))
require.DeepSSZEqual(t, h.ReceiptRoot, make([]byte, fieldparams.RootLength))
require.DeepSSZEqual(t, h.LogsBloom, make([]byte, fieldparams.LogsBloomLength))
require.DeepSSZEqual(t, h.PrevRandao, make([]byte, fieldparams.RootLength))
require.DeepSSZEqual(t, h.Random, make([]byte, fieldparams.RootLength))
require.DeepSSZEqual(t, h.ExtraData, make([]byte, 0))
require.DeepSSZEqual(t, h.BaseFeePerGas, make([]byte, fieldparams.RootLength))
require.DeepSSZEqual(t, h.BlockHash, make([]byte, fieldparams.RootLength))
@@ -842,7 +663,7 @@ func emptyPayloadHeader() *ethpb.ExecutionPayloadHeader {
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
Random: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
@@ -857,7 +678,7 @@ func emptyPayload() *enginev1.ExecutionPayload {
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
Random: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
Transactions: make([][]byte, 0),

View File

@@ -71,7 +71,7 @@ func UpgradeToBellatrix(ctx context.Context, state state.BeaconState) (state.Bea
StateRoot: make([]byte, 32),
ReceiptRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
PrevRandao: make([]byte, 32),
Random: make([]byte, 32),
BlockNumber: 0,
GasLimit: 0,
GasUsed: 0,

View File

@@ -67,7 +67,7 @@ func TestUpgradeToBellatrix(t *testing.T) {
StateRoot: make([]byte, 32),
ReceiptRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
PrevRandao: make([]byte, 32),
Random: make([]byte, 32),
BlockNumber: 0,
GasLimit: 0,
GasUsed: 0,

View File

@@ -11,22 +11,18 @@ import (
"github.com/prysmaticlabs/prysm/time/slots"
)
var ErrNilSignedBeaconBlock = errors.New("signed beacon block can't be nil")
var ErrNilBeaconBlock = errors.New("beacon block can't be nil")
var ErrNilBeaconBlockBody = errors.New("beacon block body can't be nil")
// BeaconBlockIsNil checks if any composite field of input signed beacon block is nil.
// Accessing these nil fields would result in a runtime panic,
// so it is recommended to run these checks as a first line of defense.
func BeaconBlockIsNil(b block.SignedBeaconBlock) error {
if b == nil || b.IsNil() {
return ErrNilSignedBeaconBlock
return errors.New("signed beacon block can't be nil")
}
if b.Block().IsNil() {
return ErrNilBeaconBlock
return errors.New("beacon block can't be nil")
}
if b.Block().Body().IsNil() {
return ErrNilBeaconBlockBody
return errors.New("beacon block body can't be nil")
}
return nil
}

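As a usage illustration of the nil-check pattern documented above, here is a standalone sketch with pared-down stand-in interfaces (the real SignedBeaconBlock types in prysm's block package carry many more methods):

package main

import (
    "errors"
    "fmt"
)

type blockBody interface{ IsNil() bool }

type beaconBlock interface {
    IsNil() bool
    Body() blockBody
}

type signedBeaconBlock interface {
    IsNil() bool
    Block() beaconBlock
}

// checkBlock mirrors BeaconBlockIsNil: reject nil composites up front so later
// field accesses can never panic.
func checkBlock(b signedBeaconBlock) error {
    if b == nil || b.IsNil() {
        return errors.New("signed beacon block can't be nil")
    }
    if b.Block().IsNil() {
        return errors.New("beacon block can't be nil")
    }
    if b.Block().Body().IsNil() {
        return errors.New("beacon block body can't be nil")
    }
    return nil
}

func main() {
    fmt.Println(checkBlock(nil)) // signed beacon block can't be nil
}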
View File

@@ -244,7 +244,7 @@ func createFullBellatrixBlockWithOperations(t *testing.T) (state.BeaconState,
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
Random: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
Transactions: make([][]byte, 0),

View File

@@ -92,7 +92,7 @@ func ExecuteStateTransition(
func ProcessSlot(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "core.state.ProcessSlot")
defer span.End()
span.AddAttributes(trace.Int64Attribute("slot", int64(state.Slot()))) // lint:ignore uintcast -- This is OK for tracing.
span.AddAttributes(trace.Int64Attribute("slot", int64(state.Slot())))
prevStateRoot, err := state.HashTreeRoot(ctx)
if err != nil {
@@ -190,7 +190,7 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
if state == nil || state.IsNil() {
return nil, errors.New("nil state")
}
span.AddAttributes(trace.Int64Attribute("slots", int64(slot)-int64(state.Slot()))) // lint:ignore uintcast -- This is OK for tracing.
span.AddAttributes(trace.Int64Attribute("slots", int64(slot)-int64(state.Slot())))
// The block must have a higher slot than parent state.
if state.Slot() >= slot {
@@ -354,7 +354,7 @@ func VerifyOperationLengths(_ context.Context, state state.BeaconState, b block.
func ProcessEpochPrecompute(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "core.state.ProcessEpochPrecompute")
defer span.End()
span.AddAttributes(trace.Int64Attribute("epoch", int64(time.CurrentEpoch(state)))) // lint:ignore uintcast -- This is OK for tracing.
span.AddAttributes(trace.Int64Attribute("epoch", int64(time.CurrentEpoch(state))))
if state == nil || state.IsNil() {
return nil, errors.New("nil state")

View File

@@ -137,6 +137,16 @@ func CalculateStateRoot(
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not process block")
}
if signed.Version() == version.Altair || signed.Version() == version.Bellatrix {
sa, err := signed.Block().Body().SyncAggregate()
if err != nil {
return [32]byte{}, err
}
state, err = altair.ProcessSyncAggregate(ctx, state, sa)
if err != nil {
return [32]byte{}, err
}
}
return state.HashTreeRoot(ctx)
}
@@ -173,6 +183,16 @@ func ProcessBlockNoVerifyAnySig(
if err != nil {
return nil, nil, err
}
if signed.Version() == version.Altair || signed.Version() == version.Bellatrix {
sa, err := signed.Block().Body().SyncAggregate()
if err != nil {
return nil, nil, err
}
state, err = altair.ProcessSyncAggregate(ctx, state, sa)
if err != nil {
return nil, nil, err
}
}
bSet, err := b.BlockSignatureBatch(state, blk.ProposerIndex(), signed.Signature(), blk.HashTreeRoot)
if err != nil {
@@ -320,19 +340,6 @@ func ProcessBlockForStateRoot(
return nil, errors.Wrap(err, "could not process block operation")
}
if signed.Block().Version() == version.Phase0 {
return state, nil
}
sa, err := signed.Block().Body().SyncAggregate()
if err != nil {
return nil, errors.Wrap(err, "could not get sync aggregate from block")
}
state, err = altair.ProcessSyncAggregate(ctx, state, sa)
if err != nil {
return nil, errors.Wrap(err, "process_sync_aggregate failed")
}
return state, nil
}

View File

@@ -7,9 +7,3 @@ import "github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
// i/o error. This variable copies the value in the kv package to the same scope as the Database interfaces,
// so that it is available to code paths that do not interact directly with the kv package.
var ErrNotFound = kv.ErrNotFound
// ErrNotFoundState wraps ErrNotFound for an error specific to a state not being found in the database.
var ErrNotFoundState = kv.ErrNotFoundState
// ErrNotFoundOriginBlockRoot wraps ErrNotFound for an error specific to the origin block root.
var ErrNotFoundOriginBlockRoot = kv.ErrNotFoundOriginBlockRoot

View File

@@ -30,9 +30,9 @@ type ReadOnlyDatabase interface {
IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool
FinalizedChildBlock(ctx context.Context, blockRoot [32]byte) (block.SignedBeaconBlock, error)
HighestSlotBlocksBelow(ctx context.Context, slot types.Slot) ([]block.SignedBeaconBlock, error)
ValidatedTips(ctx context.Context) (map[[32]byte]types.Slot, error)
// State related methods.
State(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
StateOrError(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
GenesisState(ctx context.Context) (state.BeaconState, error)
HasState(ctx context.Context, blockRoot [32]byte) bool
StateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error)
@@ -49,8 +49,7 @@ type ReadOnlyDatabase interface {
DepositContractAddress(ctx context.Context) ([]byte, error)
// Powchain operations.
PowchainData(ctx context.Context) (*ethpb.ETH1ChainData, error)
// Fee recipient operations.
FeeRecipientByValidatorID(ctx context.Context, id uint64) (common.Address, error)
// origin checkpoint sync support
OriginBlockRoot(ctx context.Context) ([32]byte, error)
}
@@ -64,6 +63,7 @@ type NoHeadAccessDatabase interface {
SaveBlock(ctx context.Context, block block.SignedBeaconBlock) error
SaveBlocks(ctx context.Context, blocks []block.SignedBeaconBlock) error
SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) error
UpdateValidatedTips(ctx context.Context, newVals map[[32]byte]types.Slot) error
// State related methods.
SaveState(ctx context.Context, state state.ReadOnlyBeaconState, blockRoot [32]byte) error
SaveStates(ctx context.Context, states []state.ReadOnlyBeaconState, blockRoots [][32]byte) error
@@ -80,8 +80,6 @@ type NoHeadAccessDatabase interface {
SavePowchainData(ctx context.Context, data *ethpb.ETH1ChainData) error
// Run any required database migrations.
RunMigrations(ctx context.Context) error
// Fee recipient operations.
SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []uint64, addrs []common.Address) error
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint types.Slot) error
}

View File

@@ -25,6 +25,7 @@ go_library(
"state_summary.go",
"state_summary_cache.go",
"utils.go",
"validated_tips.go",
"wss.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/db/kv",
@@ -80,6 +81,7 @@ go_test(
"checkpoint_test.go",
"deposit_contract_test.go",
"encoding_test.go",
"error_test.go",
"finalized_block_roots_test.go",
"genesis_test.go",
"init_test.go",
@@ -91,6 +93,7 @@ go_test(
"state_summary_test.go",
"state_test.go",
"utils_test.go",
"validated_tips_test.go",
],
data = glob(["testdata/**"]),
embed = [":go_default_library"],
@@ -113,7 +116,6 @@ go_test(
"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_golang_snappy//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
"@io_etcd_go_bbolt//:go_default_library",

View File

@@ -5,7 +5,6 @@ import (
"context"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/golang/snappy"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
@@ -393,44 +392,6 @@ func (s *Store) HighestSlotBlocksBelow(ctx context.Context, slot types.Slot) ([]
return []block.SignedBeaconBlock{blk}, nil
}
// FeeRecipientByValidatorID returns the fee recipient for a validator id.
// `ErrNotFoundFeeRecipient` is returned if the validator id is not found.
func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id uint64) (common.Address, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.FeeRecipientByValidatorID")
defer span.End()
var addr []byte
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(feeRecipientBucket)
addr = bkt.Get(bytesutil.Uint64ToBytesBigEndian(id))
if addr == nil {
return errors.Wrapf(ErrNotFoundFeeRecipient, "validator id %d", id)
}
return nil
})
return common.BytesToAddress(addr), err
}
// SaveFeeRecipientsByValidatorIDs saves the fee recipients for validator ids.
// Error is returned if `ids` and `recipients` are not the same length.
func (s *Store) SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []uint64, feeRecipients []common.Address) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveFeeRecipientByValidatorID")
defer span.End()
if len(ids) != len(feeRecipients) {
return errors.New("validatorIDs and feeRecipients must be the same length")
}
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(feeRecipientBucket)
for i, id := range ids {
if err := bkt.Put(bytesutil.Uint64ToBytesBigEndian(id), feeRecipients[i].Bytes()); err != nil {
return err
}
}
return nil
})
}
// blockRootsByFilter retrieves the block roots given the filter criteria.
func blockRootsByFilter(ctx context.Context, tx *bolt.Tx, f *filters.QueryFilter) ([][]byte, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.blockRootsByFilter")

View File

@@ -4,8 +4,6 @@ import (
"context"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
"github.com/prysmaticlabs/prysm/config/params"
@@ -591,27 +589,3 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
})
}
}
func TestStore_FeeRecipientByValidatorID(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
ids := []uint64{0, 0, 0}
feeRecipients := []common.Address{{}, {}, {}, {}}
require.ErrorContains(t, "validatorIDs and feeRecipients must be the same length", db.SaveFeeRecipientsByValidatorIDs(ctx, ids, feeRecipients))
ids = []uint64{0, 1, 2}
feeRecipients = []common.Address{{'a'}, {'b'}, {'c'}}
require.NoError(t, db.SaveFeeRecipientsByValidatorIDs(ctx, ids, feeRecipients))
f, err := db.FeeRecipientByValidatorID(ctx, 0)
require.NoError(t, err)
require.Equal(t, common.Address{'a'}, f)
f, err = db.FeeRecipientByValidatorID(ctx, 1)
require.NoError(t, err)
require.Equal(t, common.Address{'b'}, f)
f, err = db.FeeRecipientByValidatorID(ctx, 2)
require.NoError(t, err)
require.Equal(t, common.Address{'c'}, f)
_, err = db.FeeRecipientByValidatorID(ctx, 3)
want := errors.Wrap(ErrNotFoundFeeRecipient, "validator id 3")
require.Equal(t, want.Error(), err.Error())
}

View File

@@ -1,6 +1,6 @@
package kv
import "github.com/pkg/errors"
import "errors"
// errDeleteFinalized is raised when we attempt to delete a finalized block/state
var errDeleteFinalized = errors.New("cannot delete finalized block or state")
@@ -8,10 +8,42 @@ var errDeleteFinalized = errors.New("cannot delete finalized block or state")
// ErrNotFound can be used directly, or as a wrapped DBError, whenever a db method needs to
// indicate that a value couldn't be found.
var ErrNotFound = errors.New("not found in db")
var ErrNotFoundState = errors.Wrap(ErrNotFound, "state not found")
// ErrNotFoundOriginBlockRoot is an error specifically for the origin block root getter
var ErrNotFoundOriginBlockRoot = errors.Wrap(ErrNotFound, "OriginBlockRoot")
var ErrNotFoundOriginBlockRoot = WrapDBError(ErrNotFound, "OriginBlockRoot")
// ErrNotFoundFeeRecipient is a not found error specifically for the fee recipient getter
var ErrNotFoundFeeRecipient = errors.Wrap(ErrNotFound, "fee recipient")
// WrapDBError wraps an error in a DBError. See commentary on DBError for more context.
func WrapDBError(e error, outer string) error {
return DBError{
Wraps: e,
Outer: errors.New(outer),
}
}
// DBError implements the Error method so that it can be asserted as an error.
// The Unwrap method supports error wrapping, enabling it to be used with errors.Is/As.
// The primary use case is to make it simple for database methods to return errors
// that wrap ErrNotFound, allowing calling code to check for "not found" errors
// like `errors.Is(err, ErrNotFound)`. This is intended to improve error handling
// in db lookup methods that need to differentiate between a missing value and some
// other database error. For more background, see:
// https://go.dev/blog/go1.13-errors
type DBError struct {
Wraps error
Outer error
}
// Error satisfies the error interface, so that DBErrors can be used anywhere that
// expects an `error`.
func (e DBError) Error() string {
es := e.Outer.Error()
if e.Wraps != nil {
es += ": " + e.Wraps.Error()
}
return es
}
// Unwrap is used by the errors package Is and As methods.
func (e DBError) Unwrap() error {
return e.Wraps
}

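A standalone sketch of the calling pattern the DBError comment describes, i.e. distinguishing a missing value from any other database failure via errors.Is; the types here are local copies so the snippet runs on its own, and the lookup helper is hypothetical:

package main

import (
    "errors"
    "fmt"
)

var errNotFound = errors.New("not found in db")

// dbError mirrors the DBError wrapper above: an outer context error around a
// wrapped sentinel, with Unwrap so errors.Is can see through it.
type dbError struct {
    wraps error
    outer error
}

func (e dbError) Error() string {
    es := e.outer.Error()
    if e.wraps != nil {
        es += ": " + e.wraps.Error()
    }
    return es
}

func (e dbError) Unwrap() error { return e.wraps }

// lookup is a hypothetical db getter used only for this illustration.
func lookup(missing bool) error {
    if missing {
        return dbError{wraps: errNotFound, outer: errors.New("OriginBlockRoot")}
    }
    return errors.New("disk i/o failure")
}

func main() {
    // Callers branch on errors.Is instead of matching error strings.
    fmt.Println(errors.Is(lookup(true), errNotFound))  // true
    fmt.Println(errors.Is(lookup(false), errNotFound)) // false
}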
View File

@@ -0,0 +1,24 @@
package kv
import (
"errors"
"testing"
)
func TestWrappedSentinelError(t *testing.T) {
e := ErrNotFoundOriginBlockRoot
if !errors.Is(e, ErrNotFoundOriginBlockRoot) {
t.Error("expected that a copy of ErrNotFoundOriginBlockRoot would have an is-a relationship")
}
outer := errors.New("wrapped error")
e2 := DBError{Wraps: ErrNotFoundOriginBlockRoot, Outer: outer}
if !errors.Is(e2, ErrNotFoundOriginBlockRoot) {
t.Error("expected that errors.Is would know DBError wraps ErrNotFoundOriginBlockRoot")
}
// test that the innermost not found error is detected
if !errors.Is(e2, ErrNotFound) {
t.Error("expected that errors.Is would know ErrNotFoundOriginBlockRoot wraps ErrNotFound")
}
}

View File

@@ -191,8 +191,6 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
newStateServiceCompatibleBucket,
// Migrations
migrationsBucket,
feeRecipientBucket,
)
}); err != nil {
log.WithField("elapsed", time.Since(start)).Error("Failed to update db and create buckets")

View File

@@ -18,7 +18,6 @@ var (
checkpointBucket = []byte("check-point")
powchainBucket = []byte("powchain")
stateValidatorsBucket = []byte("state-validators")
feeRecipientBucket = []byte("fee-recipient")
validatedTips = []byte("validated-synced-tips")
// Deprecated: This bucket was migrated in PR 6461. Do not use, except for migrations.

View File

@@ -3,7 +3,6 @@ package kv
import (
"bytes"
"context"
"fmt"
"github.com/golang/snappy"
"github.com/pkg/errors"
@@ -47,19 +46,6 @@ func (s *Store) State(ctx context.Context, blockRoot [32]byte) (state.BeaconStat
return s.unmarshalState(ctx, enc, valEntries)
}
// StateOrError is just like State(), except it only returns a non-error response
// if the requested state is found in the database.
func (s *Store) StateOrError(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) {
st, err := s.State(ctx, blockRoot)
if err != nil {
return nil, err
}
if st == nil || st.IsNil() {
return nil, errors.Wrap(ErrNotFoundState, fmt.Sprintf("no state with blockroot=%#x", blockRoot))
}
return st, nil
}
// GenesisState returns the genesis state in beacon chain.
func (s *Store) GenesisState(ctx context.Context) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.GenesisState")

View File

@@ -21,12 +21,6 @@ import (
bolt "go.etcd.io/bbolt"
)
func TestStateNil(t *testing.T) {
db := setupDB(t)
_, err := db.StateOrError(context.Background(), [32]byte{})
require.ErrorIs(t, err, ErrNotFoundState)
}
func TestState_CanSaveRetrieve(t *testing.T) {
db := setupDB(t)

View File

@@ -0,0 +1,71 @@
package kv
import (
"context"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)
// ValidatedTips returns all the validated_tips that are present in the DB.
func (s *Store) ValidatedTips(ctx context.Context) (map[[32]byte]types.Slot, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.ValidatedTips")
defer span.End()
valTips := make(map[[32]byte]types.Slot, 1)
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(validatedTips)
c := bkt.Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
if ctx.Err() != nil {
return ctx.Err()
}
valTips[bytesutil.ToBytes32(k)] = bytesutil.BytesToSlotBigEndian(v)
}
return nil
})
return valTips, err
}
// UpdateValidatedTips clears off all the old validated_tips from the DB and
// adds the new tips that are provided.
func (s *Store) UpdateValidatedTips(ctx context.Context, newVals map[[32]byte]types.Slot) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.UpdateValidatedTips")
defer span.End()
// Get the already existing tips.
oldVals, err := s.ValidatedTips(ctx)
if err != nil {
return err
}
updateErr := s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(validatedTips)
// Delete keys that are present and not in the new set.
for k := range oldVals {
if _, ok := newVals[k]; !ok {
deleteErr := bkt.Delete(k[:])
if deleteErr != nil {
return deleteErr
}
}
}
// Add keys not present already.
for k, v := range newVals {
if _, ok := oldVals[k]; !ok {
putErr := bkt.Put(k[:], bytesutil.SlotToBytesBigEndian(v))
if putErr != nil {
return putErr
}
}
}
return nil
})
return updateErr
}

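The update above reconciles the bucket in place rather than clearing and rewriting it: keys shared by the old and new sets are left untouched, vanished keys are deleted, and new keys are put. A standalone sketch of that reconciliation on plain maps (stand-in key and value types, not the bolt bucket API):

package main

import "fmt"

// reconcile mirrors the UpdateValidatedTips logic: drop tips that disappeared,
// add tips that are new, and leave shared keys alone.
func reconcile(oldTips, newTips map[string]uint64) map[string]uint64 {
    out := make(map[string]uint64, len(newTips))
    for k, v := range oldTips {
        out[k] = v
    }
    for k := range oldTips {
        if _, ok := newTips[k]; !ok {
            delete(out, k)
        }
    }
    for k, v := range newTips {
        if _, ok := oldTips[k]; !ok {
            out[k] = v
        }
    }
    return out
}

func main() {
    oldTips := map[string]uint64{"A": 1, "B": 2, "C": 3}
    newTips := map[string]uint64{"C": 3, "D": 4}
    fmt.Println(reconcile(oldTips, newTips)) // map[C:3 D:4]
}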
View File

@@ -0,0 +1,93 @@
package kv
import (
"context"
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestTips_AddNewTips(t *testing.T) {
ctx := context.Background()
db := setupDB(t)
newTips := make(map[[32]byte]types.Slot)
newTips[[32]byte{'A'}] = types.Slot(1)
newTips[[32]byte{'B'}] = types.Slot(2)
newTips[[32]byte{'C'}] = types.Slot(3)
require.NoError(t, db.UpdateValidatedTips(ctx, newTips))
gotTips, err := db.ValidatedTips(ctx)
require.NoError(t, err)
require.Equal(t, true, areTipsSame(gotTips, newTips))
}
func TestTips_UpdateTipsWithoutOverlap(t *testing.T) {
ctx := context.Background()
db := setupDB(t)
oldTips := make(map[[32]byte]types.Slot)
oldTips[[32]byte{'A'}] = types.Slot(1)
oldTips[[32]byte{'B'}] = types.Slot(2)
oldTips[[32]byte{'C'}] = types.Slot(3)
require.NoError(t, db.UpdateValidatedTips(ctx, oldTips))
// create a new, non-overlapping set of tips to add
newTips := make(map[[32]byte]types.Slot)
newTips[[32]byte{'D'}] = types.Slot(4)
newTips[[32]byte{'E'}] = types.Slot(5)
newTips[[32]byte{'F'}] = types.Slot(6)
require.NoError(t, db.UpdateValidatedTips(ctx, newTips))
gotTips, err := db.ValidatedTips(ctx)
require.NoError(t, err)
require.Equal(t, true, areTipsSame(gotTips, newTips))
}
func TestTips_UpdateTipsWithOverlap(t *testing.T) {
ctx := context.Background()
db := setupDB(t)
oldTips := make(map[[32]byte]types.Slot)
oldTips[[32]byte{'A'}] = types.Slot(1)
oldTips[[32]byte{'B'}] = types.Slot(2)
oldTips[[32]byte{'C'}] = types.Slot(3)
require.NoError(t, db.UpdateValidatedTips(ctx, oldTips))
// create a new, partially overlapping set of tips to add
newTips := make(map[[32]byte]types.Slot)
newTips[[32]byte{'C'}] = types.Slot(3)
newTips[[32]byte{'D'}] = types.Slot(4)
newTips[[32]byte{'E'}] = types.Slot(5)
require.NoError(t, db.UpdateValidatedTips(ctx, newTips))
gotTips, err := db.ValidatedTips(ctx)
require.NoError(t, err)
require.Equal(t, true, areTipsSame(gotTips, newTips))
}
func areTipsSame(got map[[32]byte]types.Slot, required map[[32]byte]types.Slot) bool {
if len(got) != len(required) {
return false
}
for k, v := range got {
if val, ok := required[k]; ok {
if uint64(v) != uint64(val) {
return false
}
} else {
return false
}
}
return true
}

View File

@@ -12,8 +12,7 @@ go_library(
"//testing/spectest:__subpackages__",
],
deps = [
"//config/fieldparams:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
],
)

View File

@@ -1,55 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"errors.go",
"forkchoice.go",
"metrics.go",
"node.go",
"optimistic_sync.go",
"proposer_boost.go",
"store.go",
"types.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree",
visibility = [
"//beacon-chain:__subpackages__",
"//testing/spectest:__subpackages__",
],
deps = [
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"ffg_update_test.go",
"forkchoice_test.go",
"no_vote_test.go",
"node_test.go",
"optimistic_sync_test.go",
"proposer_boost_test.go",
"store_test.go",
"vote_test.go",
],
embed = [":go_default_library"],
deps = [
"//config/params:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
],
)

View File

@@ -1,4 +0,0 @@
/*
Package doublylinkedtree implements eth2 LMD GHOST fork choice using the doubly linked proto array node structure.
*/
package doublylinkedtree

View File

@@ -1,10 +0,0 @@
package doublylinkedtree
import "errors"
var errNilNode = errors.New("invalid nil or unknown node")
var errInvalidBalance = errors.New("invalid node balance")
var errInvalidProposerBoostRoot = errors.New("invalid proposer boost root")
var errUnknownFinalizedRoot = errors.New("unknown finalized root")
var errUnknownJustifiedRoot = errors.New("unknown justified root")
var errInvalidOptimisticStatus = errors.New("invalid optimistic status")

View File

@@ -1,193 +0,0 @@
package doublylinkedtree
import (
"context"
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestFFGUpdates_OneBranch(t *testing.T) {
balances := []uint64{1, 1}
f := setup(0, 0)
// The head should always start at the finalized block.
r, err := f.Head(context.Background(), 0, params.BeaconConfig().ZeroHash, balances, 0)
require.NoError(t, err)
assert.Equal(t, params.BeaconConfig().ZeroHash, r, "Incorrect head with genesis")
// Define the following tree:
// 0 <- justified: 0, finalized: 0
// |
// 1 <- justified: 0, finalized: 0
// |
// 2 <- justified: 1, finalized: 0
// |
// 3 <- justified: 2, finalized: 1
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 1, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(3), indexToHash(2), 2, 1))
// With starting justified epoch at 0, the head should be 3:
// 0 <- start
// |
// 1
// |
// 2
// |
// 3 <- head
r, err = f.Head(context.Background(), 0, params.BeaconConfig().ZeroHash, balances, 0)
require.NoError(t, err)
assert.Equal(t, indexToHash(3), r, "Incorrect head for with justified epoch at 0")
// With starting justified epoch at 1, the head should be 2:
// 0
// |
// 1 <- start
// |
// 2 <- head
// |
// 3
r, err = f.Head(context.Background(), 1, indexToHash(2), balances, 0)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head with justified epoch at 1")
// With starting justified epoch at 2, the head should be 3:
// 0
// |
// 1
// |
// 2 <- start
// |
// 3 <- head
r, err = f.Head(context.Background(), 2, indexToHash(3), balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(3), r, "Incorrect head with justified epoch at 2")
}
func TestFFGUpdates_TwoBranches(t *testing.T) {
balances := []uint64{1, 1}
f := setup(0, 0)
r, err := f.Head(context.Background(), 0, params.BeaconConfig().ZeroHash, balances, 0)
require.NoError(t, err)
assert.Equal(t, params.BeaconConfig().ZeroHash, r, "Incorrect head with genesis")
// Define the following tree:
// 0
// / \
// justified: 0, finalized: 0 -> 1 2 <- justified: 0, finalized: 0
// | |
// justified: 1, finalized: 0 -> 3 4 <- justified: 0, finalized: 0
// | |
// justified: 1, finalized: 0 -> 5 6 <- justified: 0, finalized: 0
// | |
// justified: 1, finalized: 0 -> 7 8 <- justified: 1, finalized: 0
// | |
// justified: 2, finalized: 0 -> 9 10 <- justified: 2, finalized: 0
// Left branch.
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(3), indexToHash(1), 1, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(5), indexToHash(3), 1, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(7), indexToHash(5), 1, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(9), indexToHash(7), 2, 0))
// Right branch.
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(4), indexToHash(2), 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(6), indexToHash(4), 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(8), indexToHash(6), 1, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(10), indexToHash(8), 2, 0))
// With start at 0, the head should be 10:
// 0 <-- start
// / \
// 1 2
// | |
// 3 4
// | |
// 5 6
// | |
// 7 8
// | |
// 9 10 <-- head
r, err = f.Head(context.Background(), 0, params.BeaconConfig().ZeroHash, balances, 0)
require.NoError(t, err)
assert.Equal(t, indexToHash(10), r, "Incorrect head with justified epoch at 0")
// Add a vote to 1:
// 0
// / \
// +1 vote -> 1 2
// | |
// 3 4
// | |
// 5 6
// | |
// 7 8
// | |
// 9 10
f.ProcessAttestation(context.Background(), []uint64{0}, indexToHash(1), 0)
// With the additional vote to the left branch, the head should be 9:
// 0 <-- start
// / \
// 1 2
// | |
// 3 4
// | |
// 5 6
// | |
// 7 8
// | |
// head -> 9 10
r, err = f.Head(context.Background(), 0, params.BeaconConfig().ZeroHash, balances, 0)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head with justified epoch at 0")
// Add a vote to 2:
// 0
// / \
// 1 2 <- +1 vote
// | |
// 3 4
// | |
// 5 6
// | |
// 7 8
// | |
// 9 10
f.ProcessAttestation(context.Background(), []uint64{1}, indexToHash(2), 0)
// With the additional vote to the right branch, the head should be 10:
// 0 <-- start
// / \
// 1 2
// | |
// 3 4
// | |
// 5 6
// | |
// 7 8
// | |
// 9 10 <-- head
r, err = f.Head(context.Background(), 0, params.BeaconConfig().ZeroHash, balances, 0)
require.NoError(t, err)
assert.Equal(t, indexToHash(10), r, "Incorrect head with justified epoch at 0")
r, err = f.Head(context.Background(), 1, indexToHash(1), balances, 0)
require.NoError(t, err)
assert.Equal(t, indexToHash(7), r, "Incorrect head with justified epoch at 0")
}
func setup(justifiedEpoch, finalizedEpoch types.Epoch) *ForkChoice {
ctx := context.Background()
f := New(justifiedEpoch, finalizedEpoch)
err := f.InsertOptimisticBlock(ctx, 0, params.BeaconConfig().ZeroHash, [32]byte{}, justifiedEpoch, finalizedEpoch)
if err != nil {
return nil
}
return f
}

View File

@@ -1,57 +0,0 @@
package doublylinkedtree
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
headSlotNumber = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "doublylinkedtree_head_slot",
Help: "The slot number of the current head.",
},
)
nodeCount = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "doublylinkedtree_node_count",
Help: "The number of nodes in the DAG array based store structure.",
},
)
headChangesCount = promauto.NewCounter(
prometheus.CounterOpts{
Name: "doublylinkedtree_head_changed_count",
Help: "The number of times head changes.",
},
)
calledHeadCount = promauto.NewCounter(
prometheus.CounterOpts{
Name: "doublylinkedtree_head_requested_count",
Help: "The number of times someone called head.",
},
)
processedBlockCount = promauto.NewCounter(
prometheus.CounterOpts{
Name: "doublylinkedtree_block_processed_count",
Help: "The number of times a block is processed for fork choice.",
},
)
processedAttestationCount = promauto.NewCounter(
prometheus.CounterOpts{
Name: "doublylinkedtree_attestation_processed_count",
Help: "The number of times an attestation is processed for fork choice.",
},
)
prunedCount = promauto.NewCounter(
prometheus.CounterOpts{
Name: "doublylinkedtree_pruned_count",
Help: "The number of times pruning happened.",
},
)
validatedCount = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "doublylinkedtree_validated_count",
Help: "The number of blocks that have been fully validated.",
},
)
)

View File

@@ -1,114 +0,0 @@
package doublylinkedtree
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestNoVote_CanFindHead(t *testing.T) {
balances := make([]uint64, 16)
f := setup(1, 1)
// The head should always start at the finalized block.
r, err := f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
if r != params.BeaconConfig().ZeroHash {
t.Errorf("Incorrect head with genesis")
}
// Insert block 2 into the tree and verify head is at 2:
// 0
// /
// 2 <- head
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
// Insert block 1 into the tree and verify head is still at 2:
// 0
// / \
// head -> 2 1
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
// Insert block 3 into the tree and verify head is still at 2:
// 0
// / \
// head -> 2 1
// |
// 3
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
// Insert block 4 into the tree and verify head is at 4:
// 0
// / \
// 2 1
// | |
// head -> 4 3
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(4), indexToHash(2), 1, 1))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
// Insert block 5 with justified epoch of 2, verify head is still at 4.
// 0
// / \
// 2 1
// | |
// head -> 4 3
// |
// 5 <- justified epoch = 2
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 1))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
// Verify there's an error when starting from a block with wrong justified epoch.
// 0
// / \
// 2 1
// | |
// head -> 4 3
// |
// 5 <- starting from 5 with justified epoch 0 should error
_, err = f.Head(context.Background(), 1, indexToHash(5), balances, 1)
wanted := "head at slot 0 with weight 0 is not eligible, finalizedEpoch 1 != 1, justifiedEpoch 2 != 1"
require.ErrorContains(t, wanted, err)
// Set the justified epoch to 2 and start block to 5 to verify head is 5.
// 0
// / \
// 2 1
// | |
// 4 3
// |
// 5 <- head
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(5), r, "Incorrect head for with justified epoch at 2")
// Insert block 6 with justified epoch of 2, verify head is at 6.
// 0
// / \
// 2 1
// | |
// 4 3
// |
// 5
// |
// 6 <- head
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(6), indexToHash(5), 2, 1))
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 2")
}

View File

@@ -1,153 +0,0 @@
package doublylinkedtree
import (
"bytes"
"context"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/config/params"
pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
// depth returns the length of the path from this node to the root of the fork choice tree.
func (n *Node) depth() uint64 {
ret := uint64(0)
for node := n.parent; node != nil; node = node.parent {
ret += 1
}
return ret
}
// applyWeightChanges recomputes the weight of the node passed as an argument and all of its descendants,
// using the current balance stored in each node. This function requires holding
// a lock on Store.nodesLock.
func (n *Node) applyWeightChanges(ctx context.Context) error {
// Recursively calling the children to sum their weights.
childrenWeight := uint64(0)
for _, child := range n.children {
if ctx.Err() != nil {
return ctx.Err()
}
if err := child.applyWeightChanges(ctx); err != nil {
return err
}
childrenWeight += child.weight
}
if n.root == params.BeaconConfig().ZeroHash {
return nil
}
n.weight = n.balance + childrenWeight
return nil
}
// updateBestDescendant updates the best descendant of this node and its children.
func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch types.Epoch) error {
if ctx.Err() != nil {
return ctx.Err()
}
if len(n.children) == 0 {
n.bestDescendant = nil
return nil
}
var bestChild *Node
bestWeight := uint64(0)
hasViableDescendant := false
for _, child := range n.children {
if child == nil {
return errNilNode
}
if err := child.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch); err != nil {
return err
}
childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, finalizedEpoch)
if childLeadsToViableHead && !hasViableDescendant {
// The child leads to a viable head, but the current
// parent's best child doesn't.
bestWeight = child.weight
bestChild = child
hasViableDescendant = true
} else if childLeadsToViableHead {
// If both are viable, compare their weights.
if child.weight == bestWeight {
// Tie-breaker of equal weights by root.
if bytes.Compare(child.root[:], bestChild.root[:]) > 0 {
bestChild = child
}
} else if child.weight > bestWeight {
bestChild = child
bestWeight = child.weight
}
}
}
if hasViableDescendant {
if bestChild.bestDescendant == nil {
n.bestDescendant = bestChild
} else {
n.bestDescendant = bestChild.bestDescendant
}
} else {
n.bestDescendant = nil
}
return nil
}
// viableForHead returns true if the node is viable to head.
// Any node with different finalized or justified epoch than
// the ones in fork choice store should not be viable to head.
func (n *Node) viableForHead(justifiedEpoch, finalizedEpoch types.Epoch) bool {
justified := justifiedEpoch == n.justifiedEpoch || justifiedEpoch == 0
finalized := finalizedEpoch == n.finalizedEpoch || finalizedEpoch == 0
return justified && finalized
}
func (n *Node) leadsToViableHead(justifiedEpoch, finalizedEpoch types.Epoch) bool {
if n.bestDescendant == nil {
return n.viableForHead(justifiedEpoch, finalizedEpoch)
}
return n.bestDescendant.viableForHead(justifiedEpoch, finalizedEpoch)
}
// setNodeAndParentValidated sets the current node and all of its ancestors as validated (i.e. non-optimistic).
func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
if ctx.Err() != nil {
return ctx.Err()
}
if !n.optimistic || n.parent == nil {
return nil
}
n.optimistic = false
validatedCount.Inc()
return n.parent.setNodeAndParentValidated(ctx)
}
// rpcNodes is used by the RPC Debug endpoint to return information
// about all nodes in the fork choice store
func (n *Node) rpcNodes(ret []*pbrpc.ForkChoiceNode) []*pbrpc.ForkChoiceNode {
for _, child := range n.children {
ret = child.rpcNodes(ret)
}
r := n.root
p := [32]byte{}
if n.parent != nil {
copy(p[:], n.parent.root[:])
}
b := [32]byte{}
if n.bestDescendant != nil {
copy(b[:], n.bestDescendant.root[:])
}
node := &pbrpc.ForkChoiceNode{
Slot: n.slot,
Root: r[:],
Parent: p[:],
JustifiedEpoch: n.justifiedEpoch,
FinalizedEpoch: n.finalizedEpoch,
Weight: n.weight,
BestDescendant: b[:],
}
ret = append(ret, node)
return ret
}

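A pared-down, standalone sketch of the bottom-up weight propagation that applyWeightChanges above performs (locks, context cancellation, and the zero-hash root special case are omitted): each node's weight becomes its own attestation balance plus the sum of its children's weights.

package main

import "fmt"

// node keeps only the fields needed to show the recursion.
type node struct {
    balance  uint64
    weight   uint64
    children []*node
}

// applyWeightChanges recomputes weights from the leaves upward.
func (n *node) applyWeightChanges() {
    childrenWeight := uint64(0)
    for _, c := range n.children {
        c.applyWeightChanges()
        childrenWeight += c.weight
    }
    n.weight = n.balance + childrenWeight
}

func main() {
    // A chain of three blocks, each with a balance of 100, as in the tests above:
    // the oldest block ends up with weight 300, its child 200, and the leaf 100.
    c := &node{balance: 100}
    b := &node{balance: 100, children: []*node{c}}
    a := &node{balance: 100, children: []*node{b}}
    a.applyWeightChanges()
    fmt.Println(a.weight, b.weight, c.weight) // 300 200 100
}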
View File

@@ -1,206 +0,0 @@
package doublylinkedtree
import (
"context"
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0))
// The updated balance of each node is 100
s := f.store
s.nodesLock.Lock()
defer s.nodesLock.Unlock()
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
s.nodeByRoot[indexToHash(3)].balance = 100
assert.NoError(t, s.treeRootNode.applyWeightChanges(ctx))
assert.Equal(t, uint64(300), s.nodeByRoot[indexToHash(1)].weight)
assert.Equal(t, uint64(200), s.nodeByRoot[indexToHash(2)].weight)
assert.Equal(t, uint64(100), s.nodeByRoot[indexToHash(3)].weight)
}
func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0))
// The updated balance of each node is 100
s := f.store
s.nodesLock.Lock()
defer s.nodesLock.Unlock()
s.nodeByRoot[indexToHash(1)].weight = 400
s.nodeByRoot[indexToHash(2)].weight = 400
s.nodeByRoot[indexToHash(3)].weight = 400
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
s.nodeByRoot[indexToHash(3)].balance = 100
assert.NoError(t, s.treeRootNode.applyWeightChanges(ctx))
assert.Equal(t, uint64(300), s.nodeByRoot[indexToHash(1)].weight)
assert.Equal(t, uint64(200), s.nodeByRoot[indexToHash(2)].weight)
assert.Equal(t, uint64(100), s.nodeByRoot[indexToHash(3)].weight)
}
func TestNode_UpdateBestDescendant_NonViableChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is not viable.
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 2, 3))
// Verify parent's best child and best descendant are `none`.
s := f.store
assert.Equal(t, 1, len(s.treeRootNode.children))
nilBestDescendant := s.treeRootNode.bestDescendant == nil
assert.Equal(t, true, nilBestDescendant)
}
func TestNode_UpdateBestDescendant_ViableChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is best descendant
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
s := f.store
assert.Equal(t, 1, len(s.treeRootNode.children))
assert.Equal(t, s.treeRootNode.children[0], s.treeRootNode.bestDescendant)
}
func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is best descendant
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
s := f.store
s.nodeByRoot[indexToHash(1)].weight = 100
s.nodeByRoot[indexToHash(2)].weight = 200
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1))
assert.Equal(t, 2, len(s.treeRootNode.children))
assert.Equal(t, s.treeRootNode.children[1], s.treeRootNode.bestDescendant)
}
func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is best descendant
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
s := f.store
s.nodeByRoot[indexToHash(1)].weight = 200
s.nodeByRoot[indexToHash(2)].weight = 100
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1))
assert.Equal(t, 2, len(s.treeRootNode.children))
assert.Equal(t, s.treeRootNode.children[0], s.treeRootNode.bestDescendant)
}
func TestNode_TestDepth(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is best descendant
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1))
s := f.store
require.Equal(t, s.nodeByRoot[indexToHash(2)].depth(), uint64(2))
require.Equal(t, s.nodeByRoot[indexToHash(3)].depth(), uint64(1))
}
func TestNode_ViableForHead(t *testing.T) {
tests := []struct {
n *Node
justifiedEpoch types.Epoch
finalizedEpoch types.Epoch
want bool
}{
{&Node{}, 0, 0, true},
{&Node{}, 1, 0, false},
{&Node{}, 0, 1, false},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1}, 1, 1, true},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1}, 2, 2, false},
{&Node{finalizedEpoch: 3, justifiedEpoch: 4}, 4, 3, true},
}
for _, tc := range tests {
got := tc.n.viableForHead(tc.justifiedEpoch, tc.finalizedEpoch)
assert.Equal(t, tc.want, got)
}
}
func TestNode_LeadsToViableHead(t *testing.T) {
f := setup(4, 3)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(1), 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 4, indexToHash(4), indexToHash(2), 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, indexToHash(5), indexToHash(3), 4, 3))
require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 3))
require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 3))
require.Equal(t, false, f.store.nodeByRoot[indexToHash(2)].leadsToViableHead(4, 3))
require.Equal(t, false, f.store.nodeByRoot[indexToHash(4)].leadsToViableHead(4, 3))
}
func TestNode_SetFullyValidated(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// insert blocks in the fork pattern below (optimistic status in parentheses)
//
// 0 (false) -- 1 (false) -- 2 (false) -- 3 (true) -- 4 (true)
// \
// -- 5 (true)
//
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.SetOptimisticToValid(ctx, params.BeaconConfig().ZeroHash))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1))
require.NoError(t, f.SetOptimisticToValid(ctx, indexToHash(1)))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 4, indexToHash(4), indexToHash(3), 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, indexToHash(5), indexToHash(1), 1, 1))
opt, err := f.IsOptimistic(ctx, indexToHash(5))
require.NoError(t, err)
require.Equal(t, true, opt)
opt, err = f.IsOptimistic(ctx, indexToHash(4))
require.NoError(t, err)
require.Equal(t, true, opt)
require.NoError(t, f.store.nodeByRoot[indexToHash(4)].setNodeAndParentValidated(ctx))
// block 5 should still be optimistic
opt, err = f.IsOptimistic(ctx, indexToHash(5))
require.NoError(t, err)
require.Equal(t, true, opt)
// block 4 and 3 should now be valid
opt, err = f.IsOptimistic(ctx, indexToHash(4))
require.NoError(t, err)
require.Equal(t, false, opt)
opt, err = f.IsOptimistic(ctx, indexToHash(3))
require.NoError(t, err)
require.Equal(t, false, opt)
}

View File

@@ -1,50 +0,0 @@
package doublylinkedtree
import (
"context"
)
// removeNode removes the node with the given root and all of its children
// from the Fork Choice Store.
func (s *Store) removeNode(ctx context.Context, root [32]byte) error {
s.nodesLock.Lock()
defer s.nodesLock.Unlock()
node, ok := s.nodeByRoot[root]
if !ok || node == nil {
return errNilNode
}
if !node.optimistic || node.parent == nil {
return errInvalidOptimisticStatus
}
children := node.parent.children
if len(children) == 1 {
node.parent.children = []*Node{}
} else {
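// Swap the node being removed with the last child and shrink the slice by
// one element so the parent keeps a compact children list.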
for i, n := range children {
if n == node {
if i != len(children)-1 {
children[i] = children[len(children)-1]
}
node.parent.children = children[:len(children)-1]
break
}
}
}
return s.removeNodeAndChildren(ctx, node)
}
// removeNodeAndChildren removes `node` and all of its descendant from the Store
func (s *Store) removeNodeAndChildren(ctx context.Context, node *Node) error {
for _, child := range node.children {
if ctx.Err() != nil {
return ctx.Err()
}
if err := s.removeNodeAndChildren(ctx, child); err != nil {
return err
}
}
delete(s.nodeByRoot, node.root)
return nil
}
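// invalidatePayloadSketch is an illustrative, hypothetical helper that is not
// part of this changeset. It only shows the intended use of removeNode: when
// the execution engine reports a payload as INVALID, the corresponding block
// and all of its descendants are dropped from fork choice.
func (s *Store) invalidatePayloadSketch(ctx context.Context, invalidRoot [32]byte) error {
return s.removeNode(ctx, invalidRoot)
}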

View File

@@ -1,70 +0,0 @@
package doublylinkedtree
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/testing/require"
)
// We test the algorithm to update a node from SYNCING to INVALID.
// We start with the following diagram:
//
// E -- F
// /
// C -- D
// / \
// A -- B G -- H -- I
// \ \
// J -- K -- L
//
// And every block in the Fork choice is optimistic.
//
func TestPruneInvalid(t *testing.T) {
tests := []struct {
root [32]byte // the root of the new INVALID block
wantedNodeNumber int
}{
{
[32]byte{'j'},
12,
},
{
[32]byte{'c'},
4,
},
{
[32]byte{'i'},
12,
},
{
[32]byte{'h'},
11,
},
{
[32]byte{'g'},
8,
},
}
for _, tc := range tests {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1))
require.NoError(t, f.store.removeNode(context.Background(), tc.root))
require.Equal(t, tc.wantedNodeNumber, f.NodeCount())
}
}

View File

@@ -1,73 +0,0 @@
package doublylinkedtree
import (
"context"
"time"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/time/slots"
)
// BoostProposerRoot sets the block root which should be boosted during
// the LMD fork choice algorithm calculations. This is meant to reward timely
// proposed blocks which occur before a cutoff interval set to
// SECONDS_PER_SLOT // INTERVALS_PER_SLOT.
//
// time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
// is_before_attesting_interval = time_into_slot < SECONDS_PER_SLOT // INTERVALS_PER_SLOT
// if get_current_slot(store) == block.slot and is_before_attesting_interval:
// store.proposer_boost_root = hash_tree_root(block)
func (f *ForkChoice) BoostProposerRoot(_ context.Context, blockSlot types.Slot, blockRoot [32]byte, genesisTime time.Time) error {
secondsPerSlot := params.BeaconConfig().SecondsPerSlot
timeIntoSlot := uint64(time.Since(genesisTime).Seconds()) % secondsPerSlot
isBeforeAttestingInterval := timeIntoSlot < secondsPerSlot/params.BeaconConfig().IntervalsPerSlot
currentSlot := slots.SinceGenesis(genesisTime)
// Only update the boosted proposer root to the incoming block root
// if the block is for the current, clock-based slot and the block was timely.
if currentSlot == blockSlot && isBeforeAttestingInterval {
f.store.proposerBoostLock.Lock()
f.store.proposerBoostRoot = blockRoot
f.store.proposerBoostLock.Unlock()
}
return nil
}
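// Worked example (illustrative only): with mainnet-style values of
// SecondsPerSlot = 12 and IntervalsPerSlot = 3, the attesting-interval cutoff
// is 12 / 3 = 4 seconds, so a block for the current slot is boosted only if
// it arrives within the first 4 seconds of that slot. Using the locals
// computed above:
//
//	cutoff := params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().IntervalsPerSlot
//	isTimely := timeIntoSlot < cutoff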
// ResetBoostedProposerRoot sets the value of the proposer boosted root to zeros.
func (f *ForkChoice) ResetBoostedProposerRoot(_ context.Context) error {
f.store.proposerBoostLock.Lock()
f.store.proposerBoostRoot = [32]byte{}
f.store.proposerBoostLock.Unlock()
return nil
}
// Given a list of validator balances, we compute the proposer boost score
// that should be given to a proposer based on their committee weight, derived from
// the total active balances, the size of a committee, and a boost score constant.
// IMPORTANT: The caller MUST pass in a list of validator balances where balances > 0 refer to active
// validators while balances == 0 are for inactive validators.
func computeProposerBoostScore(validatorBalances []uint64) (score uint64, err error) {
totalActiveBalance := uint64(0)
numActive := uint64(0)
for _, balance := range validatorBalances {
// We only consider balances > 0. The input slice should be constructed
// as balance > 0 for all active validators and 0 for inactive ones.
if balance == 0 {
continue
}
totalActiveBalance += balance
numActive += 1
}
if numActive == 0 {
// Should never happen.
err = errors.New("no active validators")
return
}
avgBalance := totalActiveBalance / numActive
committeeSize := numActive / uint64(params.BeaconConfig().SlotsPerEpoch)
committeeWeight := committeeSize * avgBalance
score = (committeeWeight * params.BeaconConfig().ProposerScoreBoost) / 100
return
}
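// Worked example (illustrative only), matching TestForkChoice_computeProposerBoostScore:
// with 64 active validators holding a balance of 10 each, the average balance
// is 10, the committee size is 64 / SlotsPerEpoch (32) = 2, the committee
// weight is 2 * 10 = 20, and with ProposerScoreBoost = 70 the resulting score
// is 20 * 70 / 100 = 14.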

View File

@@ -1,480 +0,0 @@
package doublylinkedtree
import (
"context"
"fmt"
"testing"
"time"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)
// Simple, ex-ante attack mitigation using proposer boost.
// In a nutshell, an adversarial block proposer in slot n+1 keeps its proposal hidden.
// The honest block proposer in slot n+2 will then propose an honest block. The
// adversary can now use its committee members' votes from both slots n+1 and n+2,
// and release their withheld block of slot n+1 in an attempt to win fork choice.
// If the honest proposal is boosted at slot n+2, it will win against this attacker.
func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
ctx := context.Background()
zeroHash := params.BeaconConfig().ZeroHash
balances := make([]uint64, 64) // 64 active validators.
for i := 0; i < len(balances); i++ {
balances[i] = 10
}
jEpoch, fEpoch := types.Epoch(0), types.Epoch(0)
t.Run("back-propagates boost score to ancestors after proposer boosting", func(t *testing.T) {
f := setup(jEpoch, fEpoch)
// The head should always start at the finalized block.
headRoot, err := f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
assert.Equal(t, zeroHash, headRoot, "Incorrect head with genesis")
// Insert block at slot 1 into the tree and verify head is at that block:
// 0
// |
// 1 <- HEAD
slot := types.Slot(1)
newRoot := indexToHash(1)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
slot,
newRoot,
headRoot,
jEpoch,
fEpoch,
),
)
f.ProcessAttestation(ctx, []uint64{0}, newRoot, fEpoch)
headRoot, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 1")
// Insert block at slot 2 into the tree and verify head is at that block:
// 0
// |
// 1
// |
// 2 <- HEAD
slot = types.Slot(2)
newRoot = indexToHash(2)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
slot,
newRoot,
headRoot,
jEpoch,
fEpoch,
),
)
f.ProcessAttestation(ctx, []uint64{1}, newRoot, fEpoch)
headRoot, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 2")
// Insert block at slot 3 into the tree and verify head is at that block:
// 0
// |
// 1
// |
// 2
// |
// 3 <- HEAD
slot = types.Slot(3)
newRoot = indexToHash(3)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
slot,
newRoot,
headRoot,
jEpoch,
fEpoch,
),
)
f.ProcessAttestation(ctx, []uint64{2}, newRoot, fEpoch)
headRoot, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 3")
// Insert a second block at slot 3 into the tree and boost its score.
// 0
// |
// 1
// |
// 2
// / \
// 3 4 <- HEAD
slot = types.Slot(3)
newRoot = indexToHash(4)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
slot,
newRoot,
headRoot,
jEpoch,
fEpoch,
),
)
f.ProcessAttestation(ctx, []uint64{3}, newRoot, fEpoch)
threeSlots := 3 * params.BeaconConfig().SecondsPerSlot
genesisTime := time.Now().Add(-time.Second * time.Duration(threeSlots))
require.NoError(t, f.BoostProposerRoot(ctx, slot, newRoot, genesisTime))
headRoot, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 3")
// Check the ancestor scores from the store.
require.Equal(t, 5, len(f.store.nodeByRoot))
// Expect nodes to have a boosted, back-propagated score.
// Ancestors have the added weights of their children. Genesis is a special exception at 0 weight.
require.Equal(t, f.store.treeRootNode.weight, uint64(0))
// Otherwise, assuming a non-genesis block, A:
//
// A -> B -> C
//
// Where each one has a weight of 10 individually, the final weights will look like
//
// (A: 30) -> (B: 20) -> (C: 10)
//
// The boost adds 14 to the weight, so if C is boosted, we would have
//
// (A: 44) -> (B: 34) -> (C: 24)
//
// In this case, we have a small fork:
//
// (A: 54) -> (B: 44) -> (C: 34)
// \_->(D: 24)
//
// So B has its own weight, 10, and the sum of both C and D. That's why we see weight 54 in the
// middle instead of the normal progression of (44 -> 34 -> 24).
node1 := f.store.nodeByRoot[indexToHash(1)]
require.Equal(t, node1.weight, uint64(54))
node2 := f.store.nodeByRoot[indexToHash(2)]
require.Equal(t, node2.weight, uint64(44))
node3 := f.store.nodeByRoot[indexToHash(4)]
require.Equal(t, node3.weight, uint64(24))
})
t.Run("vanilla ex ante attack", func(t *testing.T) {
f := setup(jEpoch, fEpoch)
// The head should always start at the finalized block.
r, err := f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
assert.Equal(t, zeroHash, r, "Incorrect head with genesis")
// Proposer from slot 1 does not reveal their block, B, at slot 1.
// Proposer at slot 2 does reveal their block, C, and it becomes the head.
// C builds on A, as proposer at slot 1 did not reveal B.
// A
// / \
// (B?) \
// \
// C <- Slot 2 HEAD
honestBlockSlot := types.Slot(2)
honestBlock := indexToHash(2)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
honestBlockSlot,
honestBlock,
zeroHash,
jEpoch,
fEpoch,
),
)
r, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
assert.Equal(t, honestBlock, r, "Incorrect head for justified epoch at slot 2")
maliciouslyWithheldBlockSlot := types.Slot(1)
maliciouslyWithheldBlock := indexToHash(1)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
maliciouslyWithheldBlockSlot,
maliciouslyWithheldBlock,
zeroHash,
jEpoch,
fEpoch,
),
)
// Ensure the head is C, the honest block.
r, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
assert.Equal(t, honestBlock, r, "Incorrect head for justified epoch at slot 2")
// We boost the honest proposal at slot 2.
secondsPerSlot := time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot)
genesis := time.Now().Add(-2 * secondsPerSlot)
require.NoError(t, f.BoostProposerRoot(ctx, honestBlockSlot, honestBlock, genesis))
// The maliciously withheld block has one vote.
votes := []uint64{1}
f.ProcessAttestation(ctx, votes, maliciouslyWithheldBlock, fEpoch)
// Ensure the head is STILL C, the honest block, as the honest block had proposer boost.
r, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
assert.Equal(t, honestBlock, r, "Incorrect head for justified epoch at slot 2")
})
t.Run("adversarial attestations > proposer boosting", func(t *testing.T) {
f := setup(jEpoch, fEpoch)
// The head should always start at the finalized block.
r, err := f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
assert.Equal(t, zeroHash, r, "Incorrect head with genesis")
// Proposer from slot 1 does not reveal their block, B, at slot 1.
// Proposer at slot 2 does reveal their block, C, and it becomes the head.
// C builds on A, as proposer at slot 1 did not reveal B.
// A
// / \
// (B?) \
// \
// C <- Slot 2 HEAD
honestBlockSlot := types.Slot(2)
honestBlock := indexToHash(2)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
honestBlockSlot,
honestBlock,
zeroHash,
jEpoch,
fEpoch,
),
)
// Ensure C is the head.
r, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
assert.Equal(t, honestBlock, r, "Incorrect head for justified epoch at slot 2")
maliciouslyWithheldBlockSlot := types.Slot(1)
maliciouslyWithheldBlock := indexToHash(1)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
maliciouslyWithheldBlockSlot,
maliciouslyWithheldBlock,
zeroHash,
jEpoch,
fEpoch,
),
)
// Ensure C is still the head after the malicious proposer reveals their block.
r, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
assert.Equal(t, honestBlock, r, "Incorrect head for justified epoch at slot 2")
// We boost the honest proposal at slot 2.
secondsPerSlot := time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot)
genesis := time.Now().Add(-2 * secondsPerSlot)
require.NoError(t, f.BoostProposerRoot(ctx, honestBlockSlot, honestBlock, genesis))
// An attestation is received for B that has more voting power than C with the proposer boost,
// allowing B to then become the head if their attestation has enough adversarial votes.
votes := []uint64{1, 2}
f.ProcessAttestation(ctx, votes, maliciouslyWithheldBlock, fEpoch)
// Expect the head to have switched to B.
r, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
assert.Equal(t, maliciouslyWithheldBlock, r, "Expected B to become the head")
})
t.Run("boosting necessary to sandwich attack", func(t *testing.T) {
// Boosting necessary to sandwich attack.
// Objects:
// Block A - slot N
// Block B (parent A) - slot N+1
// Block C (parent A) - slot N+2
// Block D (parent B) - slot N+3
// Attestation_1 (Block C); size 1 - slot N+2 (honest)
// Steps:
// Block A received at N — A is head
// Block C received at N+2 — C is head
// Block B received at N+2 — C is head
// Attestation_1 received at N+3 — C is head
// Block D received at N+3 — D is head
f := setup(jEpoch, fEpoch)
a := zeroHash
// The head should always start at the finalized block.
r, err := f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
assert.Equal(t, zeroHash, r, "Incorrect head with genesis")
cSlot := types.Slot(2)
c := indexToHash(2)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
cSlot,
c,
a, // parent
jEpoch,
fEpoch,
),
)
// Ensure C is the head.
r, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
assert.Equal(t, c, r, "Incorrect head for justified epoch at slot 2")
// We boost C.
secondsPerSlot := time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot)
genesis := time.Now().Add(-2 * secondsPerSlot)
require.NoError(t, f.BoostProposerRoot(ctx, cSlot /* slot */, c, genesis))
bSlot := types.Slot(1)
b := indexToHash(1)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
bSlot,
b,
a, // parent
jEpoch,
fEpoch,
),
)
// Ensure C is still the head.
r, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
assert.Equal(t, c, r, "Incorrect head for justified epoch at slot 2")
// An attestation for C is received at slot N+3.
votes := []uint64{1}
f.ProcessAttestation(ctx, votes, c, fEpoch)
// A block D, building on B, is received at slot N+3. It should not be able to win without boosting.
dSlot := types.Slot(3)
d := indexToHash(3)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
dSlot,
d,
b, // parent
jEpoch,
fEpoch,
),
)
// D cannot win without a boost.
r, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
assert.Equal(t, c, r, "Expected C to remain the head")
// Block D receives the boost.
genesis = time.Now().Add(-3 * secondsPerSlot)
require.NoError(t, f.BoostProposerRoot(ctx, dSlot /* slot */, d, genesis))
// Ensure D becomes the head thanks to boosting.
r, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
require.NoError(t, err)
assert.Equal(t, d, r, "Expected D to become the head")
})
}
func TestForkChoice_BoostProposerRoot(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.SecondsPerSlot = 6
cfg.IntervalsPerSlot = 3
params.OverrideBeaconConfig(cfg)
ctx := context.Background()
t.Run("does not boost block from different slot", func(t *testing.T) {
f := &ForkChoice{
store: &Store{},
}
// Genesis set to 1 slot ago.
genesis := time.Now().Add(-time.Duration(cfg.SecondsPerSlot) * time.Second)
blockRoot := [32]byte{'A'}
// Trying to boost a block from slot 0 should not work.
err := f.BoostProposerRoot(ctx, types.Slot(0), blockRoot, genesis)
require.NoError(t, err)
require.DeepEqual(t, [32]byte{}, f.store.proposerBoostRoot)
})
t.Run("does not boost untimely block from same slot", func(t *testing.T) {
f := &ForkChoice{
store: &Store{},
}
// Genesis set to 1 slot ago + X where X > attesting interval.
genesis := time.Now().Add(-time.Duration(cfg.SecondsPerSlot) * time.Second)
attestingInterval := time.Duration(cfg.SecondsPerSlot / cfg.IntervalsPerSlot)
greaterThanAttestingInterval := attestingInterval + 100*time.Millisecond
genesis = genesis.Add(-greaterThanAttestingInterval * time.Second)
blockRoot := [32]byte{'A'}
// Trying to boost a block from slot 1 that is untimely should not work.
err := f.BoostProposerRoot(ctx, types.Slot(1), blockRoot, genesis)
require.NoError(t, err)
require.DeepEqual(t, [32]byte{}, f.store.proposerBoostRoot)
})
t.Run("boosts perfectly timely block from same slot", func(t *testing.T) {
f := &ForkChoice{
store: &Store{},
}
// Genesis set to 1 slot ago + 0 seconds into the attesting interval.
genesis := time.Now().Add(-time.Duration(cfg.SecondsPerSlot) * time.Second)
fmt.Println(genesis)
blockRoot := [32]byte{'A'}
err := f.BoostProposerRoot(ctx, types.Slot(1), blockRoot, genesis)
require.NoError(t, err)
require.DeepEqual(t, [32]byte{'A'}, f.store.proposerBoostRoot)
})
t.Run("boosts timely block from same slot", func(t *testing.T) {
f := &ForkChoice{
store: &Store{},
}
// Genesis set to 1 slot ago + (attesting interval / 2).
genesis := time.Now().Add(-time.Duration(cfg.SecondsPerSlot) * time.Second)
blockRoot := [32]byte{'A'}
halfAttestingInterval := time.Second
genesis = genesis.Add(-halfAttestingInterval)
err := f.BoostProposerRoot(ctx, types.Slot(1), blockRoot, genesis)
require.NoError(t, err)
require.DeepEqual(t, [32]byte{'A'}, f.store.proposerBoostRoot)
})
}
func TestForkChoice_computeProposerBoostScore(t *testing.T) {
t.Run("nil justified balances throws error", func(t *testing.T) {
_, err := computeProposerBoostScore(nil)
require.ErrorContains(t, "no active validators", err)
})
t.Run("normal active balances computes score", func(t *testing.T) {
validatorBalances := make([]uint64, 64) // Num validators
for i := 0; i < len(validatorBalances); i++ {
validatorBalances[i] = 10
}
// Avg balance is 10, and the number of validators is 64.
// With a committee size of num validators (64) / slots per epoch (32) == 2,
// we then have a committee weight of avg balance * committee size = 10 * 2 = 20.
// The score then becomes committee weight * PROPOSER_SCORE_BOOST // 100, which is
// 20 * 70 / 100 = 14.
score, err := computeProposerBoostScore(validatorBalances)
require.NoError(t, err)
require.Equal(t, uint64(14), score)
})
}

View File

@@ -1,215 +0,0 @@
package doublylinkedtree
import (
"context"
"fmt"
types "github.com/prysmaticlabs/eth2-types"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"go.opencensus.io/trace"
)
// This defines the minimal number of block nodes that can be in the tree
// before getting pruned upon new finalization.
const defaultPruneThreshold = 256
// applyProposerBoostScore applies the current proposer boost scores to the
// relevant nodes
func (s *Store) applyProposerBoostScore(newBalances []uint64) error {
s.proposerBoostLock.Lock()
defer s.proposerBoostLock.Unlock()
proposerScore := uint64(0)
var err error
if s.previousProposerBoostRoot != params.BeaconConfig().ZeroHash {
previousNode, ok := s.nodeByRoot[s.previousProposerBoostRoot]
if !ok || previousNode == nil {
return errInvalidProposerBoostRoot
}
previousNode.balance -= s.previousProposerBoostScore
}
if s.proposerBoostRoot != params.BeaconConfig().ZeroHash {
currentNode, ok := s.nodeByRoot[s.proposerBoostRoot]
if !ok || currentNode == nil {
return errInvalidProposerBoostRoot
}
proposerScore, err = computeProposerBoostScore(newBalances)
if err != nil {
return err
}
currentNode.balance += proposerScore
}
s.previousProposerBoostRoot = s.proposerBoostRoot
s.previousProposerBoostScore = proposerScore
return nil
}
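// Worked example (illustrative only): if block A carried a boost of 14 from
// the previous slot and block B is the currently boosted root, this function
// first subtracts 14 from A's balance, then recomputes the score from the new
// balances and adds it to B's balance, so at most one node carries the boost
// at any given time.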
// ProposerBoost of fork choice store.
func (s *Store) proposerBoost() [fieldparams.RootLength]byte {
s.proposerBoostLock.RLock()
defer s.proposerBoostLock.RUnlock()
return s.proposerBoostRoot
}
// PruneThreshold of fork choice store.
func (s *Store) PruneThreshold() uint64 {
return s.pruneThreshold
}
// head starts from justified root and then follows the best descendant links
// to find the best block for head. This function assumes a lock on s.nodesLock
func (s *Store) head(ctx context.Context, justifiedRoot [32]byte) ([32]byte, error) {
_, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.head")
defer span.End()
// JustifiedRoot has to be known
justifiedNode, ok := s.nodeByRoot[justifiedRoot]
if !ok || justifiedNode == nil {
return [32]byte{}, errUnknownJustifiedRoot
}
// If the justified node doesn't have a best descendant,
// the best node is itself.
bestDescendant := justifiedNode.bestDescendant
if bestDescendant == nil {
bestDescendant = justifiedNode
}
if !bestDescendant.viableForHead(s.justifiedEpoch, s.finalizedEpoch) {
return [32]byte{}, fmt.Errorf("head at slot %d with weight %d is not eligible, finalizedEpoch %d != %d, justifiedEpoch %d != %d",
bestDescendant.slot, bestDescendant.weight/10e9, bestDescendant.finalizedEpoch, s.finalizedEpoch, bestDescendant.justifiedEpoch, s.justifiedEpoch)
}
// Update metrics.
if bestDescendant != s.headNode {
headChangesCount.Inc()
headSlotNumber.Set(float64(bestDescendant.slot))
s.headNode = bestDescendant
}
return bestDescendant.root, nil
}
// insert registers a new block node to the fork choice store's node list.
// It then updates the best descendant links of the new node's ancestors.
func (s *Store) insert(ctx context.Context,
slot types.Slot,
root, parentRoot [fieldparams.RootLength]byte,
justifiedEpoch, finalizedEpoch types.Epoch) error {
_, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.insert")
defer span.End()
s.nodesLock.Lock()
defer s.nodesLock.Unlock()
// Return if the block has been inserted into Store before.
if _, ok := s.nodeByRoot[root]; ok {
return nil
}
parent := s.nodeByRoot[parentRoot]
n := &Node{
slot: slot,
root: root,
parent: parent,
justifiedEpoch: justifiedEpoch,
finalizedEpoch: finalizedEpoch,
optimistic: true,
}
s.nodeByRoot[root] = n
if parent != nil {
parent.children = append(parent.children, n)
if err := s.treeRootNode.updateBestDescendant(ctx, s.justifiedEpoch, s.finalizedEpoch); err != nil {
return err
}
}
// Set the node as root if the store was empty
if s.treeRootNode == nil {
s.treeRootNode = n
s.headNode = n
}
// Update metrics.
processedBlockCount.Inc()
nodeCount.Set(float64(len(s.nodeByRoot)))
return nil
}
// updateCheckpoints updates the justified / finalized epochs in store if necessary.
func (s *Store) updateCheckpoints(justifiedEpoch, finalizedEpoch types.Epoch) {
s.justifiedEpoch = justifiedEpoch
s.finalizedEpoch = finalizedEpoch
}
// pruneFinalizedNodeByRootMap prunes the `nodeByRoot` map
// starting from `node` down to the finalized Node or to a leaf of the Fork
// choice store. This method assumes a lock on nodesLock.
func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalizedNode *Node) error {
if ctx.Err() != nil {
return ctx.Err()
}
if node == finalizedNode {
return nil
}
for _, child := range node.children {
if err := s.pruneFinalizedNodeByRootMap(ctx, child, finalizedNode); err != nil {
return err
}
}
delete(s.nodeByRoot, node.root)
return nil
}
// prune prunes the fork choice store with the new finalized root. The store is only pruned if the input
// root is different from the current store finalized root, and the number of nodes in the store has met the prune threshold.
// This function does not prune invalid optimistically synced nodes; it deals only with pruning upon finalization.
func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.Prune")
defer span.End()
s.nodesLock.Lock()
defer s.nodesLock.Unlock()
finalizedNode, ok := s.nodeByRoot[finalizedRoot]
if !ok || finalizedNode == nil {
return errUnknownFinalizedRoot
}
// The number of nodes has not met the prune threshold.
// Pruning at small numbers incurs more cost than benefit.
if finalizedNode.depth() < s.pruneThreshold {
return nil
}
// Prune nodeByRoot starting from root
if err := s.pruneFinalizedNodeByRootMap(ctx, s.treeRootNode, finalizedNode); err != nil {
return err
}
finalizedNode.parent = nil
s.treeRootNode = finalizedNode
prunedCount.Inc()
return nil
}
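// Worked example (illustrative only): with defaultPruneThreshold = 256, a
// call to prune is a no-op until the finalized node sits at least 256 blocks
// below the current tree root; only then is everything outside the finalized
// subtree deleted from nodeByRoot.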
// tips returns a list of possible heads from fork choice store, it returns the
// roots and the slots of the leaf nodes.
func (s *Store) tips() ([][32]byte, []types.Slot) {
var roots [][32]byte
var slots []types.Slot
for root, node := range s.nodeByRoot {
if len(node.children) == 0 {
roots = append(roots, root)
slots = append(slots, node.slot)
}
}
return roots, slots
}

View File

@@ -1,274 +0,0 @@
package doublylinkedtree
import (
"context"
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestStore_PruneThreshold(t *testing.T) {
s := &Store{
pruneThreshold: defaultPruneThreshold,
}
if got := s.PruneThreshold(); got != defaultPruneThreshold {
t.Errorf("PruneThreshold() = %v, want %v", got, defaultPruneThreshold)
}
}
func TestStore_JustifiedEpoch(t *testing.T) {
j := types.Epoch(100)
f := setup(j, j)
require.Equal(t, j, f.JustifiedEpoch())
}
func TestStore_FinalizedEpoch(t *testing.T) {
j := types.Epoch(50)
f := setup(j, j)
require.Equal(t, j, f.FinalizedEpoch())
}
func TestStore_NodeCount(t *testing.T) {
f := setup(0, 0)
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.Equal(t, 2, f.NodeCount())
}
func TestStore_NodeByRoot(t *testing.T) {
f := setup(0, 0)
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 0, 0))
node0 := f.store.treeRootNode
node1 := node0.children[0]
node2 := node1.children[0]
expectedRoots := map[[32]byte]*Node{
params.BeaconConfig().ZeroHash: node0,
indexToHash(1): node1,
indexToHash(2): node2,
}
require.Equal(t, 3, f.NodeCount())
for root, node := range f.store.nodeByRoot {
v, ok := expectedRoots[root]
require.Equal(t, ok, true)
require.Equal(t, v, node)
}
}
func TestForkChoice_HasNode(t *testing.T) {
f := setup(0, 0)
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.Equal(t, true, f.HasNode(indexToHash(1)))
}
func TestStore_Head_UnknownJustifiedRoot(t *testing.T) {
f := setup(0, 0)
_, err := f.store.head(context.Background(), [32]byte{'a'})
assert.ErrorContains(t, errUnknownJustifiedRoot.Error(), err)
}
func TestStore_Head_Itself(t *testing.T) {
f := setup(0, 0)
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
// Since the justified node does not have a best descendant, the best node
// is itself.
h, err := f.store.head(context.Background(), indexToHash(1))
require.NoError(t, err)
assert.Equal(t, indexToHash(1), h)
}
func TestStore_Head_BestDescendant(t *testing.T) {
f := setup(0, 0)
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(3), indexToHash(1), 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(4), indexToHash(2), 0, 0))
h, err := f.store.head(context.Background(), indexToHash(1))
require.NoError(t, err)
require.Equal(t, h, indexToHash(4))
}
func TestStore_UpdateBestDescendant_ContextCancelled(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
f := setup(0, 0)
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
cancel()
err := f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0)
require.ErrorContains(t, "context canceled", err)
}
func TestStore_Insert(t *testing.T) {
// The new node does not have a parent.
treeRootNode := &Node{slot: 0, root: indexToHash(0)}
nodeByRoot := map[[32]byte]*Node{indexToHash(0): treeRootNode}
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode}
require.NoError(t, s.insert(context.Background(), 100, indexToHash(100), indexToHash(0), 1, 1))
assert.Equal(t, 2, len(s.nodeByRoot), "Did not insert block")
assert.Equal(t, (*Node)(nil), treeRootNode.parent, "Incorrect parent")
assert.Equal(t, 1, len(treeRootNode.children), "Incorrect children number")
child := treeRootNode.children[0]
assert.Equal(t, types.Epoch(1), child.justifiedEpoch, "Incorrect justification")
assert.Equal(t, types.Epoch(1), child.finalizedEpoch, "Incorrect finalization")
assert.Equal(t, indexToHash(100), child.root, "Incorrect root")
}
func TestStore_updateCheckpoints(t *testing.T) {
f := setup(0, 0)
s := f.store
s.updateCheckpoints(1, 1)
assert.Equal(t, types.Epoch(1), s.justifiedEpoch, "Did not update justified epoch")
assert.Equal(t, types.Epoch(1), s.finalizedEpoch, "Did not update finalized epoch")
}
func TestStore_Prune_LessThanThreshold(t *testing.T) {
// Define 100 nodes in store.
numOfNodes := uint64(100)
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
for i := uint64(2); i < numOfNodes; i++ {
require.NoError(t, f.InsertOptimisticBlock(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), 0, 0))
}
s := f.store
s.pruneThreshold = 100
// Finalized root has depth 99 so everything before it should be pruned,
// but PruneThreshold is at 100 so nothing will be pruned.
require.NoError(t, s.prune(context.Background(), indexToHash(99)))
assert.Equal(t, 100, len(s.nodeByRoot), "Incorrect nodes count")
}
func TestStore_Prune_MoreThanThreshold(t *testing.T) {
// Define 100 nodes in store.
numOfNodes := uint64(100)
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
for i := uint64(2); i < numOfNodes; i++ {
require.NoError(t, f.InsertOptimisticBlock(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), 0, 0))
}
s := f.store
s.pruneThreshold = 0
// Finalized root is at index 99 so everything before 99 should be pruned.
require.NoError(t, s.prune(context.Background(), indexToHash(99)))
assert.Equal(t, 1, len(s.nodeByRoot), "Incorrect nodes count")
}
func TestStore_Prune_MoreThanOnce(t *testing.T) {
// Define 100 nodes in store.
numOfNodes := uint64(100)
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
for i := uint64(2); i < numOfNodes; i++ {
require.NoError(t, f.InsertOptimisticBlock(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), 0, 0))
}
s := f.store
s.pruneThreshold = 0
// Finalized root is at index 11 so everything before 11 should be pruned.
require.NoError(t, s.prune(context.Background(), indexToHash(10)))
assert.Equal(t, 90, len(s.nodeByRoot), "Incorrect nodes count")
// One more time.
require.NoError(t, s.prune(context.Background(), indexToHash(20)))
assert.Equal(t, 80, len(s.nodeByRoot), "Incorrect nodes count")
}
// This unit test starts with a simple branch like this
//
// - 1
// /
// -- 0 -- 2
//
// And we finalize 1. As a result only 1 should survive
func TestStore_Prune_NoDanglingBranch(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
f.store.pruneThreshold = 0
s := f.store
require.NoError(t, s.prune(context.Background(), indexToHash(1)))
require.Equal(t, len(s.nodeByRoot), 1)
}
// This test starts with the following branching diagram
//
// E -- F
// /
// C -- D
// / \
// A -- B G -- H -- I
// \ \
// J -- K -- L
//
//
func TestStore_tips(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1))
expectedMap := map[[32]byte]types.Slot{
[32]byte{'f'}: 105,
[32]byte{'i'}: 106,
[32]byte{'l'}: 106,
[32]byte{'j'}: 102,
}
roots, slots := f.store.tips()
for i, r := range roots {
expectedSlot, ok := expectedMap[r]
require.Equal(t, true, ok)
require.Equal(t, slots[i], expectedSlot)
}
}
func TestStore_PruneMapsNodes(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
s := f.store
s.pruneThreshold = 0
require.NoError(t, s.prune(context.Background(), indexToHash(uint64(1))))
require.Equal(t, len(s.nodeByRoot), 1)
}
func TestStore_HasParent(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), 1, 1))
require.Equal(t, false, f.HasParent(params.BeaconConfig().ZeroHash))
require.Equal(t, true, f.HasParent(indexToHash(1)))
require.Equal(t, true, f.HasParent(indexToHash(2)))
require.Equal(t, true, f.HasParent(indexToHash(3)))
require.Equal(t, false, f.HasParent(indexToHash(4)))
}

View File

@@ -1,53 +0,0 @@
package doublylinkedtree
import (
"sync"
types "github.com/prysmaticlabs/eth2-types"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
)
// ForkChoice defines the overall fork choice store which includes all block nodes, validator's latest votes and balances.
type ForkChoice struct {
store *Store
votes []Vote // tracks individual validator's last vote.
votesLock sync.RWMutex
balances []uint64 // tracks individual validator's last justified balances.
}
// Store defines the fork choice store which includes block nodes and the last view of checkpoint information.
type Store struct {
justifiedEpoch types.Epoch // latest justified epoch in store.
finalizedEpoch types.Epoch // latest finalized epoch in store.
pruneThreshold uint64 // do not prune tree unless threshold is reached.
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
previousProposerBoostScore uint64 // previous proposer boosted root score.
treeRootNode *Node // the root node of the store tree.
headNode *Node // last head Node
nodeByRoot map[[fieldparams.RootLength]byte]*Node // nodes indexed by roots.
nodesLock sync.RWMutex
proposerBoostLock sync.RWMutex
}
// Node defines an individual block in the tree, tracking its parent, children, and the weight accounted for it.
// This is used as a pointer-based, stateful DAG for efficient fork choice look-ups.
type Node struct {
slot types.Slot // slot of the block converted to the node.
root [fieldparams.RootLength]byte // root of the block converted to the node.
parent *Node // parent node of this node.
children []*Node // the list of direct children of this Node
justifiedEpoch types.Epoch // justifiedEpoch of this node.
finalizedEpoch types.Epoch // finalizedEpoch of this node.
balance uint64 // the balance that voted for this node directly
weight uint64 // weight of this node: the total balance including children
bestDescendant *Node // bestDescendant node of this node.
optimistic bool // true if the block has not yet been fully validated.
}
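// Illustrative sketch (not part of this changeset): the relationship between
// balance and weight described above can be expressed as a recursive sum,
// where a node's weight is its own directly attested balance plus the weight
// of all of its children. The helper name subtreeWeight is hypothetical.
func subtreeWeight(n *Node) uint64 {
w := n.balance
for _, c := range n.children {
w += subtreeWeight(c)
}
return w
}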
// Vote defines an individual validator's vote.
type Vote struct {
currentRoot [fieldparams.RootLength]byte // current voting root.
nextRoot [fieldparams.RootLength]byte // next voting root.
nextEpoch types.Epoch // epoch of next voting period.
}

View File

@@ -1,297 +0,0 @@
package doublylinkedtree
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestVotes_CanFindHead(t *testing.T) {
balances := []uint64{1, 1}
f := setup(1, 1)
// The head should always start at the finalized block.
r, err := f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, params.BeaconConfig().ZeroHash, r, "Incorrect head with genesis")
// Insert block 2 into the tree and verify head is at 2:
// 0
// /
// 2 <- head
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
// Insert block 1 into the tree and verify head is still at 2:
// 0
// / \
// head -> 2 1
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
// Add a vote to block 1 of the tree and verify head is switched to 1:
// 0
// / \
// 2 1 <- +vote, new head
f.ProcessAttestation(context.Background(), []uint64{0}, indexToHash(1), 2)
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(1), r, "Incorrect head for with justified epoch at 1")
// Add a vote to block 2 of the tree and verify head is switched to 2:
// 0
// / \
// vote, new head -> 2 1
f.ProcessAttestation(context.Background(), []uint64{1}, indexToHash(2), 2)
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
// Insert block 3 into the tree and verify head is still at 2:
// 0
// / \
// head -> 2 1
// |
// 3
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
// Move validator 0's vote from 1 to 3 and verify head is still at 2:
// 0
// / \
// head -> 2 1 <- old vote
// |
// 3 <- new vote
f.ProcessAttestation(context.Background(), []uint64{0}, indexToHash(3), 3)
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
// Move validator 1's vote from 2 to 1 and verify head is switched to 3:
// 0
// / \
// old vote -> 2 1 <- new vote
// |
// 3 <- head
f.ProcessAttestation(context.Background(), []uint64{1}, indexToHash(1), 3)
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(3), r, "Incorrect head for with justified epoch at 1")
// Insert block 4 into the tree and verify head is at 4:
// 0
// / \
// 2 1
// |
// 3
// |
// 4 <- head
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(4), indexToHash(3), 1, 1))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
// Insert block 5 with justified epoch 2, it should be filtered out:
// 0
// / \
// 2 1
// |
// 3
// |
// 4 <- head
// /
// 5 <- justified epoch = 2
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 2))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
// Insert block 6 with justified epoch 1:
// 0
// / \
// 2 1
// |
// 3
// |
// 4 <- head
// / \
// 5 6 <- justified epoch = 1
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1))
// Moved 2 votes to block 5:
// 0
// / \
// 2 1
// |
// 3
// |
// 4
// / \
// 2 votes-> 5 6
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1))
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(5), 4)
// Insert blocks 7, 8 and 9:
// 6 should still be the head, even though 5 has all the votes.
// 0
// / \
// 2 1
// |
// 3
// |
// 4
// / \
// 5 6 <- head
// |
// 7
// |
// 8
// |
// 9
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(7), indexToHash(5), 2, 2))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(8), indexToHash(7), 2, 2))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(9), indexToHash(8), 2, 2))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 1")
// Update fork choice justified epoch to 2 and start block to 5.
// Verify 9 is the head:
// 0
// / \
// 2 1
// |
// 3
// |
// 4
// / \
// 5 6
// |
// 7
// |
// 8
// |
// 9 <- head
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 2")
// Insert block 10 and 2 validators updated their vote to 9.
// Verify 9 is the head:
// 0
// / \
// 2 1
// |
// 3
// |
// 4
// / \
// 5 6
// |
// 7
// |
// 8
// / \
// 2 votes->9 10
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(9), 5)
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(10), indexToHash(8), 2, 2))
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 2")
// Add 3 more validators to the system.
balances = []uint64{1, 1, 1, 1, 1}
// The new validators voted for 10.
f.ProcessAttestation(context.Background(), []uint64{2, 3, 4}, indexToHash(10), 5)
// The new head should be 10.
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
require.NoError(t, err)
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 2")
// Set the balances of the last 2 validators to 0.
balances = []uint64{1, 1, 1, 0, 0}
// The head should be back to 9.
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 1")
// Set the balances back to normal.
balances = []uint64{1, 1, 1, 1, 1}
// The head should be back to 10.
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
require.NoError(t, err)
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 2")
// Remove the last 2 validators.
balances = []uint64{1, 1, 1}
// The head should be back to 9.
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 1")
// Verify pruning below the prune threshold does not affect head.
f.store.pruneThreshold = 1000
require.NoError(t, f.store.prune(context.Background(), indexToHash(5)))
assert.Equal(t, 11, len(f.store.nodeByRoot), "Incorrect nodes length after prune")
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 2")
// Verify pruning above the prune threshold does prune:
// 0
// / \
// 2 1
// |
// 3
// |
// 4
// -------pruned here ------
// 5 6
// |
// 7
// |
// 8
// / \
// 9 10
f.store.pruneThreshold = 1
require.NoError(t, f.store.prune(context.Background(), indexToHash(5)))
assert.Equal(t, 5, len(f.store.nodeByRoot), "Incorrect nodes length after prune")
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
require.NoError(t, err)
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 2")
// Insert new block 11 and verify head is at 11.
// 5 6
// |
// 7
// |
// 8
// / \
// 9 10
// |
// head-> 11
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(11), indexToHash(9), 2, 2))
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
require.NoError(t, err)
assert.Equal(t, indexToHash(11), r, "Incorrect head for with justified epoch at 2")
}

View File

@@ -5,8 +5,7 @@ import (
"time"
types "github.com/prysmaticlabs/eth2-types"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
)
// ForkChoicer represents the full fork choice interface composed of all the sub-interfaces.
@@ -16,7 +15,6 @@ type ForkChoicer interface {
AttestationProcessor // to track new attestation for fork choice.
Pruner // to clean old data for fork choice.
Getter // to retrieve fork choice information.
Setter // to set fork choice information.
ProposerBooster // ability to boost timely-proposed block roots.
}
@@ -24,18 +22,12 @@ type ForkChoicer interface {
type HeadRetriever interface {
Head(context.Context, types.Epoch, [32]byte, []uint64, types.Epoch) ([32]byte, error)
Tips() ([][32]byte, []types.Slot)
IsOptimistic(ctx context.Context, root [32]byte) (bool, error)
IsOptimistic(root [32]byte) (bool, error)
}
// BlockProcessor processes the block that's used for accounting fork choice.
type BlockProcessor interface {
InsertOptimisticBlock(ctx context.Context,
slot types.Slot,
root [32]byte,
parentRoot [32]byte,
justifiedEpoch types.Epoch,
finalizedEpoch types.Epoch,
) error
ProcessBlock(context.Context, types.Slot, [32]byte, [32]byte, types.Epoch, types.Epoch, bool) error
}
// AttestationProcessor processes the attestation that's used for accounting fork choice.
@@ -57,18 +49,9 @@ type ProposerBooster interface {
// Getter returns fork choice related information.
type Getter interface {
HasNode([32]byte) bool
ProposerBoost() [fieldparams.RootLength]byte
Store() *protoarray.Store
HasParent(root [32]byte) bool
AncestorRoot(ctx context.Context, root [32]byte, slot types.Slot) ([]byte, error)
IsCanonical(root [32]byte) bool
FinalizedEpoch() types.Epoch
JustifiedEpoch() types.Epoch
ForkChoiceNodes() []*pbrpc.ForkChoiceNode
NodeCount() int
}
// Setter allows to set forkchoice information
type Setter interface {
SetOptimisticToValid(context.Context, [fieldparams.RootLength]byte) error
SetOptimisticToInvalid(context.Context, [fieldparams.RootLength]byte) error
}

View File

@@ -5,7 +5,7 @@ go_library(
srcs = [
"doc.go",
"errors.go",
"helpers.go",
"forkchoice.go",
"metrics.go",
"node.go",
"optimistic_sync.go",
@@ -21,8 +21,6 @@ go_library(
deps = [
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
@@ -36,7 +34,7 @@ go_test(
name = "go_default_test",
srcs = [
"ffg_update_test.go",
"helpers_test.go",
"forkchoice_test.go",
"no_vote_test.go",
"node_test.go",
"optimistic_sync_test.go",

View File

@@ -2,14 +2,9 @@ package protoarray
import "errors"
var errNilNode = errors.New("invalid nil or unknown node")
var errInvalidBalance = errors.New("invalid node balance")
var errInvalidProposerBoostRoot = errors.New("invalid proposer boost root")
var errUnknownFinalizedRoot = errors.New("unknown finalized root")
var errUnknownJustifiedRoot = errors.New("unknown justified root")
var errInvalidNodeIndex = errors.New("node index is invalid")
var errUnknownNodeRoot = errors.New("unknown block root")
var errInvalidJustifiedIndex = errors.New("justified index is invalid")
var errInvalidBestChildIndex = errors.New("best child index is invalid")
var errInvalidBestDescendantIndex = errors.New("best descendant index is invalid")
var errInvalidParentDelta = errors.New("parent delta is invalid")
var errInvalidNodeDelta = errors.New("node delta is invalid")
var errInvalidDeltaLength = errors.New("delta length is invalid")
var errInvalidSyncedTips = errors.New("invalid synced tips")
var errInvalidOptimisticStatus = errors.New("invalid optimistic status")

View File

@@ -27,9 +27,9 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
// 2 <- justified: 1, finalized: 0
// |
// 3 <- justified: 2, finalized: 1
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 1, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(3), indexToHash(2), 2, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 1, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(3), indexToHash(2), 2, 1, false))
// With starting justified epoch at 0, the head should be 3:
// 0 <- start
@@ -89,17 +89,17 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
// | |
// justified: 2, finalized: 0 -> 9 10 <- justified: 2, finalized: 0
// Left branch.
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(3), indexToHash(1), 1, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(5), indexToHash(3), 1, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(7), indexToHash(5), 1, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(9), indexToHash(7), 2, 0))
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(3), indexToHash(1), 1, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(5), indexToHash(3), 1, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(7), indexToHash(5), 1, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(9), indexToHash(7), 2, 0, false))
// Right branch.
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(4), indexToHash(2), 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(6), indexToHash(4), 0, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(8), indexToHash(6), 1, 0))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(10), indexToHash(8), 2, 0))
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(4), indexToHash(2), 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(6), indexToHash(4), 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(8), indexToHash(6), 1, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(10), indexToHash(8), 2, 0, false))
// With start at 0, the head should be 10:
// 0 <-- start
@@ -183,18 +183,11 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
}
func setup(justifiedEpoch, finalizedEpoch types.Epoch) *ForkChoice {
f := New(justifiedEpoch, finalizedEpoch, params.BeaconConfig().ZeroHash)
f.store.nodesIndices[params.BeaconConfig().ZeroHash] = 0
f.store.nodes = append(f.store.nodes, &Node{
slot: 0,
root: params.BeaconConfig().ZeroHash,
parent: NonExistentNode,
justifiedEpoch: justifiedEpoch,
finalizedEpoch: finalizedEpoch,
bestChild: NonExistentNode,
bestDescendant: NonExistentNode,
weight: 0,
})
ctx := context.Background()
f := New(justifiedEpoch, finalizedEpoch)
err := f.ProcessBlock(ctx, 0, params.BeaconConfig().ZeroHash, [32]byte{}, justifiedEpoch, finalizedEpoch, false)
if err != nil {
return nil
}
return f
}

View File

@@ -1,4 +1,4 @@
package doublylinkedtree
package protoarray
import (
"context"
@@ -7,10 +7,12 @@ import (
types "github.com/prysmaticlabs/eth2-types"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"go.opencensus.io/trace"
)
// This tracks the last reported head root. Used for metrics.
var lastHeadRoot [32]byte
// New initializes a new fork choice store.
func New(justifiedEpoch, finalizedEpoch types.Epoch) *ForkChoice {
s := &Store{
@@ -26,10 +28,8 @@ func New(justifiedEpoch, finalizedEpoch types.Epoch) *ForkChoice {
return &ForkChoice{store: s, balances: b, votes: v}
}
// NodeCount returns the current number of nodes in the Store.
// NodeCount returns the current number of nodes in the Store
func (f *ForkChoice) NodeCount() int {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
return len(f.store.nodeByRoot)
}
@@ -42,7 +42,7 @@ func (f *ForkChoice) Head(
justifiedStateBalances []uint64,
finalizedEpoch types.Epoch,
) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.Head")
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.Head")
defer span.End()
f.votesLock.Lock()
defer f.votesLock.Unlock()
@@ -77,7 +77,7 @@ func (f *ForkChoice) Head(
// ProcessAttestation processes attestations for vote accounting. It iterates over validator indices
// and updates their votes accordingly.
func (f *ForkChoice) ProcessAttestation(ctx context.Context, validatorIndices []uint64, blockRoot [32]byte, targetEpoch types.Epoch) {
_, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.ProcessAttestation")
_, span := trace.StartSpan(ctx, "protoArrayForkChoice.ProcessAttestation")
defer span.End()
f.votesLock.Lock()
defer f.votesLock.Unlock()
@@ -102,17 +102,17 @@ func (f *ForkChoice) ProcessAttestation(ctx context.Context, validatorIndices []
processedAttestationCount.Inc()
}
// InsertOptimisticBlock processes a new block by inserting it to the fork choice store.
func (f *ForkChoice) InsertOptimisticBlock(
// ProcessBlock processes a new block by inserting it to the fork choice store.
func (f *ForkChoice) ProcessBlock(
ctx context.Context,
slot types.Slot,
blockRoot, parentRoot [fieldparams.RootLength]byte,
justifiedEpoch, finalizedEpoch types.Epoch,
justifiedEpoch, finalizedEpoch types.Epoch, optimistic bool,
) error {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.InsertOptimisticBlock")
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.ProcessBlock")
defer span.End()
return f.store.insert(ctx, slot, blockRoot, parentRoot, justifiedEpoch, finalizedEpoch)
return f.store.insert(ctx, slot, blockRoot, parentRoot, justifiedEpoch, finalizedEpoch, optimistic)
}
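The hunk above renames InsertOptimisticBlock to ProcessBlock and threads an explicit optimistic flag through to store.insert. A hypothetical test-style sketch (not part of this changeset) that uses only names visible in this diff would be:

func TestProcessBlock_OptimisticFlag(t *testing.T) {
	f := setup(0, 0)
	ctx := context.Background()
	// The trailing boolean is the new optimistic flag: true marks the block
	// as inserted before its execution payload was validated.
	require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, true))
	opt, err := f.IsOptimistic(indexToHash(1))
	require.NoError(t, err)
	require.Equal(t, true, opt)
}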
// Prune prunes the fork choice store with the new finalized root. The store is only pruned if the input
@@ -168,7 +168,7 @@ func (f *ForkChoice) IsCanonical(root [32]byte) bool {
}
// IsOptimistic returns true if the given root has been optimistically synced.
func (f *ForkChoice) IsOptimistic(_ context.Context, root [32]byte) (bool, error) {
func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
@@ -180,6 +180,11 @@ func (f *ForkChoice) IsOptimistic(_ context.Context, root [32]byte) (bool, error
return node.optimistic, nil
}
// Store returns the fork choice store
func (f *ForkChoice) Store() *Store {
return f.store
}
// AncestorRoot returns the ancestor root of input block root at a given slot.
func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types.Slot) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "protoArray.AncestorRoot")
@@ -267,41 +272,3 @@ func (f *ForkChoice) updateBalances(newBalances []uint64) error {
func (f *ForkChoice) Tips() ([][32]byte, []types.Slot) {
return f.store.tips()
}
// ProposerBoost returns the proposerBoost of the store
func (f *ForkChoice) ProposerBoost() [fieldparams.RootLength]byte {
return f.store.proposerBoost()
}
// SetOptimisticToValid sets the node with the given root as a fully validated node
func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [fieldparams.RootLength]byte) error {
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return errNilNode
}
return node.setNodeAndParentValidated(ctx)
}
// JustifiedEpoch of fork choice store.
func (f *ForkChoice) JustifiedEpoch() types.Epoch {
return f.store.justifiedEpoch
}
// FinalizedEpoch of fork choice store.
func (f *ForkChoice) FinalizedEpoch() types.Epoch {
return f.store.finalizedEpoch
}
func (f *ForkChoice) ForkChoiceNodes() []*pbrpc.ForkChoiceNode {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
ret := make([]*pbrpc.ForkChoiceNode, len(f.store.nodeByRoot))
return f.store.treeRootNode.rpcNodes(ret)
}
// SetOptimisticToInvalid removes a block with an invalid execution payload from fork choice store
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root [fieldparams.RootLength]byte) error {
return f.store.removeNode(ctx, root)
}
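SetOptimisticToValid and SetOptimisticToInvalid are now thin wrappers: the first walks up the parent pointers clearing the optimistic flag, the second hands the root to store.removeNode, which drops the block together with its whole subtree. A hypothetical test-form sketch of the invalidation path (behavior inferred only from the code in this diff, not verified against the repository):

func TestSetOptimisticToInvalid_PrunesSubtree(t *testing.T) {
	f := setup(1, 1)
	ctx := context.Background()
	require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, true))
	require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1, true))
	// Invalidating block 1 should also remove its descendant, block 2,
	// leaving only the genesis node in the store.
	require.NoError(t, f.SetOptimisticToInvalid(ctx, indexToHash(1)))
	require.Equal(t, 1, f.NodeCount())
}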

View File

@@ -1,4 +1,4 @@
package doublylinkedtree
package protoarray
import (
"context"
@@ -15,9 +15,9 @@ import (
func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0))
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0, false))
f.votes = []Vote{
{indexToHash(1), indexToHash(1), 0},
@@ -37,9 +37,9 @@ func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) {
func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0))
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0, false))
s := f.store
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
@@ -61,12 +61,12 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
func TestForkChoice_IsCanonical(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(1), 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 4, indexToHash(4), indexToHash(2), 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, indexToHash(5), indexToHash(4), 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 6, indexToHash(6), indexToHash(5), 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(1), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 4, indexToHash(4), indexToHash(2), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 5, indexToHash(5), indexToHash(4), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 6, indexToHash(6), indexToHash(5), 1, 1, false))
require.Equal(t, true, f.IsCanonical(params.BeaconConfig().ZeroHash))
require.Equal(t, false, f.IsCanonical(indexToHash(1)))
@@ -80,12 +80,12 @@ func TestForkChoice_IsCanonical(t *testing.T) {
func TestForkChoice_IsCanonicalReorg(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, [32]byte{'2'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, [32]byte{'3'}, [32]byte{'1'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 4, [32]byte{'4'}, [32]byte{'2'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, [32]byte{'5'}, [32]byte{'4'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 6, [32]byte{'6'}, [32]byte{'5'}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 1, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, [32]byte{'2'}, params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 3, [32]byte{'3'}, [32]byte{'1'}, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 4, [32]byte{'4'}, [32]byte{'2'}, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 5, [32]byte{'5'}, [32]byte{'4'}, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 6, [32]byte{'6'}, [32]byte{'5'}, 1, 1, false))
f.store.nodesLock.Lock()
f.store.nodeByRoot[[32]byte{'3'}].balance = 10
@@ -114,9 +114,9 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
func TestForkChoice_AncestorRoot(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, indexToHash(3), indexToHash(2), 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 5, indexToHash(3), indexToHash(2), 1, 1, false))
f.store.treeRootNode = f.store.nodeByRoot[indexToHash(1)]
f.store.treeRootNode.parent = nil
@@ -140,8 +140,8 @@ func TestForkChoice_AncestorRoot(t *testing.T) {
func TestForkChoice_AncestorEqualSlot(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'3'}, [32]byte{'1'}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'3'}, [32]byte{'1'}, 1, 1, false))
r, err := f.AncestorRoot(ctx, [32]byte{'3'}, 100)
require.NoError(t, err)
@@ -152,8 +152,8 @@ func TestForkChoice_AncestorEqualSlot(t *testing.T) {
func TestForkChoice_AncestorLowerSlot(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 200, [32]byte{'3'}, [32]byte{'1'}, 1, 1))
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 200, [32]byte{'3'}, [32]byte{'1'}, 1, 1, false))
r, err := f.AncestorRoot(ctx, [32]byte{'3'}, 150)
require.NoError(t, err)

View File

@@ -1,101 +0,0 @@
package protoarray
import (
"context"
"github.com/prysmaticlabs/prysm/config/params"
pmath "github.com/prysmaticlabs/prysm/math"
"go.opencensus.io/trace"
)
// This computes validator balance deltas from validator votes.
// It returns a list of deltas that represents the difference between old balances and new balances.
func computeDeltas(
ctx context.Context,
blockIndices map[[32]byte]uint64,
votes []Vote,
oldBalances, newBalances []uint64,
) ([]int, []Vote, error) {
_, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.computeDeltas")
defer span.End()
deltas := make([]int, len(blockIndices))
for validatorIndex, vote := range votes {
oldBalance := uint64(0)
newBalance := uint64(0)
// Skip if the validator has never voted for the current root and next root (i.e. if the
// votes are the zero hash, aka the genesis block); there's nothing to compute.
if vote.currentRoot == params.BeaconConfig().ZeroHash && vote.nextRoot == params.BeaconConfig().ZeroHash {
continue
}
// If the validator index does not exist in the `oldBalances` or `newBalances` lists above, the balance is just 0.
if validatorIndex < len(oldBalances) {
oldBalance = oldBalances[validatorIndex]
}
if validatorIndex < len(newBalances) {
newBalance = newBalances[validatorIndex]
}
// Perform delta only if the validator's balance or vote has changed.
if vote.currentRoot != vote.nextRoot || oldBalance != newBalance {
// Ignore the vote if it's not known in `blockIndices`,
// that means we have not seen the block before.
nextDeltaIndex, ok := blockIndices[vote.nextRoot]
if ok {
// Protection against out-of-bounds access: the `nextDeltaIndex`, which defines
// the block location in the dag, cannot exceed the total `delta` length.
if nextDeltaIndex >= uint64(len(deltas)) {
return nil, nil, errInvalidNodeDelta
}
delta, err := pmath.Int(newBalance)
if err != nil {
return nil, nil, err
}
deltas[nextDeltaIndex] += delta
}
currentDeltaIndex, ok := blockIndices[vote.currentRoot]
if ok {
// Protection against out of bound (same as above)
if currentDeltaIndex >= uint64(len(deltas)) {
return nil, nil, errInvalidNodeDelta
}
delta, err := pmath.Int(oldBalance)
if err != nil {
return nil, nil, err
}
deltas[currentDeltaIndex] -= delta
}
}
// Rotate the validator vote.
vote.currentRoot = vote.nextRoot
votes[validatorIndex] = vote
}
return deltas, votes, nil
}
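Concretely, the bookkeeping above subtracts a validator's old balance from the root it voted for last time and adds its new balance to the root it votes for now. A standalone toy walk-through (hypothetical names, not the Prysm types) for one validator whose balance grows from 32 to 64 while its vote moves from rootA to rootB:

package main

import "fmt"

func main() {
	indices := map[string]int{"rootA": 0, "rootB": 1}
	oldBalance, newBalance := 32, 64
	deltas := make([]int, len(indices))
	deltas[indices["rootB"]] += newBalance // the vote arrives at the new root
	deltas[indices["rootA"]] -= oldBalance // the vote leaves the old root
	fmt.Println(deltas) // prints [-32 64]
}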
// This returns a copy of the proto array node object.
func copyNode(node *Node) *Node {
if node == nil {
return &Node{}
}
copiedRoot := [32]byte{}
copy(copiedRoot[:], node.root[:])
return &Node{
slot: node.slot,
root: copiedRoot,
parent: node.parent,
justifiedEpoch: node.justifiedEpoch,
finalizedEpoch: node.finalizedEpoch,
weight: node.weight,
bestChild: node.bestChild,
bestDescendant: node.bestDescendant,
}
}

View File

@@ -1,249 +0,0 @@
package protoarray
import (
"context"
"encoding/binary"
"testing"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestComputeDelta_ZeroHash(t *testing.T) {
validatorCount := uint64(16)
indices := make(map[[32]byte]uint64)
votes := make([]Vote, 0)
oldBalances := make([]uint64, 0)
newBalances := make([]uint64, 0)
for i := uint64(0); i < validatorCount; i++ {
indices[indexToHash(i)] = i
votes = append(votes, Vote{params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0})
oldBalances = append(oldBalances, 0)
newBalances = append(newBalances, 0)
}
delta, _, err := computeDeltas(context.Background(), indices, votes, oldBalances, newBalances)
require.NoError(t, err)
assert.Equal(t, int(validatorCount), len(delta))
for _, d := range delta {
assert.Equal(t, 0, d)
}
for _, vote := range votes {
assert.Equal(t, vote.currentRoot, vote.nextRoot, "The vote should not have changed")
}
}
func TestComputeDelta_AllVoteTheSame(t *testing.T) {
validatorCount := uint64(16)
balance := uint64(32)
indices := make(map[[32]byte]uint64)
votes := make([]Vote, 0)
oldBalances := make([]uint64, 0)
newBalances := make([]uint64, 0)
for i := uint64(0); i < validatorCount; i++ {
indices[indexToHash(i)] = i
votes = append(votes, Vote{params.BeaconConfig().ZeroHash, indexToHash(0), 0})
oldBalances = append(oldBalances, balance)
newBalances = append(newBalances, balance)
}
delta, _, err := computeDeltas(context.Background(), indices, votes, oldBalances, newBalances)
require.NoError(t, err)
assert.Equal(t, int(validatorCount), len(delta))
for i, d := range delta {
if i == 0 {
assert.Equal(t, balance*validatorCount, uint64(d))
} else {
assert.Equal(t, 0, d)
}
}
for _, vote := range votes {
assert.Equal(t, vote.currentRoot, vote.nextRoot, "The vote should not have changed")
}
}
func TestComputeDelta_DifferentVotes(t *testing.T) {
validatorCount := uint64(16)
balance := uint64(32)
indices := make(map[[32]byte]uint64)
votes := make([]Vote, 0)
oldBalances := make([]uint64, 0)
newBalances := make([]uint64, 0)
for i := uint64(0); i < validatorCount; i++ {
indices[indexToHash(i)] = i
votes = append(votes, Vote{params.BeaconConfig().ZeroHash, indexToHash(i), 0})
oldBalances = append(oldBalances, balance)
newBalances = append(newBalances, balance)
}
delta, _, err := computeDeltas(context.Background(), indices, votes, oldBalances, newBalances)
require.NoError(t, err)
assert.Equal(t, int(validatorCount), len(delta))
for _, d := range delta {
assert.Equal(t, balance, uint64(d))
}
for _, vote := range votes {
assert.Equal(t, vote.currentRoot, vote.nextRoot, "The vote should not have changed")
}
}
func TestComputeDelta_MovingVotes(t *testing.T) {
validatorCount := uint64(16)
balance := uint64(32)
indices := make(map[[32]byte]uint64)
votes := make([]Vote, 0)
oldBalances := make([]uint64, 0)
newBalances := make([]uint64, 0)
lastIndex := uint64(len(indices) - 1)
for i := uint64(0); i < validatorCount; i++ {
indices[indexToHash(i)] = i
votes = append(votes, Vote{indexToHash(0), indexToHash(lastIndex), 0})
oldBalances = append(oldBalances, balance)
newBalances = append(newBalances, balance)
}
delta, _, err := computeDeltas(context.Background(), indices, votes, oldBalances, newBalances)
require.NoError(t, err)
assert.Equal(t, int(validatorCount), len(delta))
for i, d := range delta {
if i == 0 {
assert.Equal(t, -int(balance*validatorCount), d, "First root should have negative delta")
} else if i == int(lastIndex) {
assert.Equal(t, int(balance*validatorCount), d, "Last root should have positive delta")
} else {
assert.Equal(t, 0, d)
}
}
for _, vote := range votes {
assert.Equal(t, vote.currentRoot, vote.nextRoot, "The vote should not have changed")
}
}
func TestComputeDelta_MoveOutOfTree(t *testing.T) {
balance := uint64(32)
indices := make(map[[32]byte]uint64)
votes := make([]Vote, 0)
oldBalances := []uint64{balance, balance}
newBalances := []uint64{balance, balance}
indices[indexToHash(1)] = 0
votes = append(votes,
Vote{indexToHash(1), params.BeaconConfig().ZeroHash, 0},
Vote{indexToHash(1), [32]byte{'A'}, 0})
delta, _, err := computeDeltas(context.Background(), indices, votes, oldBalances, newBalances)
require.NoError(t, err)
assert.Equal(t, 1, len(delta))
assert.Equal(t, 0-2*int(balance), delta[0])
for _, vote := range votes {
assert.Equal(t, vote.currentRoot, vote.nextRoot, "The vote should not have changed")
}
}
func TestComputeDelta_ChangingBalances(t *testing.T) {
oldBalance := uint64(32)
newBalance := oldBalance * 2
validatorCount := uint64(16)
indices := make(map[[32]byte]uint64)
votes := make([]Vote, 0)
oldBalances := make([]uint64, 0)
newBalances := make([]uint64, 0)
indices[indexToHash(1)] = 0
for i := uint64(0); i < validatorCount; i++ {
indices[indexToHash(i)] = i
votes = append(votes, Vote{indexToHash(0), indexToHash(1), 0})
oldBalances = append(oldBalances, oldBalance)
newBalances = append(newBalances, newBalance)
}
delta, _, err := computeDeltas(context.Background(), indices, votes, oldBalances, newBalances)
require.NoError(t, err)
assert.Equal(t, 16, len(delta))
for i, d := range delta {
if i == 0 {
assert.Equal(t, -int(oldBalance*validatorCount), d, "First root should have negative delta")
} else if i == 1 {
assert.Equal(t, int(newBalance*validatorCount), d, "Last root should have positive delta")
} else {
assert.Equal(t, 0, d)
}
}
for _, vote := range votes {
assert.Equal(t, vote.currentRoot, vote.nextRoot, "The vote should not have changed")
}
}
func TestComputeDelta_ValidatorAppear(t *testing.T) {
balance := uint64(32)
indices := make(map[[32]byte]uint64)
votes := make([]Vote, 0)
oldBalances := []uint64{balance}
newBalances := []uint64{balance, balance}
indices[indexToHash(1)] = 0
indices[indexToHash(2)] = 1
votes = append(votes,
Vote{indexToHash(1), indexToHash(2), 0},
Vote{indexToHash(1), indexToHash(2), 0})
delta, _, err := computeDeltas(context.Background(), indices, votes, oldBalances, newBalances)
require.NoError(t, err)
assert.Equal(t, 2, len(delta))
assert.Equal(t, 0-int(balance), delta[0])
assert.Equal(t, 2*int(balance), delta[1])
for _, vote := range votes {
assert.Equal(t, vote.currentRoot, vote.nextRoot, "The vote should not have changed")
}
}
func TestComputeDelta_ValidatorDisappears(t *testing.T) {
balance := uint64(32)
indices := make(map[[32]byte]uint64)
votes := make([]Vote, 0)
oldBalances := []uint64{balance, balance}
newBalances := []uint64{balance}
indices[indexToHash(1)] = 0
indices[indexToHash(2)] = 1
votes = append(votes,
Vote{indexToHash(1), indexToHash(2), 0},
Vote{indexToHash(1), indexToHash(2), 0})
delta, _, err := computeDeltas(context.Background(), indices, votes, oldBalances, newBalances)
require.NoError(t, err)
assert.Equal(t, 2, len(delta))
assert.Equal(t, 0-2*int(balance), delta[0])
assert.Equal(t, int(balance), delta[1])
for _, vote := range votes {
assert.Equal(t, vote.currentRoot, vote.nextRoot, "The vote should not have changed")
}
}
func indexToHash(i uint64) [32]byte {
var b [8]byte
binary.LittleEndian.PutUint64(b[:], i)
return hash.Hash(b[:])
}
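The indexToHash helper above gives the tests deterministic 32-byte roots from small integers. A standalone mirror, assuming hash.Hash is a plain SHA-256 (an assumption; the real helper may differ):

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

func indexToHash(i uint64) [32]byte {
	var b [8]byte
	binary.LittleEndian.PutUint64(b[:], i)
	return sha256.Sum256(b[:]) // assumed stand-in for prysm's hash.Hash
}

func main() {
	fmt.Printf("%x\n", indexToHash(1))
}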

View File

@@ -48,16 +48,10 @@ var (
Help: "The number of times pruning happened.",
},
)
lastSyncedTipSlot = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "proto_array_last_synced_tip_slot",
Help: "The slot of the last fully validated block added to the proto array.",
},
)
syncedTipsCount = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "proto_array_synced_tips_count",
Help: "The number of elements in the syncedTips structure.",
optimisticCount = promauto.NewCounter(
prometheus.CounterOpts{
Name: "proto_array_optimistic_count",
Help: "The number of blocks that have been optimistically synced.",
},
)
)

View File

@@ -24,7 +24,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// 0
// /
// 2 <- head
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, false))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -33,7 +33,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// 0
// / \
// head -> 2 1
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -44,7 +44,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// head -> 2 1
// |
// 3
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1, false))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
@@ -55,7 +55,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// 2 1
// | |
// head -> 4 3
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(4), indexToHash(2), 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(4), indexToHash(2), 1, 1, false))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
@@ -68,7 +68,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// head -> 4 3
// |
// 5 <- justified epoch = 2
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 1, false))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
@@ -107,7 +107,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
// 5
// |
// 6 <- head
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(6), indexToHash(5), 2, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(6), indexToHash(5), 2, 1, false))
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 1)
require.NoError(t, err)
assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 2")

View File

@@ -1,7 +1,11 @@
package protoarray
import (
"bytes"
"context"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/config/params"
)
// Slot of the fork choice node.
@@ -9,13 +13,23 @@ func (n *Node) Slot() types.Slot {
return n.slot
}
// Balance returns the current balance of the Node.
func (n *Node) Balance() uint64 {
return n.balance
}
// Optimistic returns the optimistic status of the node.
func (n *Node) Optimistic() bool {
return n.optimistic
}
// Root of the fork choice node.
func (n *Node) Root() [32]byte {
return n.root
}
// Parent of the fork choice node.
func (n *Node) Parent() uint64 {
func (n *Node) Parent() *Node {
return n.parent
}
@@ -34,17 +48,126 @@ func (n *Node) Weight() uint64 {
return n.weight
}
// BestChild of the fork choice node.
func (n *Node) BestChild() uint64 {
return n.bestChild
// Children returns the children of this node.
func (n *Node) Children() []*Node {
return n.children
}
// depth returns the length of the path to the root of the Fork Choice tree.
func (n *Node) depth() uint64 {
ret := uint64(0)
for node := n.parent; node != nil; node = node.parent {
ret += 1
}
return ret
}
// applyWeightChanges recomputes the weight of the node passed as an argument and all of its descendants,
// using the current balance stored in each node. This function requires a lock
// on Store.nodesLock.
func (n *Node) applyWeightChanges(ctx context.Context) error {
// Recursively call the children to sum their weights.
childrenWeight := uint64(0)
for _, child := range n.children {
if ctx.Err() != nil {
return ctx.Err()
}
if err := child.applyWeightChanges(ctx); err != nil {
return err
}
childrenWeight += child.weight
}
if n.root == params.BeaconConfig().ZeroHash {
return nil
}
n.weight = n.balance + childrenWeight
return nil
}
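The recursion above makes each node's weight its own attestation balance plus the weights of all of its children, computed bottom-up (the zero-hash tree root is skipped). A self-contained toy mirror with hypothetical names, omitting the root special case and context handling:

type wNode struct {
	balance  uint64
	weight   uint64
	children []*wNode
}

// applyWeights recomputes weights bottom-up: children first, then the node.
func applyWeights(n *wNode) uint64 {
	sum := uint64(0)
	for _, c := range n.children {
		sum += applyWeights(c)
	}
	n.weight = n.balance + sum
	return n.weight
}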
// updateBestDescendant updates the best descendant of this node and its children.
func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch types.Epoch) error {
if ctx.Err() != nil {
return ctx.Err()
}
if len(n.children) == 0 {
n.bestDescendant = nil
return nil
}
var bestChild *Node
bestWeight := uint64(0)
hasViableDescendant := false
for _, child := range n.children {
if child == nil {
return errNilNode
}
if err := child.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch); err != nil {
return err
}
childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, finalizedEpoch)
if childLeadsToViableHead && !hasViableDescendant {
// The child leads to a viable head, but the current
// parent's best child doesn't.
bestWeight = child.weight
bestChild = child
hasViableDescendant = true
} else if childLeadsToViableHead {
// If both are viable, compare their weights.
if child.weight == bestWeight {
// Tie-breaker of equal weights by root.
if bytes.Compare(child.root[:], bestChild.root[:]) > 0 {
bestChild = child
}
} else if child.weight > bestWeight {
bestChild = child
bestWeight = child.weight
}
}
}
if hasViableDescendant {
if bestChild.bestDescendant == nil {
n.bestDescendant = bestChild
} else {
n.bestDescendant = bestChild.bestDescendant
}
} else {
n.bestDescendant = nil
}
return nil
}
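When two viable children carry the same weight, the loop above breaks the tie by root bytes: the child whose root compares greater wins. A tiny illustrative helper (not in the changeset, and relying on the bytes import already present in this file) isolating that rule:

// pickOnTie returns the root that wins an equal-weight tie, mirroring the
// bytes.Compare branch above.
func pickOnTie(a, b [32]byte) [32]byte {
	if bytes.Compare(a[:], b[:]) > 0 {
		return a
	}
	return b
}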
// viableForHead returns true if the node is viable to be the head.
// Any node with a different finalized or justified epoch than
// the ones in the fork choice store should not be viable to be the head.
func (n *Node) viableForHead(justifiedEpoch, finalizedEpoch types.Epoch) bool {
justified := justifiedEpoch == n.justifiedEpoch || justifiedEpoch == 0
finalized := finalizedEpoch == n.finalizedEpoch || finalizedEpoch == 0
return justified && finalized
}
func (n *Node) leadsToViableHead(justifiedEpoch, finalizedEpoch types.Epoch) bool {
if n.bestDescendant == nil {
return n.viableForHead(justifiedEpoch, finalizedEpoch)
}
return n.bestDescendant.viableForHead(justifiedEpoch, finalizedEpoch)
}
// BestDescendant of the fork choice node.
func (n *Node) BestDescendant() uint64 {
func (n *Node) BestDescendant() *Node {
return n.bestDescendant
}
// Graffiti of the fork choice node.
func (n *Node) Graffiti() [32]byte {
return n.graffiti
// setNodeAndParentValidated sets the current node and its optimistic ancestors as validated
// (i.e. non-optimistic), stopping at the first node that is already validated.
func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
if ctx.Err() != nil {
return ctx.Err()
}
if !n.optimistic || n.parent == nil {
return nil
}
n.optimistic = false
return n.parent.setNodeAndParentValidated(ctx)
}

View File

@@ -1,22 +1,23 @@
package protoarray
import (
"context"
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)
func TestNode_Getters(t *testing.T) {
slot := types.Slot(100)
root := [32]byte{'a'}
parent := uint64(10)
parent := &Node{}
jEpoch := types.Epoch(20)
fEpoch := types.Epoch(30)
weight := uint64(10000)
bestChild := uint64(5)
bestDescendant := uint64(4)
graffiti := [32]byte{'b'}
balance := uint64(10)
n := &Node{
slot: slot,
root: root,
@@ -24,9 +25,7 @@ func TestNode_Getters(t *testing.T) {
justifiedEpoch: jEpoch,
finalizedEpoch: fEpoch,
weight: weight,
bestChild: bestChild,
bestDescendant: bestDescendant,
graffiti: graffiti,
balance: balance,
}
require.Equal(t, slot, n.Slot())
@@ -35,7 +34,199 @@ func TestNode_Getters(t *testing.T) {
require.Equal(t, jEpoch, n.JustifiedEpoch())
require.Equal(t, fEpoch, n.FinalizedEpoch())
require.Equal(t, weight, n.Weight())
require.Equal(t, bestChild, n.BestChild())
require.Equal(t, bestDescendant, n.BestDescendant())
require.Equal(t, graffiti, n.Graffiti())
descendantNil := n.bestDescendant == nil
require.Equal(t, true, descendantNil)
}
func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0, false))
// The updated balance of each node is 100.
s := f.store
s.nodesLock.Lock()
defer s.nodesLock.Unlock()
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
s.nodeByRoot[indexToHash(3)].balance = 100
assert.NoError(t, s.treeRootNode.applyWeightChanges(ctx))
assert.Equal(t, uint64(300), s.nodeByRoot[indexToHash(1)].weight)
assert.Equal(t, uint64(200), s.nodeByRoot[indexToHash(2)].weight)
assert.Equal(t, uint64(100), s.nodeByRoot[indexToHash(3)].weight)
}
func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0, false))
// The updated balance of each node is 100.
s := f.store
s.nodesLock.Lock()
defer s.nodesLock.Unlock()
s.nodeByRoot[indexToHash(1)].weight = 400
s.nodeByRoot[indexToHash(2)].weight = 400
s.nodeByRoot[indexToHash(3)].weight = 400
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
s.nodeByRoot[indexToHash(3)].balance = 100
assert.NoError(t, s.treeRootNode.applyWeightChanges(ctx))
assert.Equal(t, uint64(300), s.nodeByRoot[indexToHash(1)].weight)
assert.Equal(t, uint64(200), s.nodeByRoot[indexToHash(2)].weight)
assert.Equal(t, uint64(100), s.nodeByRoot[indexToHash(3)].weight)
}
func TestNode_UpdateBestDescendant_NonViableChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is not viable.
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 2, 3, false))
// Verify parent's best child and best descendant are `none`.
s := f.store
assert.Equal(t, 1, len(s.treeRootNode.children))
nilBestDescendant := s.treeRootNode.bestDescendant == nil
assert.Equal(t, true, nilBestDescendant)
}
func TestNode_UpdateBestDescendant_ViableChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is best descendant
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
s := f.store
assert.Equal(t, 1, len(s.treeRootNode.children))
assert.Equal(t, s.treeRootNode.children[0], s.treeRootNode.bestDescendant)
}
func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is best descendant
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, false))
s := f.store
s.nodeByRoot[indexToHash(1)].weight = 100
s.nodeByRoot[indexToHash(2)].weight = 200
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1))
assert.Equal(t, 2, len(s.treeRootNode.children))
assert.Equal(t, s.treeRootNode.children[1], s.treeRootNode.bestDescendant)
}
func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is best descendant
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, false))
s := f.store
s.nodeByRoot[indexToHash(1)].weight = 200
s.nodeByRoot[indexToHash(2)].weight = 100
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1))
assert.Equal(t, 2, len(s.treeRootNode.children))
assert.Equal(t, s.treeRootNode.children[0], s.treeRootNode.bestDescendant)
}
func TestNode_TestDepth(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is best descendant
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1, false))
s := f.store
require.Equal(t, s.nodeByRoot[indexToHash(2)].depth(), uint64(2))
require.Equal(t, s.nodeByRoot[indexToHash(3)].depth(), uint64(1))
}
func TestNode_ViableForHead(t *testing.T) {
tests := []struct {
n *Node
justifiedEpoch types.Epoch
finalizedEpoch types.Epoch
want bool
}{
{&Node{}, 0, 0, true},
{&Node{}, 1, 0, false},
{&Node{}, 0, 1, false},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1}, 1, 1, true},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1}, 2, 2, false},
{&Node{finalizedEpoch: 3, justifiedEpoch: 4}, 4, 3, true},
}
for _, tc := range tests {
got := tc.n.viableForHead(tc.justifiedEpoch, tc.finalizedEpoch)
assert.Equal(t, tc.want, got)
}
}
func TestNode_LeadsToViableHead(t *testing.T) {
f := setup(4, 3)
ctx := context.Background()
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(1), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 4, indexToHash(4), indexToHash(2), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 5, indexToHash(5), indexToHash(3), 4, 3, false))
require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 3))
require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 3))
require.Equal(t, false, f.store.nodeByRoot[indexToHash(2)].leadsToViableHead(4, 3))
require.Equal(t, false, f.store.nodeByRoot[indexToHash(4)].leadsToViableHead(4, 3))
}
func TestNode_SetFullyValidated(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// insert blocks in the fork pattern (optimistic status in parenthesis)
//
// 0 (false) -- 1 (false) -- 2 (false) -- 3 (true) -- 4 (true)
// \
// -- 5 (true)
//
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 4, indexToHash(4), indexToHash(3), 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 5, indexToHash(5), indexToHash(1), 1, 1, true))
opt, err := f.IsOptimistic(indexToHash(5))
require.NoError(t, err)
require.Equal(t, true, opt)
opt, err = f.IsOptimistic(indexToHash(4))
require.NoError(t, err)
require.Equal(t, true, opt)
require.NoError(t, f.store.nodeByRoot[indexToHash(4)].setNodeAndParentValidated(ctx))
// block 5 should still be optimistic
opt, err = f.IsOptimistic(indexToHash(5))
require.NoError(t, err)
require.Equal(t, true, opt)
// block 4 and 3 should now be valid
opt, err = f.IsOptimistic(indexToHash(4))
require.NoError(t, err)
require.Equal(t, false, opt)
opt, err = f.IsOptimistic(indexToHash(3))
require.NoError(t, err)
require.Equal(t, false, opt)
}

View File

@@ -2,315 +2,49 @@ package protoarray
import (
"context"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/config/params"
)
// This returns the minimum and maximum slot of the synced_tips tree
func (f *ForkChoice) boundarySyncedTips() (types.Slot, types.Slot) {
f.syncedTips.RLock()
defer f.syncedTips.RUnlock()
// removeNode removes the node with the given root and all of its children
// from the Fork Choice Store.
func (s *Store) removeNode(ctx context.Context, root [32]byte) error {
s.nodesLock.Lock()
defer s.nodesLock.Unlock()
min := params.BeaconConfig().FarFutureSlot
max := types.Slot(0)
for _, slot := range f.syncedTips.validatedTips {
if slot > max {
max = slot
}
if slot < min {
min = slot
}
node, ok := s.nodeByRoot[root]
if !ok || node == nil {
return errNilNode
}
return min, max
}
// IsOptimistic returns true if this node is optimistically synced.
// An optimistically synced block is synced as usual, but its
// execution payload is not validated while the EL is still syncing.
// This function returns an error if the block is not found in the fork choice
// store.
func (f *ForkChoice) IsOptimistic(ctx context.Context, root [32]byte) (bool, error) {
if ctx.Err() != nil {
return false, ctx.Err()
if !node.optimistic || node.parent == nil {
return errInvalidOptimisticStatus
}
// If we reached this point then the block has to be in the Fork Choice
// Store!
f.store.nodesLock.RLock()
index, ok := f.store.nodesIndices[root]
if !ok {
f.store.nodesLock.RUnlock()
return false, errUnknownNodeRoot
}
node := f.store.nodes[index]
slot := node.slot
// If the node is a synced tip, then it's fully validated
f.syncedTips.RLock()
_, ok = f.syncedTips.validatedTips[root]
if ok {
f.syncedTips.RUnlock()
f.store.nodesLock.RUnlock()
return false, nil
}
f.syncedTips.RUnlock()
// If the slot is higher than the max synced tip, it's optimistic
min, max := f.boundarySyncedTips()
if slot > max {
f.store.nodesLock.RUnlock()
return true, nil
}
// If the slot is lower than the min synced tip, it's fully validated
if slot <= min {
f.store.nodesLock.RUnlock()
return false, nil
}
// if the node is a leaf of the Fork Choice tree, then it's
// optimistic
childIndex := node.BestChild()
if childIndex == NonExistentNode {
f.store.nodesLock.RUnlock()
return true, nil
}
// recurse to the child
child := f.store.nodes[childIndex]
root = child.root
f.store.nodesLock.RUnlock()
return f.IsOptimistic(ctx, root)
}
// This function returns the index of the synced tip node that is an ancestor of the input node.
// In the event of none, `NonExistentNode` is returned.
// This internal method assumes the caller holds a lock on syncedTips and s.nodesLock
func (s *Store) findSyncedTip(ctx context.Context, node *Node, syncedTips *optimisticStore) (uint64, error) {
for {
if ctx.Err() != nil {
return 0, ctx.Err()
}
if _, ok := syncedTips.validatedTips[node.root]; ok {
return s.nodesIndices[node.root], nil
}
if node.parent == NonExistentNode {
return NonExistentNode, nil
}
node = s.nodes[node.parent]
}
}
// SetOptimisticToValid is called with the root of a block that was returned as
// VALID by the EL. This routine recomputes and updates the synced_tips map to
// account for this new tip.
func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [32]byte) error {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
// We can only update if given root is in Fork Choice
index, ok := f.store.nodesIndices[root]
if !ok {
return errInvalidNodeIndex
}
// We can only update if root is a leaf in Fork Choice
node := f.store.nodes[index]
if node.bestChild != NonExistentNode {
return errInvalidBestChildIndex
}
// Stop early if the root is part of validated tips
f.syncedTips.Lock()
defer f.syncedTips.Unlock()
_, ok = f.syncedTips.validatedTips[root]
if ok {
return nil
}
// Cache root and slot to validated tips
newTips := make(map[[32]byte]types.Slot)
newValidSlot := node.slot
newTips[root] = newValidSlot
// Compute the full valid path from the given node to its previous synced tip
// This path will now consist of fully validated blocks. Notice that
// the previous tip may have been outside the Fork Choice store.
// In this case, only one block can be in syncedTips as the whole
// Fork Choice would be a descendant of this block.
validPath := make(map[uint64]bool)
validPath[index] = true
for {
if ctx.Err() != nil {
return ctx.Err()
}
parentIndex := node.parent
if parentIndex == NonExistentNode {
break
}
if parentIndex >= uint64(len(f.store.nodes)) {
return errInvalidNodeIndex
}
node = f.store.nodes[parentIndex]
_, ok = f.syncedTips.validatedTips[node.root]
if ok {
break
}
validPath[parentIndex] = true
}
// Retrieve the list of leaves in the Fork Choice
// These are all the nodes that have NonExistentNode as best child.
leaves, err := f.store.leaves()
if err != nil {
return err
}
// For each leaf, recompute the new tip.
for _, i := range leaves {
node = f.store.nodes[i]
j := i
for {
if ctx.Err() != nil {
return ctx.Err()
}
// Stop if we reached the previous tip
_, ok = f.syncedTips.validatedTips[node.root]
if ok {
newTips[node.root] = node.slot
break
}
// Stop if we reach valid path
_, ok = validPath[j]
if ok {
newTips[node.root] = node.slot
break
}
j = node.parent
if j == NonExistentNode {
break
}
if j >= uint64(len(f.store.nodes)) {
return errInvalidNodeIndex
}
node = f.store.nodes[j]
}
}
f.syncedTips.validatedTips = newTips
lastSyncedTipSlot.Set(float64(newValidSlot))
syncedTipsCount.Set(float64(len(newTips)))
return nil
}
// SetOptimisticToInvalid updates the synced_tips map when the block with the given root becomes INVALID.
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root [32]byte) error {
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
idx, ok := f.store.nodesIndices[root]
if !ok {
return errInvalidNodeIndex
}
node := f.store.nodes[idx]
// We only support changing status for the tips in Fork Choice store.
if node.bestChild != NonExistentNode {
return errInvalidNodeIndex
}
parentIndex := node.parent
// This should not happen
if parentIndex == NonExistentNode {
return errInvalidNodeIndex
}
// Update the weights of the nodes subtracting the INVALID node's weight
weight := node.weight
node = f.store.nodes[parentIndex]
for {
if ctx.Err() != nil {
return ctx.Err()
}
node.weight -= weight
if node.parent == NonExistentNode {
break
}
node = f.store.nodes[node.parent]
}
parent := copyNode(f.store.nodes[parentIndex])
// delete the invalid node, order is important
f.store.nodes = append(f.store.nodes[:idx], f.store.nodes[idx+1:]...)
delete(f.store.nodesIndices, root)
// Fix parent and best child for each node
for _, node := range f.store.nodes {
if node.parent == NonExistentNode {
node.parent = NonExistentNode
} else if node.parent > idx {
node.parent -= 1
}
if node.bestChild == NonExistentNode || node.bestChild == idx {
node.bestChild = NonExistentNode
} else if node.bestChild > idx {
node.bestChild -= 1
}
if node.bestDescendant == NonExistentNode || node.bestDescendant == idx {
node.bestDescendant = NonExistentNode
} else if node.bestDescendant > idx {
node.bestDescendant -= 1
}
}
// Update the parent's best child and best descendant if necessary.
if parent.bestChild == idx || parent.bestDescendant == idx {
for childIndex, child := range f.store.nodes {
if child.parent == parentIndex {
err := f.store.updateBestChildAndDescendant(
parentIndex, uint64(childIndex))
if err != nil {
return err
children := node.parent.children
if len(children) == 1 {
node.parent.children = []*Node{}
} else {
for i, n := range children {
if n == node {
if i != len(children)-1 {
children[i] = children[len(children)-1]
}
node.parent.children = children[:len(children)-1]
break
}
}
}
return s.removeNodeAndChildren(ctx, node)
}
// Return early if the parent is not a synced_tip.
f.syncedTips.Lock()
defer f.syncedTips.Unlock()
parentRoot := parent.root
_, ok = f.syncedTips.validatedTips[parentRoot]
if !ok {
return nil
}
leaves, err := f.store.leaves()
if err != nil {
return err
}
for _, i := range leaves {
node = f.store.nodes[i]
for {
if ctx.Err() != nil {
return ctx.Err()
}
// Return early if the parent is still a synced tip
if node.root == parentRoot {
return nil
}
_, ok = f.syncedTips.validatedTips[node.root]
if ok {
break
}
if node.parent == NonExistentNode {
break
}
node = f.store.nodes[node.parent]
// removeNodeAndChildren removes `node` and all of its descendants from the Store.
func (s *Store) removeNodeAndChildren(ctx context.Context, node *Node) error {
for _, child := range node.children {
if ctx.Err() != nil {
return ctx.Err()
}
if err := s.removeNodeAndChildren(ctx, child); err != nil {
return err
}
}
delete(f.syncedTips.validatedTips, parentRoot)
syncedTipsCount.Set(float64(len(f.syncedTips.validatedTips)))
delete(s.nodeByRoot, node.root)
return nil
}
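removeNode above first unlinks the target from its parent's children slice (swap with the last element, then shrink by one), and removeNodeAndChildren then walks the subtree deleting every descendant from nodeByRoot. A compact standalone mirror with hypothetical names:

type rmNode struct {
	root     [32]byte
	parent   *rmNode
	children []*rmNode
}

// removeSubtree unlinks n from its parent and drops n plus all of its
// descendants from the root-indexed map.
func removeSubtree(byRoot map[[32]byte]*rmNode, n *rmNode) {
	if p := n.parent; p != nil {
		for i, c := range p.children {
			if c == n {
				p.children[i] = p.children[len(p.children)-1]
				p.children = p.children[:len(p.children)-1]
				break
			}
		}
	}
	var drop func(*rmNode)
	drop = func(x *rmNode) {
		for _, c := range x.children {
			drop(c)
		}
		delete(byRoot, x.root)
	}
	drop(n)
}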

View File

@@ -4,206 +4,12 @@ import (
"context"
"testing"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/testing/require"
)
// We test the algorithm to check the optimistic status of a node. The
// status for this test is the following branching diagram
//
// -- E -- F
// /
// -- C -- D
// /
// 0 -- 1 -- A -- B -- J -- K
// \ /
// -- G -- H -- I
//
// Here nodes 0, 1, A, B, C, D are fully validated and nodes
// E, F, G, H, J, K are optimistic.
// Synced Tips are nodes B, C, D
// nodes 0 and 1 are outside the Fork Choice Store.
func TestOptimistic(t *testing.T) {
root0 := bytesutil.ToBytes32([]byte("hello0"))
root1 := bytesutil.ToBytes32([]byte("hello1"))
nodeA := &Node{
slot: types.Slot(100),
root: bytesutil.ToBytes32([]byte("helloA")),
bestChild: 1,
}
nodeB := &Node{
slot: types.Slot(101),
root: bytesutil.ToBytes32([]byte("helloB")),
bestChild: 2,
parent: 0,
}
nodeC := &Node{
slot: types.Slot(102),
root: bytesutil.ToBytes32([]byte("helloC")),
bestChild: 3,
parent: 1,
}
nodeD := &Node{
slot: types.Slot(103),
root: bytesutil.ToBytes32([]byte("helloD")),
bestChild: NonExistentNode,
parent: 2,
}
nodeE := &Node{
slot: types.Slot(103),
root: bytesutil.ToBytes32([]byte("helloE")),
bestChild: 5,
parent: 2,
}
nodeF := &Node{
slot: types.Slot(104),
root: bytesutil.ToBytes32([]byte("helloF")),
bestChild: NonExistentNode,
parent: 4,
}
nodeG := &Node{
slot: types.Slot(102),
root: bytesutil.ToBytes32([]byte("helloG")),
bestChild: 7,
parent: 1,
}
nodeH := &Node{
slot: types.Slot(103),
root: bytesutil.ToBytes32([]byte("helloH")),
bestChild: 8,
parent: 6,
}
nodeI := &Node{
slot: types.Slot(104),
root: bytesutil.ToBytes32([]byte("helloI")),
bestChild: NonExistentNode,
parent: 7,
}
nodeJ := &Node{
slot: types.Slot(103),
root: bytesutil.ToBytes32([]byte("helloJ")),
bestChild: 10,
parent: 6,
}
nodeK := &Node{
slot: types.Slot(104),
root: bytesutil.ToBytes32([]byte("helloK")),
bestChild: NonExistentNode,
parent: 9,
}
nodes := []*Node{
nodeA,
nodeB,
nodeC,
nodeD,
nodeE,
nodeF,
nodeG,
nodeH,
nodeI,
nodeJ,
nodeK,
}
ni := map[[32]byte]uint64{
nodeA.root: 0,
nodeB.root: 1,
nodeC.root: 2,
nodeD.root: 3,
nodeE.root: 4,
nodeF.root: 5,
nodeG.root: 6,
nodeH.root: 7,
nodeI.root: 8,
nodeJ.root: 9,
nodeK.root: 10,
}
s := &Store{
nodes: nodes,
nodesIndices: ni,
}
tips := map[[32]byte]types.Slot{
nodeB.root: nodeB.slot,
nodeC.root: nodeC.slot,
nodeD.root: nodeD.slot,
}
st := &optimisticStore{
validatedTips: tips,
}
f := &ForkChoice{
store: s,
syncedTips: st,
}
ctx := context.Background()
// We test the implementation of boundarySyncedTips
min, max := f.boundarySyncedTips()
require.Equal(t, min, types.Slot(101), "minimum tip slot is different")
require.Equal(t, max, types.Slot(103), "maximum tip slot is different")
// We test first nodes outside the Fork Choice store
_, err := f.IsOptimistic(ctx, root0)
require.ErrorIs(t, errUnknownNodeRoot, err)
_, err = f.IsOptimistic(ctx, root1)
require.ErrorIs(t, errUnknownNodeRoot, err)
// We check all nodes in the Fork Choice store.
op, err := f.IsOptimistic(ctx, nodeA.root)
require.NoError(t, err)
require.Equal(t, op, false)
op, err = f.IsOptimistic(ctx, nodeB.root)
require.NoError(t, err)
require.Equal(t, op, false)
op, err = f.IsOptimistic(ctx, nodeC.root)
require.NoError(t, err)
require.Equal(t, op, false)
op, err = f.IsOptimistic(ctx, nodeD.root)
require.NoError(t, err)
require.Equal(t, op, false)
op, err = f.IsOptimistic(ctx, nodeE.root)
require.NoError(t, err)
require.Equal(t, op, true)
op, err = f.IsOptimistic(ctx, nodeF.root)
require.NoError(t, err)
require.Equal(t, op, true)
op, err = f.IsOptimistic(ctx, nodeG.root)
require.NoError(t, err)
require.Equal(t, op, true)
op, err = f.IsOptimistic(ctx, nodeH.root)
require.NoError(t, err)
require.Equal(t, op, true)
op, err = f.IsOptimistic(ctx, nodeI.root)
require.NoError(t, err)
require.Equal(t, op, true)
op, err = f.IsOptimistic(ctx, nodeJ.root)
require.NoError(t, err)
require.Equal(t, op, true)
op, err = f.IsOptimistic(ctx, nodeK.root)
require.NoError(t, err)
require.Equal(t, op, true)
// request a write Lock to synced Tips regression #10289
f.syncedTips.Lock()
defer f.syncedTips.Unlock()
}
// This tests the algorithm to update syncedTips
// We start with the following diagram
// We test the algorithm to update a node from SYNCING to INVALID
// We start with the same diagram as above:
//
// E -- F
// /
@@ -213,380 +19,52 @@ func TestOptimistic(t *testing.T) {
// \ \
// J -- K -- L
//
// And every block in the Fork choice is optimistic. Synced_Tips contains a
// single block that is outside of Fork choice
// And every block in the Fork choice is optimistic.
//
func TestSetOptimisticToValid(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1))
func TestPruneInvalid(t *testing.T) {
tests := []struct {
root [32]byte // the root of the new VALID block
tips map[[32]byte]types.Slot // the old synced tips
newTips map[[32]byte]types.Slot // the updated synced tips
wantedErr error
root [32]byte // the root of the new INVALID block
wantedNodeNumber int
}{
{
[32]byte{'i'},
map[[32]byte]types.Slot{[32]byte{'z'}: 90},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
nil,
},
{
[32]byte{'i'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
nil,
},
{
[32]byte{'i'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'e'}: 103,
},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'e'}: 104,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
nil,
},
{
[32]byte{'j'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'f'}: 105,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
map[[32]byte]types.Slot{
[32]byte{'f'}: 105,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
[32]byte{'j'}: 102,
},
nil,
12,
},
{
[32]byte{'c'},
4,
},
{
[32]byte{'i'},
12,
},
{
[32]byte{'h'},
11,
},
{
[32]byte{'g'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'f'}: 105,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
map[[32]byte]types.Slot{
[32]byte{'f'}: 105,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
[32]byte{'j'}: 102,
},
errInvalidBestChildIndex,
},
{
[32]byte{'p'},
map[[32]byte]types.Slot{},
map[[32]byte]types.Slot{},
errInvalidNodeIndex,
},
}
for _, tc := range tests {
f.syncedTips.Lock()
f.syncedTips.validatedTips = tc.tips
f.syncedTips.Unlock()
err := f.SetOptimisticToValid(context.Background(), tc.root)
if tc.wantedErr != nil {
require.ErrorIs(t, err, tc.wantedErr)
} else {
require.NoError(t, err)
f.syncedTips.RLock()
require.DeepEqual(t, f.syncedTips.validatedTips, tc.newTips)
f.syncedTips.RUnlock()
}
}
}
// We test the algorithm to update a node from SYNCING to INVALID
// We start with the same diagram as above:
//
// E(2) -- F(1)
// /
// C(7) -- D(6)
// / \
// A(10) -- B(9) G(3) -- H(1) -- I(0)
// \ \
// J(1) -- K(1) -- L(0)
//
// And every block in the Fork choice is optimistic. Synced_Tips contains a
// single block that is outside of Fork choice. The numbers in parentheses are
// the weights of the nodes before removal
//
func TestSetOptimisticToInvalid(t *testing.T) {
tests := []struct {
root [32]byte // the root of the new INVALID block
tips map[[32]byte]types.Slot // the old synced tips
wantedParentTip bool
newBestChild uint64
newBestDescendant uint64
newParentWeight uint64
}{
{
[32]byte{'j'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
},
false,
3,
4,
8,
},
{
[32]byte{'j'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
},
true,
3,
4,
8,
},
{
[32]byte{'i'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
[32]byte{'h'}: 105,
},
true,
NonExistentNode,
NonExistentNode,
1,
},
{
[32]byte{'i'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
},
false,
NonExistentNode,
NonExistentNode,
1,
},
}
for _, tc := range tests {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1))
weights := []uint64{10, 10, 9, 7, 1, 6, 2, 3, 1, 1, 1, 0, 0}
f.syncedTips.Lock()
f.syncedTips.validatedTips = tc.tips
f.syncedTips.Unlock()
f.store.nodesLock.Lock()
for i, node := range f.store.nodes {
node.weight = weights[i]
}
// Make j be the best child and descendant of b
nodeB := f.store.nodes[2]
nodeB.bestChild = 4
nodeB.bestDescendant = 4
idx := f.store.nodesIndices[tc.root]
node := f.store.nodes[idx]
parentIndex := node.parent
require.NotEqual(t, NonExistentNode, parentIndex)
parent := f.store.nodes[parentIndex]
f.store.nodesLock.Unlock()
err := f.SetOptimisticToInvalid(context.Background(), tc.root)
require.NoError(t, err)
f.syncedTips.RLock()
_, parentSyncedTip := f.syncedTips.validatedTips[parent.root]
f.syncedTips.RUnlock()
require.Equal(t, tc.wantedParentTip, parentSyncedTip)
require.Equal(t, tc.newBestChild, parent.bestChild)
require.Equal(t, tc.newBestDescendant, parent.bestDescendant)
require.Equal(t, tc.newParentWeight, parent.weight)
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1, true))
require.NoError(t, f.store.removeNode(context.Background(), tc.root))
require.Equal(t, tc.wantedNodeNumber, f.store.NodeNumber())
}
}
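// The cases above pin down how marking a block INVALID is expected to affect the
// parent's best child, best descendant, and weight. As a rough illustration of the
// bookkeeping involved (an assumption-laden sketch, not the implementation in this
// PR), the helper below detaches an invalid node from a small child-pointer tree and
// backs its cumulative weight out of every ancestor; the toyNode type and the
// pruneInvalid name exist only for this example.
type toyNode struct {
	root     string
	weight   uint64 // cumulative: includes all descendants, as in proto-array
	parent   *toyNode
	children []*toyNode
}

func pruneInvalid(n *toyNode) {
	p := n.parent
	if p == nil {
		return
	}
	// Drop n (and implicitly its whole subtree) from the parent's children.
	kept := p.children[:0]
	for _, c := range p.children {
		if c != n {
			kept = append(kept, c)
		}
	}
	p.children = kept
	// Back the removed subtree's weight out of every ancestor. For example,
	// removing J (weight 1) from B (weight 9) leaves B with the tested weight of 8.
	for a := p; a != nil; a = a.parent {
		a.weight -= n.weight
	}
}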
// This tests the algorithm to find the synced tip of a given node.
// We start with the following diagram:
//
// E -- F
// /
// C -- D
// / \
// A -- B G -- H -- I
// \ \
// J -- K -- L
//
//
func TestFindSyncedTip(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1))
tests := []struct {
root [32]byte // the root of the block
tips map[[32]byte]types.Slot // the synced tips
wanted [32]byte // the root of expected tip
}{
{
[32]byte{'i'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 104,
},
[32]byte{'g'},
},
{
[32]byte{'g'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'h'}: 104,
[32]byte{'k'}: 106,
},
[32]byte{'d'},
},
{
[32]byte{'e'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'g'}: 103,
},
[32]byte{'d'},
},
{
[32]byte{'j'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'f'}: 105,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
[32]byte{'b'},
},
{
[32]byte{'g'},
map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'f'}: 105,
[32]byte{'g'}: 104,
[32]byte{'i'}: 106,
},
[32]byte{'g'},
},
}
for _, tc := range tests {
f.store.nodesLock.RLock()
node := f.store.nodes[f.store.nodesIndices[tc.root]]
syncedTips := &optimisticStore{
validatedTips: tc.tips,
}
syncedTips.RLock()
idx, err := f.store.findSyncedTip(ctx, node, syncedTips)
require.NoError(t, err)
require.Equal(t, tc.wanted, f.store.nodes[idx].root)
f.store.nodesLock.RUnlock()
syncedTips.RUnlock()
}
}
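// findSyncedTip, exercised above, resolves the closest validated tip on the path
// from the given node back to the tree root (the node itself counts, as the last
// case shows). A minimal standalone restatement of that ancestor walk, using a
// plain parent map instead of the store's types (names here are illustrative only):
func nearestSyncedTip(root string, parent map[string]string, tips map[string]bool) (string, bool) {
	for cur := root; ; {
		if tips[cur] {
			return cur, true
		}
		p, ok := parent[cur]
		if !ok {
			// Reached the tree root without hitting a synced tip.
			return "", false
		}
		cur = p
	}
}

// For the first case above: parents i->h->g->d->c->b->a with tips {b, d, g}
// resolve nearestSyncedTip("i", ...) to "g".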
// This is a regression test (10341)
func TestIsOptimistic_DeadLock(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 90, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
tips := map[[32]byte]types.Slot{
[32]byte{'a'}: 100,
[32]byte{'d'}: 102,
}
f.syncedTips.validatedTips = tips
_, err := f.IsOptimistic(ctx, [32]byte{'a'})
require.NoError(t, err)
// Acquire a write lock, this should not hang
f.store.nodesLock.Lock()
f.store.nodesLock.Unlock()
_, err = f.IsOptimistic(ctx, [32]byte{'e'})
require.NoError(t, err)
// Acquire a write lock, this should not hang
f.store.nodesLock.Lock()
f.store.nodesLock.Unlock()
_, err = f.IsOptimistic(ctx, [32]byte{'b'})
require.NoError(t, err)
// Acquire a write lock, this should not hang
f.store.nodesLock.Lock()
f.store.nodesLock.Unlock()
_, err = f.IsOptimistic(ctx, [32]byte{'c'})
require.NoError(t, err)
// Acquire a write lock, this should not hang
f.store.nodesLock.Lock()
f.store.nodesLock.Unlock()
}
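// The deadlock this regression test (10341) guards against is the classic
// recursive-read-lock hazard: sync.RWMutex blocks new readers once a writer is
// queued, so a method that takes RLock and then calls another method that takes
// RLock again can wedge behind a pending Lock. The sketch below (illustrative
// names, assumes "sync" is imported) shows the usual shape of the fix: acquire
// the lock once at the API boundary and delegate to a helper that assumes the
// lock is already held.
type lockedStore struct {
	mu         sync.RWMutex
	optimistic map[string]bool // root -> optimistic flag (toy data)
}

// isOptimisticLocked assumes the caller already holds mu.
func (s *lockedStore) isOptimisticLocked(root string) bool {
	return s.optimistic[root]
}

// IsOptimistic takes the read lock exactly once; it never re-enters mu.RLock.
func (s *lockedStore) IsOptimistic(root string) bool {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.isOptimisticLocked(root)
}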

View File

@@ -41,13 +41,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot := types.Slot(1)
newRoot := indexToHash(1)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
slot,
newRoot,
headRoot,
jEpoch,
fEpoch,
true,
),
)
f.ProcessAttestation(ctx, []uint64{0}, newRoot, fEpoch)
@@ -64,13 +65,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot = types.Slot(2)
newRoot = indexToHash(2)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
slot,
newRoot,
headRoot,
jEpoch,
fEpoch,
true,
),
)
f.ProcessAttestation(ctx, []uint64{1}, newRoot, fEpoch)
@@ -86,16 +88,17 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// 2
// |
// 3 <- HEAD
slot = types.Slot(2)
newRoot = indexToHash(2)
slot = types.Slot(3)
newRoot = indexToHash(3)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
slot,
newRoot,
headRoot,
jEpoch,
fEpoch,
true,
),
)
f.ProcessAttestation(ctx, []uint64{2}, newRoot, fEpoch)
@@ -114,13 +117,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
slot = types.Slot(3)
newRoot = indexToHash(4)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
slot,
newRoot,
headRoot,
jEpoch,
fEpoch,
true,
),
)
f.ProcessAttestation(ctx, []uint64{3}, newRoot, fEpoch)
@@ -132,11 +136,11 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 3")
// Check the ancestor scores from the store.
require.Equal(t, 4, len(f.store.nodes))
require.Equal(t, 5, len(f.store.nodeByRoot))
// Expect nodes to have a boosted, back-propagated score.
// Ancestors have the added weights of their children. Genesis is a special exception at 0 weight,
require.Equal(t, f.store.nodes[0].weight, uint64(0))
require.Equal(t, f.store.treeRootNode.weight, uint64(0))
// Otherwise, assuming a block, A, that is not-genesis:
//
@@ -152,14 +156,17 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
//
// In this case, we have a small fork:
//
// (A: 54) -> (B: 44) -> (C: 24)
// \_->(D: 10)
// (A: 54) -> (B: 44) -> (C: 34)
// \_->(D: 24)
//
// So B has its own weight, 10, and the sum of both C and D. That's why we see weight 54 in the
// middle instead of the normal progression of (44 -> 34 -> 24).
require.Equal(t, f.store.nodes[1].weight, uint64(54))
require.Equal(t, f.store.nodes[2].weight, uint64(44))
require.Equal(t, f.store.nodes[3].weight, uint64(24))
node1 := f.store.nodeByRoot[indexToHash(1)]
require.Equal(t, node1.weight, uint64(54))
node2 := f.store.nodeByRoot[indexToHash(2)]
require.Equal(t, node2.weight, uint64(44))
node3 := f.store.nodeByRoot[indexToHash(4)]
require.Equal(t, node3.weight, uint64(24))
})
t.Run("vanilla ex ante attack", func(t *testing.T) {
f := setup(jEpoch, fEpoch)
@@ -180,13 +187,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
honestBlockSlot := types.Slot(2)
honestBlock := indexToHash(2)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
honestBlockSlot,
honestBlock,
zeroHash,
jEpoch,
fEpoch,
true,
),
)
r, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
@@ -196,13 +204,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
maliciouslyWithheldBlockSlot := types.Slot(1)
maliciouslyWithheldBlock := indexToHash(1)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
maliciouslyWithheldBlockSlot,
maliciouslyWithheldBlock,
zeroHash,
jEpoch,
fEpoch,
true,
),
)
@@ -244,13 +253,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
honestBlockSlot := types.Slot(2)
honestBlock := indexToHash(2)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
honestBlockSlot,
honestBlock,
zeroHash,
jEpoch,
fEpoch,
true,
),
)
@@ -262,13 +272,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
maliciouslyWithheldBlockSlot := types.Slot(1)
maliciouslyWithheldBlock := indexToHash(1)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
maliciouslyWithheldBlockSlot,
maliciouslyWithheldBlock,
zeroHash,
jEpoch,
fEpoch,
true,
),
)
@@ -317,13 +328,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
cSlot := types.Slot(2)
c := indexToHash(2)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
cSlot,
c,
a, // parent
jEpoch,
fEpoch,
true,
),
)
@@ -340,13 +352,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
bSlot := types.Slot(1)
b := indexToHash(1)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
bSlot,
b,
a, // parent
jEpoch,
fEpoch,
true,
),
)
@@ -363,13 +376,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
dSlot := types.Slot(3)
d := indexToHash(3)
require.NoError(t,
f.InsertOptimisticBlock(
f.ProcessBlock(
ctx,
dSlot,
d,
b, // parent
jEpoch,
fEpoch,
true,
),
)
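// As noted in the hunk above, a node's weight in the store is cumulative:
// ancestors carry the added weights of their children, with genesis pinned at 0.
// A small standalone restatement of that accumulation, independent of the store's
// types (the maps and names below are only for illustration):
func cumulativeWeight(own map[string]uint64, children map[string][]string, root string) uint64 {
	w := own[root]
	for _, c := range children[root] {
		w += cumulativeWeight(own, children, c)
	}
	return w
}

// With own weights A=10, B=10, C=10, D=10 and edges A->B, B->{C, D},
// cumulativeWeight yields B=30 and A=40, mirroring how the assertions above
// compare whole-subtree weights rather than per-block votes.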

View File

@@ -1,16 +1,12 @@
package protoarray
import (
"bytes"
"context"
"fmt"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
pmath "github.com/prysmaticlabs/prysm/math"
pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"go.opencensus.io/trace"
)
@@ -18,211 +14,62 @@ import (
// before getting pruned upon new finalization.
const defaultPruneThreshold = 256
// This tracks the last reported head root. Used for metrics.
var lastHeadRoot [32]byte
// safeHeadAttackersWeight is the largest amount of stake, in Gwei, that an
// attacker is assumed to be willing to spend in an attack on the "safe head"
// (here 4096 validators at 32 ETH each).
const safeHeadAttackersWeight = uint64(4096 * 32000000000)
// New initializes a new fork choice store.
func New(justifiedEpoch, finalizedEpoch types.Epoch, finalizedRoot [32]byte) *ForkChoice {
s := &Store{
justifiedEpoch: justifiedEpoch,
finalizedEpoch: finalizedEpoch,
finalizedRoot: finalizedRoot,
proposerBoostRoot: [32]byte{},
nodes: make([]*Node, 0),
nodesIndices: make(map[[32]byte]uint64),
canonicalNodes: make(map[[32]byte]bool),
pruneThreshold: defaultPruneThreshold,
// applyProposerBoostScore applies the current proposer boost scores to the
// relevant nodes
func (s *Store) applyProposerBoostScore(newBalances []uint64) error {
s.proposerBoostLock.Lock()
defer s.proposerBoostLock.Unlock()
proposerScore := uint64(0)
var err error
if s.previousProposerBoostRoot != params.BeaconConfig().ZeroHash {
previousNode, ok := s.nodeByRoot[s.previousProposerBoostRoot]
if !ok || previousNode == nil {
return errInvalidProposerBoostRoot
}
previousNode.balance -= s.previousProposerBoostScore
}
b := make([]uint64, 0)
v := make([]Vote, 0)
st := &optimisticStore{
validatedTips: make(map[[32]byte]types.Slot),
if s.proposerBoostRoot != params.BeaconConfig().ZeroHash {
currentNode, ok := s.nodeByRoot[s.proposerBoostRoot]
if !ok || currentNode == nil {
return errInvalidProposerBoostRoot
}
proposerScore, err = computeProposerBoostScore(newBalances)
if err != nil {
return err
}
currentNode.balance += proposerScore
}
return &ForkChoice{store: s, balances: b, votes: v, syncedTips: st}
}
// SetSyncedTips sets the synced and validated tips from the passed map
func (f *ForkChoice) SetSyncedTips(tips map[[32]byte]types.Slot) error {
if len(tips) == 0 {
return errInvalidSyncedTips
}
newTips := make(map[[32]byte]types.Slot, len(tips))
for k, v := range tips {
newTips[k] = v
}
f.syncedTips.Lock()
defer f.syncedTips.Unlock()
f.syncedTips.validatedTips = newTips
s.previousProposerBoostRoot = s.proposerBoostRoot
s.previousProposerBoostScore = proposerScore
return nil
}
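// computeProposerBoostScore is not shown in this hunk. For orientation, the
// consensus-spec formula it follows computes the boost as a percentage of one
// slot's worth of committee weight; the sketch below restates it with explicit
// parameters (the function name and the concrete values are assumptions for the
// example, not this package's API):
func sketchProposerBoostScore(justifiedBalances []uint64, slotsPerEpoch, boostPercent uint64) uint64 {
	total := uint64(0)
	for _, b := range justifiedBalances {
		total += b
	}
	committeeWeight := total / slotsPerEpoch
	return committeeWeight * boostPercent / 100
}

// e.g. 64 validators at 32_000_000_000 Gwei with 32 slots per epoch and a 70%
// boost give a score of 64*32e9/32 * 70/100 = 44_800_000_000.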
// SyncedTips returns the synced and validated tips from the fork choice store.
func (f *ForkChoice) SyncedTips() map[[32]byte]types.Slot {
f.syncedTips.RLock()
defer f.syncedTips.RUnlock()
m := make(map[[32]byte]types.Slot)
for k, v := range f.syncedTips.validatedTips {
m[k] = v
}
return m
// NodeNumber returns the current number of nodes in the Store
func (s *Store) NodeNumber() int {
return len(s.nodeByRoot)
}
// Head returns the head root from fork choice store.
// It first computes the validators' balance changes, then recalculates the block tree weights from leaves to root.
func (f *ForkChoice) Head(
ctx context.Context,
justifiedEpoch types.Epoch,
justifiedRoot [32]byte,
justifiedStateBalances []uint64,
finalizedEpoch types.Epoch,
) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.Head")
defer span.End()
f.votesLock.Lock()
defer f.votesLock.Unlock()
calledHeadCount.Inc()
newBalances := justifiedStateBalances
// Using the write lock here because `updateCanonicalNodes` that gets called subsequently requires a write operation.
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
deltas, newVotes, err := computeDeltas(ctx, f.store.nodesIndices, f.votes, f.balances, newBalances)
if err != nil {
return [32]byte{}, errors.Wrap(err, "Could not compute deltas")
}
f.votes = newVotes
if err := f.store.applyWeightChanges(ctx, justifiedEpoch, finalizedEpoch, newBalances, deltas); err != nil {
return [32]byte{}, errors.Wrap(err, "Could not apply score changes")
}
f.balances = newBalances
return f.store.head(ctx, justifiedRoot)
// JustifiedEpoch of fork choice store.
func (s *Store) JustifiedEpoch() types.Epoch {
return s.justifiedEpoch
}
// ProcessAttestation processes attestation for vote accounting, it iterates around validator indices
// and update their votes accordingly.
func (f *ForkChoice) ProcessAttestation(ctx context.Context, validatorIndices []uint64, blockRoot [32]byte, targetEpoch types.Epoch) {
_, span := trace.StartSpan(ctx, "protoArrayForkChoice.ProcessAttestation")
defer span.End()
f.votesLock.Lock()
defer f.votesLock.Unlock()
for _, index := range validatorIndices {
// Validator indices will grow the vote cache.
for index >= uint64(len(f.votes)) {
f.votes = append(f.votes, Vote{currentRoot: params.BeaconConfig().ZeroHash, nextRoot: params.BeaconConfig().ZeroHash})
}
// Newly allocated vote if the root fields are untouched.
newVote := f.votes[index].nextRoot == params.BeaconConfig().ZeroHash &&
f.votes[index].currentRoot == params.BeaconConfig().ZeroHash
// Vote gets updated if it's newly allocated or high target epoch.
if newVote || targetEpoch > f.votes[index].nextEpoch {
f.votes[index].nextEpoch = targetEpoch
f.votes[index].nextRoot = blockRoot
}
}
processedAttestationCount.Inc()
// FinalizedEpoch of fork choice store.
func (s *Store) FinalizedEpoch() types.Epoch {
return s.finalizedEpoch
}
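// Illustrative only (not part of the package API): the per-slot flow a caller
// typically follows with this store — insert the block, account its attestations,
// then recompute head. The method signatures match the ones in this package; the
// helper name and the concrete values are assumptions for the example, and it
// assumes the parent (here the zero hash) was inserted earlier, as setup does in
// the tests.
func exampleHeadFlow(ctx context.Context, f *ForkChoice, balances []uint64) ([32]byte, error) {
	blockRoot := [32]byte{'a'}
	parentRoot := params.BeaconConfig().ZeroHash
	// Insert the block optimistically (execution payload not yet validated).
	if err := f.ProcessBlock(ctx, 1, blockRoot, parentRoot, 0, 0, true); err != nil {
		return [32]byte{}, err
	}
	// Account the attestations that voted for this block.
	f.ProcessAttestation(ctx, []uint64{0, 1, 2}, blockRoot, 0)
	// Recompute head from the justified root with the justified state balances.
	return f.Head(ctx, 0, parentRoot, balances, 0)
}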
// NodeCount returns the current number of nodes in the Store
func (f *ForkChoice) NodeCount() int {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
return len(f.store.nodes)
}
// ProposerBoost returns the proposerBoost of the store
func (f *ForkChoice) ProposerBoost() [fieldparams.RootLength]byte {
return f.store.proposerBoost()
}
// InsertOptimisticBlock processes a new block by inserting it to the fork choice store.
func (f *ForkChoice) InsertOptimisticBlock(
ctx context.Context,
slot types.Slot,
blockRoot, parentRoot [32]byte,
justifiedEpoch, finalizedEpoch types.Epoch) error {
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.InsertOptimisticBlock")
defer span.End()
return f.store.insert(ctx, slot, blockRoot, parentRoot, justifiedEpoch, finalizedEpoch)
}
// Prune prunes the fork choice store with the new finalized root. The store is only pruned if the input
// root is different from the current finalized root and the number of nodes in the store has met the prune threshold.
func (f *ForkChoice) Prune(ctx context.Context, finalizedRoot [32]byte) error {
return f.store.prune(ctx, finalizedRoot, f.syncedTips)
}
// HasNode returns true if the node exists in the fork choice store,
// false otherwise.
func (f *ForkChoice) HasNode(root [32]byte) bool {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
_, ok := f.store.nodesIndices[root]
return ok
}
// HasParent returns true if the node parent exists in fork choice store,
// false else wise.
func (f *ForkChoice) HasParent(root [32]byte) bool {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
i, ok := f.store.nodesIndices[root]
if !ok || i >= uint64(len(f.store.nodes)) {
return false
}
return f.store.nodes[i].parent != NonExistentNode
}
// IsCanonical returns true if the given root is part of the canonical chain.
func (f *ForkChoice) IsCanonical(root [32]byte) bool {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
return f.store.canonicalNodes[root]
}
// AncestorRoot returns the ancestor root of input block root at a given slot.
func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types.Slot) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "protoArray.AncestorRoot")
defer span.End()
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
i, ok := f.store.nodesIndices[root]
if !ok {
return nil, errors.New("node does not exist")
}
if i >= uint64(len(f.store.nodes)) {
return nil, errors.New("node index out of range")
}
for f.store.nodes[i].slot > slot {
if ctx.Err() != nil {
return nil, ctx.Err()
}
i = f.store.nodes[i].parent
if i >= uint64(len(f.store.nodes)) {
return nil, errors.New("node index out of range")
}
}
return f.store.nodes[i].root[:], nil
// ProposerBoost of fork choice store.
func (s *Store) ProposerBoost() [fieldparams.RootLength]byte {
s.proposerBoostLock.RLock()
defer s.proposerBoostLock.RUnlock()
return s.proposerBoostRoot
}
// PruneThreshold of fork choice store.
@@ -230,120 +77,46 @@ func (s *Store) PruneThreshold() uint64 {
return s.pruneThreshold
}
// JustifiedEpoch of fork choice store.
func (f *ForkChoice) JustifiedEpoch() types.Epoch {
return f.store.justifiedEpoch
}
// FinalizedEpoch of fork choice store.
func (f *ForkChoice) FinalizedEpoch() types.Epoch {
return f.store.finalizedEpoch
}
// proposerBoost of fork choice store.
func (s *Store) proposerBoost() [fieldparams.RootLength]byte {
s.proposerBoostLock.RLock()
defer s.proposerBoostLock.RUnlock()
return s.proposerBoostRoot
}
// head starts from justified root and then follows the best descendant links
// to find the best block for head.
// to find the best block for head. This function assumes a lock on s.nodesLock
func (s *Store) head(ctx context.Context, justifiedRoot [32]byte) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.head")
defer span.End()
// Justified index has to be valid in node indices map, and can not be out of bound.
justifiedIndex, ok := s.nodesIndices[justifiedRoot]
if !ok {
// JustifiedRoot has to be known
justifiedNode, ok := s.nodeByRoot[justifiedRoot]
if !ok || justifiedNode == nil {
return [32]byte{}, errUnknownJustifiedRoot
}
if justifiedIndex >= uint64(len(s.nodes)) {
return [32]byte{}, errInvalidJustifiedIndex
}
justifiedNode := s.nodes[justifiedIndex]
bestDescendantIndex := justifiedNode.bestDescendant
// If the justified node doesn't have a best descendant,
// the best node is itself.
if bestDescendantIndex == NonExistentNode {
bestDescendantIndex = justifiedIndex
}
if bestDescendantIndex >= uint64(len(s.nodes)) {
return [32]byte{}, errInvalidBestDescendantIndex
bestDescendant := justifiedNode.bestDescendant
if bestDescendant == nil {
bestDescendant = justifiedNode
}
bestNode := s.nodes[bestDescendantIndex]
if !s.viableForHead(bestNode) {
if !bestDescendant.viableForHead(s.justifiedEpoch, s.finalizedEpoch) {
return [32]byte{}, fmt.Errorf("head at slot %d with weight %d is not eligible, finalizedEpoch %d != %d, justifiedEpoch %d != %d",
bestNode.slot, bestNode.weight/10e9, bestNode.finalizedEpoch, s.finalizedEpoch, bestNode.justifiedEpoch, s.justifiedEpoch)
bestDescendant.slot, bestDescendant.weight/10e9, bestDescendant.finalizedEpoch, s.finalizedEpoch, bestDescendant.justifiedEpoch, s.justifiedEpoch)
}
// Update metrics.
if bestNode.root != lastHeadRoot {
if bestDescendant != s.headNode {
headChangesCount.Inc()
headSlotNumber.Set(float64(bestNode.slot))
lastHeadRoot = bestNode.root
headSlotNumber.Set(float64(bestDescendant.slot))
s.headNode = bestDescendant
}
// Update canonical mapping given the head root.
if err := s.updateCanonicalNodes(ctx, bestNode.root); err != nil {
return [32]byte{}, err
}
return bestNode.root, nil
}
// updateCanonicalNodes updates the canonical nodes mapping given the input block root.
func (s *Store) updateCanonicalNodes(ctx context.Context, root [32]byte) error {
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.updateCanonicalNodes")
defer span.End()
// Set the input node to canonical.
i := s.nodesIndices[root]
var newCanonicalRoots [][32]byte
var n *Node
for i != NonExistentNode {
if ctx.Err() != nil {
return ctx.Err()
}
// Get the parent node, if the node is already in canonical mapping,
// we can be sure rest of the ancestors are canonical. Exit early.
n = s.nodes[i]
if s.canonicalNodes[n.root] {
break
}
// Set parent node to canonical. Repeat until parent node index is undefined.
newCanonicalRoots = append(newCanonicalRoots, n.root)
i = n.parent
}
// i is either NonExistentNode or has the index of the last canonical
// node before the last head update.
if i == NonExistentNode {
s.canonicalNodes = make(map[[fieldparams.RootLength]byte]bool)
} else {
for j := i + 1; j < uint64(len(s.nodes)); j++ {
delete(s.canonicalNodes, s.nodes[j].root)
}
}
for _, canonicalRoot := range newCanonicalRoots {
s.canonicalNodes[canonicalRoot] = true
}
return nil
return bestDescendant.root, nil
}
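// After weight changes, the tree variant refreshes best-descendant pointers with a
// recursive pass instead of proto-array's index bookkeeping (updateBestDescendant,
// called from insert below, is not shown in this hunk). A simplified sketch of that
// recursion: because weights are cumulative, comparing children's weights compares
// whole subtrees. The real method also filters children by viability against the
// store's justified/finalized epochs and breaks weight ties by root, both omitted here.
func sketchBestDescendant(n *Node) *Node {
	if len(n.children) == 0 {
		// A leaf's best descendant stays nil, which head() above treats as "itself".
		n.bestDescendant = nil
		return n
	}
	var best, bestTip *Node
	for _, c := range n.children {
		tip := sketchBestDescendant(c) // refresh the whole subtree first
		if best == nil || c.weight > best.weight {
			best, bestTip = c, tip
		}
	}
	n.bestDescendant = bestTip
	return bestTip
}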
// insert registers a new block node to the fork choice store's node list.
// It then updates the new node's parent with best child and descendant node.
func (s *Store) insert(ctx context.Context,
slot types.Slot,
root, parent [32]byte,
justifiedEpoch, finalizedEpoch types.Epoch) error {
root, parentRoot [fieldparams.RootLength]byte,
justifiedEpoch, finalizedEpoch types.Epoch, optimistic bool) error {
_, span := trace.StartSpan(ctx, "protoArrayForkChoice.insert")
defer span.End()
@@ -351,425 +124,155 @@ func (s *Store) insert(ctx context.Context,
defer s.nodesLock.Unlock()
// Return if the block has been inserted into Store before.
if _, ok := s.nodesIndices[root]; ok {
if _, ok := s.nodeByRoot[root]; ok {
return nil
}
index := uint64(len(s.nodes))
parentIndex, ok := s.nodesIndices[parent]
// Mark genesis block's parent as non-existent.
if !ok {
parentIndex = NonExistentNode
}
parent := s.nodeByRoot[parentRoot]
n := &Node{
slot: slot,
root: root,
parent: parentIndex,
parent: parent,
justifiedEpoch: justifiedEpoch,
finalizedEpoch: finalizedEpoch,
bestChild: NonExistentNode,
bestDescendant: NonExistentNode,
weight: 0,
optimistic: optimistic,
}
s.nodesIndices[root] = index
s.nodes = append(s.nodes, n)
// Update parent with the best child and descendant only if it's available.
if n.parent != NonExistentNode {
if err := s.updateBestChildAndDescendant(parentIndex, index); err != nil {
s.nodeByRoot[root] = n
if parent != nil {
parent.children = append(parent.children, n)
if err := s.treeRootNode.updateBestDescendant(ctx, s.justifiedEpoch, s.finalizedEpoch); err != nil {
return err
}
}
// Update metrics.
processedBlockCount.Inc()
nodeCount.Set(float64(len(s.nodes)))
return nil
}
// applyWeightChanges iterates backwards through the nodes in store. It checks all nodes parent
// and its best child. For each node, it updates the weight with input delta and
// back propagate the nodes' delta to its parents' delta. After scoring changes,
// the best child is then updated along with the best descendant.
func (s *Store) applyWeightChanges(
ctx context.Context, justifiedEpoch, finalizedEpoch types.Epoch, newBalances []uint64, delta []int,
) error {
_, span := trace.StartSpan(ctx, "protoArrayForkChoice.applyWeightChanges")
defer span.End()
// The length of the nodes can not be different than length of the delta.
if len(s.nodes) != len(delta) {
return errInvalidDeltaLength
}
// Update the justified / finalized epochs in store if necessary.
if s.justifiedEpoch != justifiedEpoch || s.finalizedEpoch != finalizedEpoch {
s.justifiedEpoch = justifiedEpoch
s.finalizedEpoch = finalizedEpoch
}
// Proposer score defaults to 0.
proposerScore := uint64(0)
// Iterate backwards through all index to node in store.
var err error
for i := len(s.nodes) - 1; i >= 0; i-- {
n := s.nodes[i]
// There is no need to adjust the balances or manage parent of the zero hash, it
// is an alias to the genesis block.
if n.root == params.BeaconConfig().ZeroHash {
continue
}
nodeDelta := delta[i]
// If we have a node where the proposer boost was previously applied,
// we then decrease the delta by the required score amount.
s.proposerBoostLock.Lock()
if s.previousProposerBoostRoot != params.BeaconConfig().ZeroHash && s.previousProposerBoostRoot == n.root {
nodeDelta -= int(s.previousProposerBoostScore)
}
if s.proposerBoostRoot != params.BeaconConfig().ZeroHash && s.proposerBoostRoot == n.root {
proposerScore, err = computeProposerBoostScore(newBalances)
if err != nil {
s.proposerBoostLock.Unlock()
return err
}
iProposerScore, err := pmath.Int(proposerScore)
if err != nil {
return err
}
nodeDelta = nodeDelta + iProposerScore
}
s.proposerBoostLock.Unlock()
// A node's weight can not be negative but the delta can be negative.
if nodeDelta < 0 {
d := uint64(-nodeDelta)
if n.weight < d {
n.weight = 0
} else {
n.weight -= d
}
} else {
n.weight += uint64(nodeDelta)
}
// Update parent's best child and descendant if the node has a known parent.
if n.parent != NonExistentNode {
// Protection against node parent index out of bound. This should not happen.
if int(n.parent) >= len(delta) {
return errInvalidParentDelta
}
// Back propagate the nodes' delta to its parent.
delta[n.parent] += nodeDelta
}
}
// Set the previous boosted root and score.
s.proposerBoostLock.Lock()
s.previousProposerBoostRoot = s.proposerBoostRoot
s.previousProposerBoostScore = proposerScore
s.proposerBoostLock.Unlock()
for i := len(s.nodes) - 1; i >= 0; i-- {
n := s.nodes[i]
if n.parent != NonExistentNode {
if int(n.parent) >= len(delta) {
return errInvalidParentDelta
}
if err := s.updateBestChildAndDescendant(n.parent, uint64(i)); err != nil {
return err
}
}
}
return nil
}
// updateBestChildAndDescendant updates parent node's best child and descendant.
// It looks at input parent node and input child node and potentially modifies parent's best
// child and best descendant indices.
// There are four outcomes:
// 1.) The child is already the best child, but it's now invalid due to a FFG change and should be removed.
// 2.) The child is already the best child and the parent is updated with the new best descendant.
// 3.) The child is not the best child but becomes the best child.
// 4.) The child is not the best child and does not become the best child.
func (s *Store) updateBestChildAndDescendant(parentIndex, childIndex uint64) error {
// Protection against parent index out of bound, this should not happen.
if parentIndex >= uint64(len(s.nodes)) {
return errInvalidNodeIndex
}
parent := s.nodes[parentIndex]
// Protection against child index out of bound, again this should not happen.
if childIndex >= uint64(len(s.nodes)) {
return errInvalidNodeIndex
}
child := s.nodes[childIndex]
// Is the child viable to become head? Based on justification and finalization rules.
childLeadsToViableHead, err := s.leadsToViableHead(child)
if err != nil {
return err
}
// Define 3 variables for the 3 outcomes mentioned above. This is to
// set `parent.bestChild` and `parent.bestDescendant` to. These
// aliases are to assist readability.
changeToNone := []uint64{NonExistentNode, NonExistentNode}
bestDescendant := child.bestDescendant
if bestDescendant == NonExistentNode {
bestDescendant = childIndex
}
changeToChild := []uint64{childIndex, bestDescendant}
noChange := []uint64{parent.bestChild, parent.bestDescendant}
var newParentChild []uint64
if parent.bestChild != NonExistentNode {
if parent.bestChild == childIndex && !childLeadsToViableHead {
// If the child is already the best child of the parent, but it's not viable for head,
// we should remove it. (Outcome 1)
newParentChild = changeToNone
} else if parent.bestChild == childIndex {
// If the child is already the best child of the parent, set it again to ensure the best
// descendant of the parent is updated. (Outcome 2)
newParentChild = changeToChild
} else {
// Protection against parent's best child going out of bound.
if parent.bestChild > uint64(len(s.nodes)) {
return errInvalidBestDescendantIndex
}
bestChild := s.nodes[parent.bestChild]
// Is current parent's best child viable to be head? Based on justification and finalization rules.
bestChildLeadsToViableHead, err := s.leadsToViableHead(bestChild)
if err != nil {
return err
}
if childLeadsToViableHead && !bestChildLeadsToViableHead {
// The child leads to a viable head, but the current parent's best child doesn't.
newParentChild = changeToChild
} else if !childLeadsToViableHead && bestChildLeadsToViableHead {
// The child doesn't lead to a viable head, the current parent's best child does.
newParentChild = noChange
} else if child.weight == bestChild.weight {
// If both are viable, compare their weights.
// Tie-breaker of equal weights by root.
if bytes.Compare(child.root[:], bestChild.root[:]) > 0 {
newParentChild = changeToChild
} else {
newParentChild = noChange
}
} else {
// Choose winner by weight.
if child.weight > bestChild.weight {
newParentChild = changeToChild
} else {
newParentChild = noChange
}
}
if !optimistic {
if err := n.setNodeAndParentValidated(ctx); err != nil {
return err
}
} else {
if childLeadsToViableHead {
// If parent doesn't have a best child and the child is viable.
newParentChild = changeToChild
} else {
// If parent doesn't have a best child and the child is not viable.
newParentChild = noChange
}
optimisticCount.Inc()
}
// Update parent with the outcome.
parent.bestChild = newParentChild[0]
parent.bestDescendant = newParentChild[1]
s.nodes[parentIndex] = parent
// Set the node as root if the store was empty
if s.treeRootNode == nil {
s.treeRootNode = n
s.headNode = n
}
// Update metrics.
processedBlockCount.Inc()
nodeCount.Set(float64(len(s.nodeByRoot)))
return nil
}
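// setNodeAndParentValidated (used above when a block arrives fully validated) is
// not part of this hunk. A plausible sketch of its intent — an assumption, not the
// actual implementation: a validated block implies all of its ancestors are
// validated, so clear the optimistic flag walking up the parent pointers until an
// already-validated ancestor is reached.
func sketchSetValidated(ctx context.Context, n *Node) error {
	for cur := n; cur != nil; cur = cur.parent {
		if ctx.Err() != nil {
			return ctx.Err()
		}
		if !cur.optimistic {
			// Ancestors of a validated node are already validated; stop here.
			break
		}
		cur.optimistic = false
	}
	return nil
}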
// prune prunes the store with the new finalized root. The tree is only
// pruned if the input finalized root is different from the one stored and
// the number of nodes in the store has met the prune threshold.
func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte, syncedTips *optimisticStore) error {
_, span := trace.StartSpan(ctx, "protoArrayForkChoice.prune")
// updateCheckpoints updates the justified / finalized epochs in the store if necessary.
func (s *Store) updateCheckpoints(justifiedEpoch, finalizedEpoch types.Epoch) {
s.justifiedEpoch = justifiedEpoch
s.finalizedEpoch = finalizedEpoch
}
// pruneMaps prunes the `nodeByRoot` map, starting from `node` down to the
// finalized node or to a leaf of the fork choice store. This method assumes
// the caller holds a lock on s.nodesLock.
func (s *Store) pruneMaps(ctx context.Context, node, finalizedNode *Node) error {
if ctx.Err() != nil {
return ctx.Err()
}
if node == finalizedNode {
return nil
}
for _, child := range node.children {
if err := s.pruneMaps(ctx, child, finalizedNode); err != nil {
return err
}
}
delete(s.nodeByRoot, node.root)
return nil
}
// prune prunes the fork choice store with the new finalized root. The store is only pruned if the input
// root is different from the current finalized root and the number of nodes in the store has met the prune threshold.
func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "protoArrayForkChoice.Prune")
defer span.End()
s.nodesLock.Lock()
defer s.nodesLock.Unlock()
// The node would have seen finalized root or else it
// wouldn't be able to prune it.
finalizedIndex, ok := s.nodesIndices[finalizedRoot]
if !ok {
finalizedNode, ok := s.nodeByRoot[finalizedRoot]
if !ok || finalizedNode == nil {
return errUnknownFinalizedRoot
}
// The number of the nodes has not met the prune threshold.
// Pruning at small numbers incurs more cost than benefit.
if finalizedIndex < s.pruneThreshold {
if finalizedNode.depth() < s.pruneThreshold {
return nil
}
// Traverse through the node list starting from the finalized node at index 0.
// Nodes that are not branching off from the finalized node will be removed.
syncedTips.Lock()
defer syncedTips.Unlock()
canonicalNodesMap := make(map[uint64]uint64, uint64(len(s.nodes))-finalizedIndex)
canonicalNodes := make([]*Node, 1, uint64(len(s.nodes))-finalizedIndex)
finalizedNode := s.nodes[finalizedIndex]
finalizedTipIndex, err := s.findSyncedTip(ctx, finalizedNode, syncedTips)
if err != nil {
// Prune nodeByRoot starting from root
if err := s.pruneMaps(ctx, s.treeRootNode, finalizedNode); err != nil {
return err
}
finalizedNode.parent = NonExistentNode
canonicalNodes[0] = finalizedNode
canonicalNodesMap[finalizedIndex] = uint64(0)
for idx := uint64(0); idx < uint64(len(s.nodes)); idx++ {
node := copyNode(s.nodes[idx])
parentIdx, ok := canonicalNodesMap[node.parent]
if ok {
s.nodesIndices[node.root] = uint64(len(canonicalNodes))
canonicalNodesMap[idx] = uint64(len(canonicalNodes))
node.parent = parentIdx
canonicalNodes = append(canonicalNodes, node)
} else {
// Remove node and synced tip that is not part of finalized branch.
delete(s.nodesIndices, node.root)
_, ok := syncedTips.validatedTips[node.root]
if ok && idx != finalizedTipIndex {
delete(syncedTips.validatedTips, node.root)
}
}
}
s.nodesIndices[finalizedRoot] = uint64(0)
finalizedNode.parent = nil
s.treeRootNode = finalizedNode
// Recompute the best child and descendant for each canonical nodes.
for _, node := range canonicalNodes {
if node.bestChild != NonExistentNode {
node.bestChild = canonicalNodesMap[node.bestChild]
}
if node.bestDescendant != NonExistentNode {
node.bestDescendant = canonicalNodesMap[node.bestDescendant]
}
}
s.nodes = canonicalNodes
prunedCount.Inc()
syncedTipsCount.Set(float64(len(syncedTips.validatedTips)))
return nil
}
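// depth, used in the threshold check above, is not shown in this hunk. A minimal
// sketch of what it plausibly returns (an assumption, not this PR's code): the
// number of ancestors between the node and the tree root, i.e. the tree analogue
// of the finalized node's index in the old flat array.
func sketchDepth(n *Node) uint64 {
	d := uint64(0)
	for cur := n.parent; cur != nil; cur = cur.parent {
		d++
	}
	return d
}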
// leadsToViableHead returns true if the node or its best descendant is viable for head.
// Any node whose finalized or justified epoch differs from the ones in the fork choice store
// is not viable for head.
func (s *Store) leadsToViableHead(node *Node) (bool, error) {
var bestDescendantViable bool
bestDescendantIndex := node.bestDescendant
// If the best descendant is not part of the leaves.
if bestDescendantIndex != NonExistentNode {
// Protection against out of bound, the best descendant index can not be
// exceeds length of nodes list.
if bestDescendantIndex >= uint64(len(s.nodes)) {
return false, errInvalidBestDescendantIndex
}
bestDescendantNode := s.nodes[bestDescendantIndex]
bestDescendantViable = s.viableForHead(bestDescendantNode)
}
// The node is viable as long as the best descendant is viable.
return bestDescendantViable || s.viableForHead(node), nil
}
// viableForHead returns true if the node is viable for head.
// Any node whose finalized or justified epoch differs from the ones in the fork choice store
// is not viable for head.
func (s *Store) viableForHead(node *Node) bool {
// `node` is viable if its justified epoch and finalized epoch are the same as the one in `Store`.
// It's also viable if we are in genesis epoch.
justified := s.justifiedEpoch == node.justifiedEpoch || s.justifiedEpoch == 0
finalized := s.finalizedEpoch == node.finalizedEpoch || s.finalizedEpoch == 0
return justified && finalized
}
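// Concretely: with the store at justified epoch 2 and finalized epoch 1, a node
// carrying (justified 2, finalized 1) is viable, a node carrying (justified 1,
// finalized 1) is not, and before the first justification (store epochs still 0)
// every node is viable. The predicate above, restated with explicit parameters
// purely for illustration:
func sketchViableForHead(storeJustified, storeFinalized, nodeJustified, nodeFinalized types.Epoch) bool {
	justified := storeJustified == nodeJustified || storeJustified == 0
	finalized := storeFinalized == nodeFinalized || storeFinalized == 0
	return justified && finalized
}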
// Returns the list of leaves in the fork choice store.
// These are all the nodes that have NonExistentNode as best child.
// This internal method assumes that the caller holds a lock on s.nodesLock.
func (s *Store) leaves() ([]uint64, error) {
var leaves []uint64
for i := uint64(0); i < uint64(len(s.nodes)); i++ {
node := s.nodes[i]
if node.bestChild == NonExistentNode {
leaves = append(leaves, i)
// tips returns a list of possible heads from the fork choice store: the
// roots and the slots of the leaf nodes.
func (s *Store) tips() ([][32]byte, []types.Slot) {
var roots [][32]byte
var slots []types.Slot
for root, node := range s.nodeByRoot {
if len(node.children) == 0 {
roots = append(roots, root)
slots = append(slots, node.slot)
}
}
return leaves, nil
return roots, slots
}
// Tips returns all possible chain heads (leaves of the fork choice tree).
// Head roots and head slots are returned.
func (f *ForkChoice) Tips() ([][32]byte, []types.Slot) {
// TreeRoot returns the current root Node of the Store.
func (s *Store) TreeRoot() *Node {
s.nodesLock.RLock()
defer s.nodesLock.RUnlock()
return s.treeRootNode
}
// Deliberate choice not to preallocate space below.
// Heads can't be more than 2-3 in the worst case, whereas pre-allocation would be 64 to begin with.
headsRoots := make([][32]byte, 0)
headsSlots := make([]types.Slot, 0)
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
for _, node := range f.store.nodes {
// Possible heads have no children.
if node.BestDescendant() == NonExistentNode && node.BestChild() == NonExistentNode {
headsRoots = append(headsRoots, node.Root())
headsSlots = append(headsSlots, node.Slot())
}
// Safe returns whether the block is considered safe with respect to
// the network's synchrony assumptions and the maximum stake an attacker is
// assumed to be willing to spend.
func (s *Store) Safe(ctx context.Context, root [32]byte, committeeWeight uint64) (bool, error) {
node, ok := s.nodeByRoot[root]
if !ok || node == nil {
return false, errNilNode
}
return headsRoots, headsSlots
}
func (f *ForkChoice) ForkChoiceNodes() []*pbrpc.ForkChoiceNode {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
ret := make([]*pbrpc.ForkChoiceNode, len(f.store.nodes))
var parentRoot [32]byte
for i, node := range f.store.nodes {
root := node.Root()
parentIdx := node.parent
if parentIdx == NonExistentNode {
parentRoot = params.BeaconConfig().ZeroHash
} else {
parent := f.store.nodes[parentIdx]
parentRoot = parent.Root()
}
bestDescendantIdx := node.BestDescendant()
var bestDescendantRoot [32]byte
if bestDescendantIdx == NonExistentNode {
bestDescendantRoot = params.BeaconConfig().ZeroHash
} else {
bestDescendantNode := f.store.nodes[bestDescendantIdx]
bestDescendantRoot = bestDescendantNode.Root()
}
ret[i] = &pbrpc.ForkChoiceNode{
Slot: node.Slot(),
Root: root[:],
Parent: parentRoot[:],
JustifiedEpoch: node.JustifiedEpoch(),
FinalizedEpoch: node.FinalizedEpoch(),
Weight: node.Weight(),
BestDescendant: bestDescendantRoot[:],
}
if node.bestDescendant != nil && node.bestDescendant != s.headNode {
return false, nil
}
return ret
potentialVotes := uint64(s.headNode.slot-node.slot+1) * committeeWeight
actualVotes := node.weight
tailWeight := uint64(0)
for node.parent != nil {
if ctx.Err() != nil {
return false, ctx.Err()
}
node = node.parent
tailWeight += node.balance
}
s.proposerBoostLock.RLock()
defer s.proposerBoostLock.RUnlock()
return tailWeight+2*actualVotes > potentialVotes+safeHeadAttackersWeight+s.previousProposerBoostScore, nil
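// A worked reading of the Safe predicate above (the constants are this PR's
// proof-of-concept choices, so treat the numbers as illustrative): a block is
// reported safe only if its subtree weight, counted twice, plus the balances on
// its ancestors outweighs every vote that could still be cast between its slot
// and the current head, plus the assumed attacker budget and any outstanding
// proposer boost. Restated standalone for illustration:
func sketchSafe(tailWeight, actualVotes, potentialVotes, attackerWeight, prevBoost uint64) bool {
	return tailWeight+2*actualVotes > potentialVotes+attackerWeight+prevBoost
}

// Toy numbers: tailWeight=10, actualVotes=50, potentialVotes=40, attackerWeight=20,
// prevBoost=0 gives 10+100 > 60, so the block would be reported safe.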
}

View File

@@ -6,7 +6,6 @@ import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)
@@ -22,409 +21,177 @@ func TestStore_PruneThreshold(t *testing.T) {
func TestStore_JustifiedEpoch(t *testing.T) {
j := types.Epoch(100)
f := setup(j, j)
require.Equal(t, j, f.JustifiedEpoch())
s := &Store{
justifiedEpoch: j,
}
if got := s.JustifiedEpoch(); got != j {
t.Errorf("JustifiedEpoch() = %v, want %v", got, j)
}
}
func TestStore_FinalizedEpoch(t *testing.T) {
j := types.Epoch(50)
f := setup(j, j)
require.Equal(t, j, f.FinalizedEpoch())
f := types.Epoch(50)
s := &Store{
finalizedEpoch: f,
}
if got := s.FinalizedEpoch(); got != f {
t.Errorf("FinalizedEpoch() = %v, want %v", got, f)
}
}
func TestStore_NodeNumber(t *testing.T) {
f := setup(0, 0)
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.Equal(t, 2, f.store.NodeNumber())
}
func TestStore_NodeByRoot(t *testing.T) {
f := setup(0, 0)
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 0, 0, false))
node0 := f.store.treeRootNode
node1 := node0.children[0]
node2 := node1.children[0]
expectedRoots := map[[32]byte]*Node{
params.BeaconConfig().ZeroHash: node0,
indexToHash(1): node1,
indexToHash(2): node2,
}
require.Equal(t, 3, f.store.NodeNumber())
for root, node := range f.store.nodeByRoot {
v, ok := expectedRoots[root]
require.Equal(t, ok, true)
require.Equal(t, v, node)
}
}
func TestForkChoice_HasNode(t *testing.T) {
nodeIndices := map[[32]byte]uint64{
{'a'}: 1,
{'b'}: 2,
}
s := &Store{
nodesIndices: nodeIndices,
}
f := &ForkChoice{store: s}
require.Equal(t, true, f.HasNode([32]byte{'a'}))
f := setup(0, 0)
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.Equal(t, true, f.HasNode(indexToHash(1)))
}
func TestStore_Head_UnknownJustifiedRoot(t *testing.T) {
s := &Store{nodesIndices: make(map[[32]byte]uint64)}
f := setup(0, 0)
_, err := s.head(context.Background(), [32]byte{})
_, err := f.store.head(context.Background(), [32]byte{'a'})
assert.ErrorContains(t, errUnknownJustifiedRoot.Error(), err)
}
func TestStore_Head_UnknownJustifiedIndex(t *testing.T) {
r := [32]byte{'A'}
indices := make(map[[32]byte]uint64)
indices[r] = 1
s := &Store{nodesIndices: indices}
_, err := s.head(context.Background(), r)
assert.ErrorContains(t, errInvalidJustifiedIndex.Error(), err)
}
func TestStore_Head_Itself(t *testing.T) {
r := [32]byte{'A'}
indices := map[[32]byte]uint64{r: 0}
f := setup(0, 0)
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
// Since the justified node does not have a best descendant, the best node
// is itself.
s := &Store{nodesIndices: indices, nodes: []*Node{{root: r, parent: NonExistentNode, bestDescendant: NonExistentNode}}, canonicalNodes: make(map[[32]byte]bool)}
h, err := s.head(context.Background(), r)
h, err := f.store.head(context.Background(), indexToHash(1))
require.NoError(t, err)
assert.Equal(t, r, h)
assert.Equal(t, indexToHash(1), h)
}
func TestStore_Head_BestDescendant(t *testing.T) {
r := [32]byte{'A'}
best := [32]byte{'B'}
indices := map[[32]byte]uint64{r: 0, best: 1}
// Since the justified node's best descendant is at index 1, and its root is `best`,
// the head should be `best`.
s := &Store{nodesIndices: indices, nodes: []*Node{{root: r, bestDescendant: 1, parent: NonExistentNode}, {root: best, parent: 0}}, canonicalNodes: make(map[[32]byte]bool)}
h, err := s.head(context.Background(), r)
f := setup(0, 0)
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(3), indexToHash(1), 0, 0, false))
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(4), indexToHash(2), 0, 0, false))
h, err := f.store.head(context.Background(), indexToHash(1))
require.NoError(t, err)
assert.Equal(t, best, h)
require.Equal(t, h, indexToHash(4))
}
func TestStore_Head_ContextCancelled(t *testing.T) {
func TestStore_UpdateBestDescendant_ContextCancelled(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
r := [32]byte{'A'}
best := [32]byte{'B'}
indices := map[[32]byte]uint64{r: 0, best: 1}
s := &Store{nodesIndices: indices, nodes: []*Node{{root: r, parent: NonExistentNode, bestDescendant: 1}, {root: best, parent: 0}}, canonicalNodes: make(map[[32]byte]bool)}
f := setup(0, 0)
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
cancel()
_, err := s.head(ctx, r)
err := f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0, false)
require.ErrorContains(t, "context canceled", err)
}
func TestStore_Insert_UnknownParent(t *testing.T) {
func TestStore_Insert(t *testing.T) {
// The new node does not have a parent.
s := &Store{nodesIndices: make(map[[32]byte]uint64)}
require.NoError(t, s.insert(context.Background(), 100, [32]byte{'A'}, [32]byte{'B'}, 1, 1))
assert.Equal(t, 1, len(s.nodes), "Did not insert block")
assert.Equal(t, 1, len(s.nodesIndices), "Did not insert block")
assert.Equal(t, NonExistentNode, s.nodes[0].parent, "Incorrect parent")
assert.Equal(t, types.Epoch(1), s.nodes[0].justifiedEpoch, "Incorrect justification")
assert.Equal(t, types.Epoch(1), s.nodes[0].finalizedEpoch, "Incorrect finalization")
assert.Equal(t, [32]byte{'A'}, s.nodes[0].root, "Incorrect root")
treeRootNode := &Node{slot: 0, root: indexToHash(0)}
nodeByRoot := map[[32]byte]*Node{indexToHash(0): treeRootNode}
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode}
require.NoError(t, s.insert(context.Background(), 100, indexToHash(100), indexToHash(0), 1, 1, false))
assert.Equal(t, 2, len(s.nodeByRoot), "Did not insert block")
assert.Equal(t, (*Node)(nil), treeRootNode.parent, "Incorrect parent")
assert.Equal(t, 1, len(treeRootNode.children), "Incorrect children number")
child := treeRootNode.children[0]
assert.Equal(t, types.Epoch(1), child.justifiedEpoch, "Incorrect justification")
assert.Equal(t, types.Epoch(1), child.finalizedEpoch, "Incorrect finalization")
assert.Equal(t, indexToHash(100), child.root, "Incorrect root")
}
func TestStore_Insert_KnownParent(t *testing.T) {
// Similar to UnknownParent test, but this time the new node has a valid parent already in store.
// The new node builds on top of the parent.
s := &Store{nodesIndices: make(map[[32]byte]uint64)}
s.nodes = []*Node{{}}
p := [32]byte{'B'}
s.nodesIndices[p] = 0
require.NoError(t, s.insert(context.Background(), 100, [32]byte{'A'}, p, 1, 1))
assert.Equal(t, 2, len(s.nodes), "Did not insert block")
assert.Equal(t, 2, len(s.nodesIndices), "Did not insert block")
assert.Equal(t, uint64(0), s.nodes[1].parent, "Incorrect parent")
assert.Equal(t, types.Epoch(1), s.nodes[1].justifiedEpoch, "Incorrect justification")
assert.Equal(t, types.Epoch(1), s.nodes[1].finalizedEpoch, "Incorrect finalization")
assert.Equal(t, [32]byte{'A'}, s.nodes[1].root, "Incorrect root")
}
func TestStore_updateCheckpoints(t *testing.T) {
f := setup(0, 0)
s := f.store
func TestStore_ApplyScoreChanges_InvalidDeltaLength(t *testing.T) {
s := &Store{}
// This will fail because the node list has a length of 0, and the delta list has a length of 1.
err := s.applyWeightChanges(context.Background(), 0, 0, []uint64{}, []int{1})
assert.ErrorContains(t, errInvalidDeltaLength.Error(), err)
}
func TestStore_ApplyScoreChanges_UpdateEpochs(t *testing.T) {
s := &Store{}
// The justified and finalized epochs in Store should be updated to 1 and 1 given the following input.
require.NoError(t, s.applyWeightChanges(context.Background(), 1, 1, []uint64{}, []int{}))
s.updateCheckpoints(1, 1)
assert.Equal(t, types.Epoch(1), s.justifiedEpoch, "Did not update justified epoch")
assert.Equal(t, types.Epoch(1), s.finalizedEpoch, "Did not update finalized epoch")
}
func TestStore_ApplyScoreChanges_UpdateWeightsPositiveDelta(t *testing.T) {
// Construct 3 nodes with weight 100 on each node. The 3 nodes linked to each other.
s := &Store{nodes: []*Node{
{root: [32]byte{'A'}, weight: 100},
{root: [32]byte{'A'}, weight: 100},
{parent: 1, root: [32]byte{'A'}, weight: 100}}}
// Each node gets one unique vote. The weight should look like 103 <- 102 <- 101 because
// they get propagated back.
require.NoError(t, s.applyWeightChanges(context.Background(), 0, 0, []uint64{}, []int{1, 1, 1}))
assert.Equal(t, uint64(103), s.nodes[0].weight)
assert.Equal(t, uint64(102), s.nodes[1].weight)
assert.Equal(t, uint64(101), s.nodes[2].weight)
}
func TestStore_ApplyScoreChanges_UpdateWeightsNegativeDelta(t *testing.T) {
// Construct 3 nodes with weight 100 on each node. The 3 nodes linked to each other.
s := &Store{nodes: []*Node{
{root: [32]byte{'A'}, weight: 100},
{root: [32]byte{'A'}, weight: 100},
{parent: 1, root: [32]byte{'A'}, weight: 100}}}
// Each node gets one unique vote which contributes to negative delta.
// The weight should look like 97 <- 98 <- 99 because they get propagated back.
require.NoError(t, s.applyWeightChanges(context.Background(), 0, 0, []uint64{}, []int{-1, -1, -1}))
assert.Equal(t, uint64(97), s.nodes[0].weight)
assert.Equal(t, uint64(98), s.nodes[1].weight)
assert.Equal(t, uint64(99), s.nodes[2].weight)
}
func TestStore_ApplyScoreChanges_UpdateWeightsMixedDelta(t *testing.T) {
// Construct 3 nodes with weight 100 on each node. The 3 nodes linked to each other.
s := &Store{nodes: []*Node{
{root: [32]byte{'A'}, weight: 100},
{root: [32]byte{'A'}, weight: 100},
{parent: 1, root: [32]byte{'A'}, weight: 100}}}
// Each node gets one mixed vote. The weight should look like 100 <- 200 <- 250.
require.NoError(t, s.applyWeightChanges(context.Background(), 0, 0, []uint64{}, []int{-100, -50, 150}))
assert.Equal(t, uint64(100), s.nodes[0].weight)
assert.Equal(t, uint64(200), s.nodes[1].weight)
assert.Equal(t, uint64(250), s.nodes[2].weight)
}
func TestStore_UpdateBestChildAndDescendant_RemoveChild(t *testing.T) {
// Make the parent's best child equal to the input child index while the child is not viable.
s := &Store{nodes: []*Node{{bestChild: 1}, {}}, justifiedEpoch: 1, finalizedEpoch: 1}
require.NoError(t, s.updateBestChildAndDescendant(0, 1))
// Verify parent's best child and best descendant are `none`.
assert.Equal(t, NonExistentNode, s.nodes[0].bestChild, "Did not get correct best child index")
assert.Equal(t, NonExistentNode, s.nodes[0].bestDescendant, "Did not get correct best descendant index")
}
func TestStore_UpdateBestChildAndDescendant_UpdateDescendant(t *testing.T) {
// Make parent's best child equal to child index and child is viable.
s := &Store{nodes: []*Node{{bestChild: 1}, {bestDescendant: NonExistentNode}}}
require.NoError(t, s.updateBestChildAndDescendant(0, 1))
// Verify parent's best child is the same and best descendant is not set to child index.
assert.Equal(t, uint64(1), s.nodes[0].bestChild, "Did not get correct best child index")
assert.Equal(t, uint64(1), s.nodes[0].bestDescendant, "Did not get correct best descendant index")
}
func TestStore_UpdateBestChildAndDescendant_ChangeChildByViability(t *testing.T) {
// Make parent's best child not equal to child index, child leads to viable index and
// parent's best child doesn't lead to viable index.
s := &Store{
justifiedEpoch: 1,
finalizedEpoch: 1,
nodes: []*Node{{bestChild: 1, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode},
{bestDescendant: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1}}}
require.NoError(t, s.updateBestChildAndDescendant(0, 2))
// Verify parent's best child and best descendant are set to child index.
assert.Equal(t, uint64(2), s.nodes[0].bestChild, "Did not get correct best child index")
assert.Equal(t, uint64(2), s.nodes[0].bestDescendant, "Did not get correct best descendant index")
}
func TestStore_UpdateBestChildAndDescendant_ChangeChildByWeight(t *testing.T) {
// Make parent's best child not equal to child index, child leads to viable index and
// parent's best child leads to viable index, but child has more weight than parent's best child.
s := &Store{
justifiedEpoch: 1,
finalizedEpoch: 1,
nodes: []*Node{{bestChild: 1, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1, weight: 1}}}
require.NoError(t, s.updateBestChildAndDescendant(0, 2))
// Verify parent's best child and best descendant are set to child index.
assert.Equal(t, uint64(2), s.nodes[0].bestChild, "Did not get correct best child index")
assert.Equal(t, uint64(2), s.nodes[0].bestDescendant, "Did not get correct best descendant index")
}
func TestStore_UpdateBestChildAndDescendant_ChangeChildAtLeaf(t *testing.T) {
// Make parent's best child to none and input child leads to viable index.
s := &Store{
justifiedEpoch: 1,
finalizedEpoch: 1,
nodes: []*Node{{bestChild: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1}}}
require.NoError(t, s.updateBestChildAndDescendant(0, 2))
// Verify parent's best child and best descendant are set to child index.
assert.Equal(t, uint64(2), s.nodes[0].bestChild, "Did not get correct best child index")
assert.Equal(t, uint64(2), s.nodes[0].bestDescendant, "Did not get correct best descendant index")
}
func TestStore_UpdateBestChildAndDescendant_NoChangeByViability(t *testing.T) {
// Make parent's best child not equal to child index, child leads to a non-viable index and
// parent's best child leads to viable index.
s := &Store{
justifiedEpoch: 1,
finalizedEpoch: 1,
nodes: []*Node{{bestChild: 1, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode}}}
require.NoError(t, s.updateBestChildAndDescendant(0, 2))
// Verify parent's best child and best descendant are not changed.
assert.Equal(t, uint64(1), s.nodes[0].bestChild, "Did not get correct best child index")
assert.Equal(t, uint64(0), s.nodes[0].bestDescendant, "Did not get correct best descendant index")
}
func TestStore_UpdateBestChildAndDescendant_NoChangeByWeight(t *testing.T) {
// Make parent's best child not equal to child index, child leads to viable index and
// parent's best child leads to viable index, but parent's best child has more weight.
s := &Store{
justifiedEpoch: 1,
finalizedEpoch: 1,
nodes: []*Node{{bestChild: 1, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1, weight: 1},
{bestDescendant: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1}}}
require.NoError(t, s.updateBestChildAndDescendant(0, 2))
// Verify parent's best child and best descendant are not changed.
assert.Equal(t, uint64(1), s.nodes[0].bestChild, "Did not get correct best child index")
assert.Equal(t, uint64(0), s.nodes[0].bestDescendant, "Did not get correct best descendant index")
}
func TestStore_UpdateBestChildAndDescendant_NoChangeAtLeaf(t *testing.T) {
// Make parent's best child to none and input child does not lead to viable index.
s := &Store{
justifiedEpoch: 1,
finalizedEpoch: 1,
nodes: []*Node{{bestChild: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode, justifiedEpoch: 1, finalizedEpoch: 1},
{bestDescendant: NonExistentNode}}}
require.NoError(t, s.updateBestChildAndDescendant(0, 2))
// Verify parent's best child and best descendant are not changed.
assert.Equal(t, NonExistentNode, s.nodes[0].bestChild, "Did not get correct best child index")
assert.Equal(t, uint64(0), s.nodes[0].bestDescendant, "Did not get correct best descendant index")
}
func TestStore_Prune_LessThanThreshold(t *testing.T) {
// Define 100 nodes in store.
numOfNodes := 100
indices := make(map[[32]byte]uint64)
nodes := make([]*Node, 0)
indices[indexToHash(uint64(0))] = uint64(0)
nodes = append(nodes, &Node{
slot: types.Slot(0),
root: indexToHash(uint64(0)),
bestDescendant: uint64(numOfNodes - 1),
bestChild: uint64(1),
parent: NonExistentNode,
})
for i := 1; i < numOfNodes-1; i++ {
indices[indexToHash(uint64(i))] = uint64(i)
nodes = append(nodes, &Node{
slot: types.Slot(i),
root: indexToHash(uint64(i)),
bestDescendant: uint64(numOfNodes - 1),
bestChild: uint64(i + 1),
parent: uint64(i) - 1,
})
numOfNodes := uint64(100)
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
for i := uint64(2); i < numOfNodes; i++ {
require.NoError(t, f.ProcessBlock(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), 0, 0, false))
}
indices[indexToHash(uint64(numOfNodes-1))] = uint64(numOfNodes - 1)
nodes = append(nodes, &Node{
slot: types.Slot(numOfNodes - 1),
root: indexToHash(uint64(numOfNodes - 1)),
bestDescendant: NonExistentNode,
bestChild: NonExistentNode,
parent: uint64(numOfNodes - 2),
})
s := &Store{nodes: nodes, nodesIndices: indices, pruneThreshold: 100}
syncedTips := &optimisticStore{}
s := f.store
s.pruneThreshold = 100
// Finalized root is at index 99 so everything before 99 should be pruned,
// Finalized root has depth 99 so everything before it should be pruned,
// but PruneThreshold is at 100 so nothing will be pruned.
require.NoError(t, s.prune(context.Background(), indexToHash(99), syncedTips))
assert.Equal(t, 100, len(s.nodes), "Incorrect nodes count")
assert.Equal(t, 100, len(s.nodesIndices), "Incorrect node indices count")
require.NoError(t, s.prune(context.Background(), indexToHash(99)))
assert.Equal(t, 100, len(s.nodeByRoot), "Incorrect nodes count")
}
func TestStore_Prune_MoreThanThreshold(t *testing.T) {
// Define 100 nodes in store.
numOfNodes := 100
indices := make(map[[32]byte]uint64)
nodes := make([]*Node, 0)
indices[indexToHash(uint64(0))] = uint64(0)
nodes = append(nodes, &Node{
slot: types.Slot(0),
root: indexToHash(uint64(0)),
bestDescendant: uint64(numOfNodes - 1),
bestChild: uint64(1),
parent: NonExistentNode,
})
for i := 1; i < numOfNodes-1; i++ {
indices[indexToHash(uint64(i))] = uint64(i)
nodes = append(nodes, &Node{
slot: types.Slot(i),
root: indexToHash(uint64(i)),
bestDescendant: uint64(numOfNodes - 1),
bestChild: uint64(i + 1),
parent: uint64(i) - 1,
})
numOfNodes := uint64(100)
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
for i := uint64(2); i < numOfNodes; i++ {
require.NoError(t, f.ProcessBlock(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), 0, 0, false))
}
nodes = append(nodes, &Node{
slot: types.Slot(numOfNodes - 1),
root: indexToHash(uint64(numOfNodes - 1)),
bestDescendant: NonExistentNode,
bestChild: NonExistentNode,
parent: uint64(numOfNodes - 2),
})
indices[indexToHash(uint64(numOfNodes-1))] = uint64(numOfNodes - 1)
s := &Store{nodes: nodes, nodesIndices: indices}
syncedTips := &optimisticStore{}
s := f.store
s.pruneThreshold = 0
// Finalized root is at index 99 so everything before 99 should be pruned.
require.NoError(t, s.prune(context.Background(), indexToHash(99), syncedTips))
assert.Equal(t, 1, len(s.nodes), "Incorrect nodes count")
assert.Equal(t, 1, len(s.nodesIndices), "Incorrect node indices count")
require.NoError(t, s.prune(context.Background(), indexToHash(99)))
assert.Equal(t, 1, len(s.nodeByRoot), "Incorrect nodes count")
}
func TestStore_Prune_MoreThanOnce(t *testing.T) {
// Define 100 nodes in store.
numOfNodes := 100
indices := make(map[[32]byte]uint64)
nodes := make([]*Node, 0)
nodes = append(nodes, &Node{
slot: types.Slot(0),
root: indexToHash(uint64(0)),
bestDescendant: uint64(numOfNodes - 1),
bestChild: uint64(1),
parent: NonExistentNode,
})
for i := 1; i < numOfNodes-1; i++ {
indices[indexToHash(uint64(i))] = uint64(i)
nodes = append(nodes, &Node{
slot: types.Slot(i),
root: indexToHash(uint64(i)),
bestDescendant: uint64(numOfNodes - 1),
bestChild: uint64(i + 1),
parent: uint64(i) - 1,
})
numOfNodes := uint64(100)
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
for i := uint64(2); i < numOfNodes; i++ {
require.NoError(t, f.ProcessBlock(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), 0, 0, false))
}
nodes = append(nodes, &Node{
slot: types.Slot(numOfNodes - 1),
root: indexToHash(uint64(numOfNodes - 1)),
bestDescendant: NonExistentNode,
bestChild: NonExistentNode,
parent: uint64(numOfNodes - 2),
})
s := &Store{nodes: nodes, nodesIndices: indices}
syncedTips := &optimisticStore{}
s := f.store
s.pruneThreshold = 0
// Finalized root is at index 11 so everything before 11 should be pruned.
require.NoError(t, s.prune(context.Background(), indexToHash(10), syncedTips))
assert.Equal(t, 90, len(s.nodes), "Incorrect nodes count")
assert.Equal(t, 90, len(s.nodesIndices), "Incorrect node indices count")
require.NoError(t, s.prune(context.Background(), indexToHash(10)))
assert.Equal(t, 90, len(s.nodeByRoot), "Incorrect nodes count")
// One more time.
require.NoError(t, s.prune(context.Background(), indexToHash(20), syncedTips))
assert.Equal(t, 80, len(s.nodes), "Incorrect nodes count")
assert.Equal(t, 80, len(s.nodesIndices), "Incorrect node indices count")
require.NoError(t, s.prune(context.Background(), indexToHash(20)))
assert.Equal(t, 80, len(s.nodeByRoot), "Incorrect nodes count")
}
// This unit test starts with a simple branch like this
@@ -435,41 +202,15 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
//
// And we finalize 1. As a result only 1 should survive
func TestStore_Prune_NoDanglingBranch(t *testing.T) {
nodes := []*Node{
{
slot: 100,
bestChild: 1,
bestDescendant: 1,
root: indexToHash(uint64(0)),
parent: NonExistentNode,
},
{
slot: 101,
root: indexToHash(uint64(1)),
bestChild: NonExistentNode,
bestDescendant: NonExistentNode,
parent: 0,
},
{
slot: 101,
root: indexToHash(uint64(2)),
parent: 0,
bestChild: NonExistentNode,
bestDescendant: NonExistentNode,
},
}
syncedTips := &optimisticStore{}
s := &Store{
pruneThreshold: 0,
nodes: nodes,
nodesIndices: map[[32]byte]uint64{
indexToHash(uint64(0)): 0,
indexToHash(uint64(1)): 1,
indexToHash(uint64(2)): 2,
},
}
require.NoError(t, s.prune(context.Background(), indexToHash(uint64(1)), syncedTips))
require.Equal(t, len(s.nodes), 1)
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0, false))
f.store.pruneThreshold = 0
s := f.store
require.NoError(t, s.prune(context.Background(), indexToHash(1)))
require.Equal(t, len(s.nodeByRoot), 1)
}
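The prune tests above fix two behaviours: pruning up to the finalized root is skipped while fewer nodes than pruneThreshold would be discarded, and once it runs, every node outside the finalized root's subtree is dropped so no dangling branches survive. A hedged sketch of that gate on a map-backed tree store follows; the store and treeNode types and the field names are assumptions made for the example, not this PR's implementation.

package main

import "fmt"

// treeNode is a minimal node for this sketch; children link the tree downwards.
type treeNode struct {
	root     [32]byte
	parent   *treeNode
	children []*treeNode
}

// store holds the tree plus the prune threshold, mirroring the idea (not the
// code) of the forkchoice store in this diff.
type store struct {
	treeRoot       *treeNode
	nodeByRoot     map[[32]byte]*treeNode
	pruneThreshold uint64
}

// prune makes finalizedRoot the new tree root, discarding every node outside
// its subtree, unless fewer than pruneThreshold nodes would be removed.
func (s *store) prune(finalizedRoot [32]byte) error {
	finalized, ok := s.nodeByRoot[finalizedRoot]
	if !ok {
		return fmt.Errorf("unknown finalized root %x", finalizedRoot)
	}
	// Collect the surviving subtree rooted at the finalized node.
	kept := map[[32]byte]*treeNode{}
	var collect func(n *treeNode)
	collect = func(n *treeNode) {
		kept[n.root] = n
		for _, c := range n.children {
			collect(c)
		}
	}
	collect(finalized)
	// Skip the prune entirely while too few nodes would be discarded.
	if uint64(len(s.nodeByRoot)-len(kept)) < s.pruneThreshold {
		return nil
	}
	finalized.parent = nil
	s.treeRoot = finalized
	s.nodeByRoot = kept
	return nil
}

func main() {
	a := &treeNode{root: [32]byte{'a'}}
	b := &treeNode{root: [32]byte{'b'}, parent: a} // finalized child
	c := &treeNode{root: [32]byte{'c'}, parent: a} // sibling branch, must not dangle
	a.children = []*treeNode{b, c}
	s := &store{treeRoot: a, nodeByRoot: map[[32]byte]*treeNode{a.root: a, b.root: b, c.root: c}}
	if err := s.prune(b.root); err != nil {
		panic(err)
	}
	fmt.Println(len(s.nodeByRoot)) // 1: only the finalized block survives
}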
// This test starts with the following branching diagram
@@ -484,256 +225,58 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
// J -- K -- L
//
//
// Synced tips are B, D and E. And we finalize F. All that is left in fork
// choice is F, and the only synced tip left is E which is now away from Fork
// Choice.
func TestStore_PruneSyncedTips(t *testing.T) {
func TestStore_tips(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1))
syncedTips := &optimisticStore{
validatedTips: map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'e'}: 104,
},
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1, true))
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1, true))
expectedMap := map[[32]byte]types.Slot{
[32]byte{'f'}: 105,
[32]byte{'i'}: 106,
[32]byte{'l'}: 106,
[32]byte{'j'}: 102,
}
f.syncedTips = syncedTips
f.store.pruneThreshold = 0
require.NoError(t, f.Prune(ctx, [32]byte{'f'}))
require.Equal(t, 1, len(f.syncedTips.validatedTips))
_, ok := f.syncedTips.validatedTips[[32]byte{'e'}]
require.Equal(t, true, ok)
}
func TestStore_LeadsToViableHead(t *testing.T) {
tests := []struct {
n *Node
justifiedEpoch types.Epoch
finalizedEpoch types.Epoch
want bool
}{
{&Node{}, 0, 0, true},
{&Node{}, 1, 0, false},
{&Node{}, 0, 1, false},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1}, 1, 1, true},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1}, 2, 2, false},
{&Node{finalizedEpoch: 3, justifiedEpoch: 4}, 4, 3, true},
}
for _, tc := range tests {
s := &Store{
justifiedEpoch: tc.justifiedEpoch,
finalizedEpoch: tc.finalizedEpoch,
nodes: []*Node{tc.n},
}
got, err := s.leadsToViableHead(tc.n)
require.NoError(t, err)
assert.Equal(t, tc.want, got)
roots, slots := f.store.tips()
for i, r := range roots {
expectedSlot, ok := expectedMap[r]
require.Equal(t, true, ok)
require.Equal(t, slots[i], expectedSlot)
}
}
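The test above builds a small block tree and expects tips() to return exactly the leaves (f, i, l and j) together with their slots. A simple way to obtain that set is a depth-first walk that reports every node without children; the sketch below uses an assumed tnode type and is not the repository's traversal.

package main

import "fmt"

// tnode is a simplified tree node for this sketch.
type tnode struct {
	root     [32]byte
	slot     uint64
	children []*tnode
}

// tips walks the tree from the given root and returns the roots and slots of
// all leaves, i.e. nodes without children.
func tips(root *tnode) ([][32]byte, []uint64) {
	var roots [][32]byte
	var slots []uint64
	stack := []*tnode{root}
	for len(stack) > 0 {
		n := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		if len(n.children) == 0 {
			roots = append(roots, n.root)
			slots = append(slots, n.slot)
			continue
		}
		stack = append(stack, n.children...)
	}
	return roots, slots
}

func main() {
	// a -> b -> {c, j}; c -> d, so the leaves are d and j.
	d := &tnode{root: [32]byte{'d'}, slot: 103}
	j := &tnode{root: [32]byte{'j'}, slot: 102}
	c := &tnode{root: [32]byte{'c'}, slot: 102, children: []*tnode{d}}
	b := &tnode{root: [32]byte{'b'}, slot: 101, children: []*tnode{c, j}}
	a := &tnode{root: [32]byte{'a'}, slot: 100, children: []*tnode{b}}
	roots, slots := tips(a)
	fmt.Println(len(roots), slots) // 2 leaves: d (slot 103) and j (slot 102)
}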
func TestStore_SetSyncedTips(t *testing.T) {
f := setup(1, 1)
tips := make(map[[32]byte]types.Slot)
require.ErrorIs(t, errInvalidSyncedTips, f.SetSyncedTips(tips))
tips[bytesutil.ToBytes32([]byte{'a'})] = 1
require.NoError(t, f.SetSyncedTips(tips))
f.syncedTips.RLock()
defer f.syncedTips.RUnlock()
require.Equal(t, 1, len(f.syncedTips.validatedTips))
slot, ok := f.syncedTips.validatedTips[bytesutil.ToBytes32([]byte{'a'})]
require.Equal(t, true, ok)
require.Equal(t, types.Slot(1), slot)
}
func TestStore_PruneMapsNodes(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0, false))
s := f.store
s.pruneThreshold = 0
require.NoError(t, s.prune(context.Background(), indexToHash(uint64(1))))
require.Equal(t, len(s.nodeByRoot), 1)
func TestStore_ViableForHead(t *testing.T) {
tests := []struct {
n *Node
justifiedEpoch types.Epoch
finalizedEpoch types.Epoch
want bool
}{
{&Node{}, 0, 0, true},
{&Node{}, 1, 0, false},
{&Node{}, 0, 1, false},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1}, 1, 1, true},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1}, 2, 2, false},
{&Node{finalizedEpoch: 3, justifiedEpoch: 4}, 4, 3, true},
}
for _, tc := range tests {
s := &Store{
justifiedEpoch: tc.justifiedEpoch,
finalizedEpoch: tc.finalizedEpoch,
}
assert.Equal(t, tc.want, s.viableForHead(tc.n))
}
}
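The table in this test encodes the usual fork-choice viability filter: a node may only be considered for head if its justified and finalized epochs match the store's, with the store's genesis (zero) checkpoints accepting anything. A compact sketch of that predicate, with assumed type names rather than the PR's, is:

package main

import "fmt"

type epoch uint64

type viabilityNode struct {
	justifiedEpoch epoch
	finalizedEpoch epoch
}

type viabilityStore struct {
	justifiedEpoch epoch
	finalizedEpoch epoch
}

// viableForHead reports whether n can be considered as a head candidate: its
// checkpoints must agree with the store's, unless the store is still at the
// genesis (zero) checkpoints.
func (s *viabilityStore) viableForHead(n *viabilityNode) bool {
	justifiedOK := s.justifiedEpoch == 0 || n.justifiedEpoch == s.justifiedEpoch
	finalizedOK := s.finalizedEpoch == 0 || n.finalizedEpoch == s.finalizedEpoch
	return justifiedOK && finalizedOK
}

func main() {
	s := &viabilityStore{justifiedEpoch: 1, finalizedEpoch: 1}
	fmt.Println(s.viableForHead(&viabilityNode{justifiedEpoch: 1, finalizedEpoch: 1})) // true
	fmt.Println(s.viableForHead(&viabilityNode{}))                                     // false
}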
func TestStore_HasParent(t *testing.T) {
tests := []struct {
m map[[32]byte]uint64
n []*Node
r [32]byte
want bool
}{
{r: [32]byte{'a'}, want: false},
{m: map[[32]byte]uint64{{'a'}: 0}, r: [32]byte{'a'}, want: false},
{m: map[[32]byte]uint64{{'a'}: 0}, r: [32]byte{'a'},
n: []*Node{{parent: NonExistentNode}}, want: false},
{m: map[[32]byte]uint64{{'a'}: 0},
n: []*Node{{parent: 0}}, r: [32]byte{'a'},
want: true},
}
for _, tc := range tests {
f := &ForkChoice{store: &Store{
nodesIndices: tc.m,
nodes: tc.n,
}}
assert.Equal(t, tc.want, f.HasParent(tc.r))
}
}
func TestStore_AncestorRoot(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
f := &ForkChoice{store: &Store{}}
f.store.nodesIndices = map[[32]byte]uint64{}
_, err := f.AncestorRoot(ctx, [32]byte{'a'}, 0)
assert.ErrorContains(t, "node does not exist", err)
f.store.nodesIndices[[32]byte{'a'}] = 0
_, err = f.AncestorRoot(ctx, [32]byte{'a'}, 0)
assert.ErrorContains(t, "node index out of range", err)
f.store.nodesIndices[[32]byte{'b'}] = 1
f.store.nodesIndices[[32]byte{'c'}] = 2
f.store.nodes = []*Node{
{slot: 1, root: [32]byte{'a'}, parent: NonExistentNode},
{slot: 2, root: [32]byte{'b'}, parent: 0},
{slot: 3, root: [32]byte{'c'}, parent: 1},
}
r, err := f.AncestorRoot(ctx, [32]byte{'c'}, 1)
require.NoError(t, err)
assert.Equal(t, bytesutil.ToBytes32(r), [32]byte{'a'})
r, err = f.AncestorRoot(ctx, [32]byte{'c'}, 2)
require.NoError(t, err)
assert.Equal(t, bytesutil.ToBytes32(r), [32]byte{'b'})
}
func TestStore_AncestorRootOutOfBound(t *testing.T) {
ctx := context.Background()
f := &ForkChoice{store: &Store{}}
f.store.nodesIndices = map[[32]byte]uint64{}
_, err := f.AncestorRoot(ctx, [32]byte{'a'}, 0)
assert.ErrorContains(t, "node does not exist", err)
f.store.nodesIndices[[32]byte{'a'}] = 0
_, err = f.AncestorRoot(ctx, [32]byte{'a'}, 0)
assert.ErrorContains(t, "node index out of range", err)
f.store.nodesIndices[[32]byte{'b'}] = 1
f.store.nodesIndices[[32]byte{'c'}] = 2
f.store.nodes = []*Node{
{slot: 1, root: [32]byte{'a'}, parent: NonExistentNode},
{slot: 2, root: [32]byte{'b'}, parent: 100}, // Out of bound parent.
{slot: 3, root: [32]byte{'c'}, parent: 1},
}
_, err = f.AncestorRoot(ctx, [32]byte{'c'}, 1)
require.ErrorContains(t, "node index out of range", err)
}
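Both ancestor tests describe the same walk: start from the requested block and follow parent links until reaching a node whose slot is at or below the requested slot, failing if the chain breaks first. A pointer-based sketch of that walk (assumed ancNode type, not the array-indexed store exercised here) looks like this:

package main

import (
	"errors"
	"fmt"
)

// ancNode is a simplified block node linked to its parent.
type ancNode struct {
	slot   uint64
	root   [32]byte
	parent *ancNode
}

// ancestorRoot walks up the parent pointers from n until it finds the ancestor
// at or below the requested slot. It errors out if the chain ends first.
func ancestorRoot(n *ancNode, slot uint64) ([32]byte, error) {
	for n != nil && n.slot > slot {
		n = n.parent
	}
	if n == nil {
		return [32]byte{}, errors.New("no ancestor at or below the requested slot")
	}
	return n.root, nil
}

func main() {
	a := &ancNode{slot: 1, root: [32]byte{'a'}}
	b := &ancNode{slot: 2, root: [32]byte{'b'}, parent: a}
	c := &ancNode{slot: 3, root: [32]byte{'c'}, parent: b}
	r, err := ancestorRoot(c, 1)
	fmt.Println(r == a.root, err) // true <nil>: a is c's ancestor at slot 1
}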
func TestStore_UpdateCanonicalNodes_WholeList(t *testing.T) {
ctx := context.Background()
f := &ForkChoice{store: &Store{}}
f.store.canonicalNodes = map[[32]byte]bool{}
f.store.nodesIndices = map[[32]byte]uint64{}
f.store.nodes = []*Node{
{slot: 1, root: [32]byte{'a'}, parent: NonExistentNode},
{slot: 2, root: [32]byte{'b'}, parent: 0},
{slot: 3, root: [32]byte{'c'}, parent: 1},
}
f.store.nodesIndices[[32]byte{'c'}] = 2
require.NoError(t, f.store.updateCanonicalNodes(ctx, [32]byte{'c'}))
require.Equal(t, len(f.store.nodes), len(f.store.canonicalNodes))
require.Equal(t, true, f.IsCanonical([32]byte{'a'}))
require.Equal(t, true, f.IsCanonical([32]byte{'b'}))
require.Equal(t, true, f.IsCanonical([32]byte{'c'}))
idxc := f.store.nodesIndices[[32]byte{'c'}]
_, ok := f.store.nodesIndices[[32]byte{'d'}]
require.Equal(t, idxc, uint64(2))
require.Equal(t, false, ok)
}
func TestStore_UpdateCanonicalNodes_ParentAlreadyIn(t *testing.T) {
ctx := context.Background()
f := &ForkChoice{store: &Store{}}
f.store.canonicalNodes = map[[32]byte]bool{}
f.store.nodesIndices = map[[32]byte]uint64{}
f.store.nodes = []*Node{
{},
{slot: 2, root: [32]byte{'b'}, parent: 0},
{slot: 3, root: [32]byte{'c'}, parent: 1},
}
f.store.nodesIndices[[32]byte{'c'}] = 2
f.store.canonicalNodes[[32]byte{'b'}] = true
require.NoError(t, f.store.updateCanonicalNodes(ctx, [32]byte{'c'}))
require.Equal(t, len(f.store.nodes)-1, len(f.store.canonicalNodes))
require.Equal(t, true, f.IsCanonical([32]byte{'c'}))
require.Equal(t, true, f.IsCanonical([32]byte{'b'}))
}
func TestStore_UpdateCanonicalNodes_ContextCancelled(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
f := &ForkChoice{store: &Store{}}
f.store.canonicalNodes = map[[32]byte]bool{}
f.store.nodesIndices = map[[32]byte]uint64{}
f.store.nodes = []*Node{
{slot: 1, root: [32]byte{'a'}, parent: NonExistentNode},
{slot: 2, root: [32]byte{'b'}, parent: 0},
{slot: 3, root: [32]byte{'c'}, parent: 1},
}
f.store.nodesIndices[[32]byte{'c'}] = 2
cancel()
require.ErrorContains(t, "context canceled", f.store.updateCanonicalNodes(ctx, [32]byte{'c'}))
}
func TestStore_UpdateCanonicalNodes_RemoveOldCanonical(t *testing.T) {
ctx := context.Background()
f := &ForkChoice{store: &Store{}}
f.store.canonicalNodes = map[[32]byte]bool{}
f.store.nodesIndices = map[[32]byte]uint64{
[32]byte{'a'}: 0,
[32]byte{'b'}: 1,
[32]byte{'c'}: 2,
[32]byte{'d'}: 3,
[32]byte{'e'}: 4,
}
f.store.nodes = []*Node{
{slot: 1, root: [32]byte{'a'}, parent: NonExistentNode},
{slot: 2, root: [32]byte{'b'}, parent: 0},
{slot: 3, root: [32]byte{'c'}, parent: 1},
{slot: 4, root: [32]byte{'d'}, parent: 1},
{slot: 5, root: [32]byte{'e'}, parent: 3},
}
require.NoError(t, f.store.updateCanonicalNodes(ctx, [32]byte{'c'}))
require.Equal(t, 3, len(f.store.canonicalNodes))
require.NoError(t, f.store.updateCanonicalNodes(ctx, [32]byte{'e'}))
require.Equal(t, 4, len(f.store.canonicalNodes))
require.Equal(t, true, f.IsCanonical([32]byte{'a'}))
require.Equal(t, true, f.IsCanonical([32]byte{'b'}))
require.Equal(t, true, f.IsCanonical([32]byte{'d'}))
require.Equal(t, true, f.IsCanonical([32]byte{'e'}))
_, ok := f.store.canonicalNodes[[32]byte{'c'}]
require.Equal(t, false, ok)
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1, false))
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 1, 1, false))
require.Equal(t, false, f.HasParent(params.BeaconConfig().ZeroHash))
require.Equal(t, true, f.HasParent(indexToHash(1)))
require.Equal(t, true, f.HasParent(indexToHash(2)))
require.Equal(t, true, f.HasParent(indexToHash(3)))
require.Equal(t, false, f.HasParent(indexToHash(4)))
}
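The UpdateCanonicalNodes tests in this hunk exercise canonical-root bookkeeping: marking the ancestry of the new head as canonical, stopping early when an ancestor is already marked, and dropping roots that only belonged to the previous head's branch. The sketch below takes the simplest route and rebuilds the set from scratch on every head change; it is an illustration with assumed names, not the store's incremental update.

package main

import "fmt"

// chainNode is a block node linked to its parent, for this sketch only.
type chainNode struct {
	root   [32]byte
	parent *chainNode
}

// updateCanonicalNodes rebuilds the set of canonical roots by walking the
// ancestry of the new head back to the tree root. Roots that only sit on the
// previously canonical branch naturally drop out of the new set.
func updateCanonicalNodes(head *chainNode) map[[32]byte]bool {
	canonical := make(map[[32]byte]bool)
	for n := head; n != nil; n = n.parent {
		canonical[n.root] = true
	}
	return canonical
}

func main() {
	a := &chainNode{root: [32]byte{'a'}}
	b := &chainNode{root: [32]byte{'b'}, parent: a}
	c := &chainNode{root: [32]byte{'c'}, parent: b} // old head branch
	d := &chainNode{root: [32]byte{'d'}, parent: b} // new head branch
	canonical := updateCanonicalNodes(c)
	fmt.Println(canonical[[32]byte{'c'}]) // true while c is head
	canonical = updateCanonicalNodes(d)
	fmt.Println(canonical[[32]byte{'c'}], canonical[[32]byte{'d'}]) // false true after the head moves
}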


@@ -9,25 +9,23 @@ import (
// ForkChoice defines the overall fork choice store which includes all block nodes, validator's latest votes and balances.
type ForkChoice struct {
store *Store
votes []Vote // tracks individual validator's last vote.
votesLock sync.RWMutex
balances []uint64 // tracks individual validator's last justified balances.
syncedTips *optimisticStore
store *Store
votes []Vote // tracks individual validator's last vote.
votesLock sync.RWMutex
balances []uint64 // tracks individual validator's last justified balances.
}
// Store defines the fork choice store which includes block nodes and the last view of checkpoint information.
type Store struct {
pruneThreshold uint64 // do not prune tree unless threshold is reached.
justifiedEpoch types.Epoch // latest justified epoch in store.
finalizedEpoch types.Epoch // latest finalized epoch in store.
finalizedRoot [fieldparams.RootLength]byte // latest finalized root in store.
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
previousProposerBoostScore uint64 // previous proposer boosted root score.
nodes []*Node // list of block nodes, each node is a representation of one block.
nodesIndices map[[fieldparams.RootLength]byte]uint64 // the root of block node and the nodes index in the list.
canonicalNodes map[[fieldparams.RootLength]byte]bool // the canonical block nodes.
justifiedEpoch types.Epoch // latest justified epoch in store.
finalizedEpoch types.Epoch // latest finalized epoch in store.
pruneThreshold uint64 // do not prune tree unless threshold is reached.
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
previousProposerBoostScore uint64 // previous proposer boosted root score.
treeRootNode *Node // the root node of the store tree.
headNode *Node // last head Node
nodeByRoot map[[fieldparams.RootLength]byte]*Node // nodes indexed by roots.
nodesLock sync.RWMutex
proposerBoostLock sync.RWMutex
}
@@ -37,20 +35,14 @@ type Store struct {
type Node struct {
slot types.Slot // slot of the block converted to the node.
root [fieldparams.RootLength]byte // root of the block converted to the node.
parent uint64 // parent index of this node.
parent *Node // the parent node of this node.
children []*Node // the list of direct children of this Node
justifiedEpoch types.Epoch // justifiedEpoch of this node.
finalizedEpoch types.Epoch // finalizedEpoch of this node.
weight uint64 // weight of this node.
bestChild uint64 // bestChild index of this node.
bestDescendant uint64 // bestDescendant of this node.
graffiti [fieldparams.RootLength]byte // graffiti of the block node.
}
// optimisticStore defines a structure that tracks the tips of the fully
// validated blocks tree.
type optimisticStore struct {
validatedTips map[[32]byte]types.Slot
sync.RWMutex
balance uint64 // the balance that voted for this node directly
weight uint64 // weight of this node: the total balance including children
bestDescendant *Node // bestDescendant node of this node.
optimistic bool // whether the block has been fully validated or not
}
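The struct changes in this hunk replace array offsets (parent, bestChild and bestDescendant held as uint64 indices into a slice, with NonExistentNode as the sentinel) by direct pointers plus a children slice, turning the store into a doubly linked tree indexed by root. A minimal sketch of inserting a block node into such a structure follows; dlNode, dlStore and insert are illustrative names, not this PR's API.

package main

import (
	"errors"
	"fmt"
)

// dlNode links both ways: up to its parent and down to its children.
type dlNode struct {
	slot     uint64
	root     [32]byte
	parent   *dlNode
	children []*dlNode
}

// dlStore indexes every node by its block root.
type dlStore struct {
	treeRootNode *dlNode
	nodeByRoot   map[[32]byte]*dlNode
}

// insert wires a new node into the tree: it is indexed by root, points back at
// its parent, and is appended to the parent's children list.
func (s *dlStore) insert(slot uint64, root, parentRoot [32]byte) (*dlNode, error) {
	if existing, ok := s.nodeByRoot[root]; ok {
		return existing, nil // already known
	}
	parent := s.nodeByRoot[parentRoot] // nil for the very first (tree root) node
	n := &dlNode{slot: slot, root: root, parent: parent}
	if parent == nil {
		if s.treeRootNode != nil {
			return nil, errors.New("unknown parent for a non-root node")
		}
		s.treeRootNode = n
	} else {
		parent.children = append(parent.children, n)
	}
	s.nodeByRoot[root] = n
	return n, nil
}

func main() {
	s := &dlStore{nodeByRoot: make(map[[32]byte]*dlNode)}
	if _, err := s.insert(1, [32]byte{'a'}, [32]byte{}); err != nil {
		panic(err)
	}
	if _, err := s.insert(2, [32]byte{'b'}, [32]byte{'a'}); err != nil {
		panic(err)
	}
	fmt.Println(len(s.treeRootNode.children)) // 1: b hangs off a
}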
// Vote defines an individual validator's vote.
@@ -59,6 +51,3 @@ type Vote struct {
nextRoot [fieldparams.RootLength]byte // next voting root.
nextEpoch types.Epoch // epoch of next voting period.
}
// NonExistentNode defines an unknown node which is used for the array based stateful DAG.
const NonExistentNode = ^uint64(0)
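A Vote records, per validator, the root it last voted for and the root and epoch it votes for next; when the head is recomputed, each validator's balance moves from the old target to the new one before weights are propagated up the tree. The following is a hedged sketch of that single step with assumed names and a simplified layout; the repository computes these deltas differently.

package main

import "fmt"

// vote pairs a validator's previous and next voting targets.
type vote struct {
	currentRoot [32]byte
	nextRoot    [32]byte
}

// applyVotes moves each validator's balance from the root it previously voted
// for to the root it votes for now, accumulating the result as per-root weights.
func applyVotes(weights map[[32]byte]int64, votes []vote, balances []uint64) {
	for i, v := range votes {
		if i >= len(balances) {
			break
		}
		bal := int64(balances[i])
		if v.currentRoot != ([32]byte{}) {
			weights[v.currentRoot] -= bal
		}
		if v.nextRoot != ([32]byte{}) {
			weights[v.nextRoot] += bal
		}
	}
}

func main() {
	weights := map[[32]byte]int64{{'a'}: 2}
	votes := []vote{
		{currentRoot: [32]byte{'a'}, nextRoot: [32]byte{'b'}}, // validator 0 switches a -> b
		{nextRoot: [32]byte{'b'}},                             // validator 1 votes for the first time
	}
	applyVotes(weights, votes, []uint64{1, 1})
	fmt.Println(weights[[32]byte{'a'}], weights[[32]byte{'b'}]) // 1 2
}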


@@ -12,7 +12,6 @@ import (
func TestVotes_CanFindHead(t *testing.T) {
balances := []uint64{1, 1}
f := setup(1, 1)
syncedTips := &optimisticStore{}
// The head should always start at the finalized block.
r, err := f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
@@ -23,7 +22,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 0
// /
// 2 <- head
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, true))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -33,7 +32,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 0
// / \
// head -> 2 1
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, true))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -63,7 +62,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// head -> 2 1
// |
// 3
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1, true))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -99,7 +98,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 3
// |
// 4 <- head
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(4), indexToHash(3), 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(4), indexToHash(3), 1, 1, true))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -115,7 +114,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 4 <- head
// /
// 5 <- justified epoch = 2
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 2))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 2, true))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -131,7 +130,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 4 <- head
// / \
// 5 6 <- justified epoch = 0
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1, true))
// Moved 2 votes to block 5:
// 0
@@ -143,7 +142,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 4
// / \
// 2 votes-> 5 6
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1, true))
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(5), 4)
@@ -164,9 +163,9 @@ func TestVotes_CanFindHead(t *testing.T) {
// 8
// |
// 9
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(7), indexToHash(5), 2, 2))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(8), indexToHash(7), 2, 2))
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(9), indexToHash(8), 2, 2))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(7), indexToHash(5), 2, 2, true))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(8), indexToHash(7), 2, 2, true))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(9), indexToHash(8), 2, 2, true))
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
require.NoError(t, err)
@@ -211,7 +210,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// / \
// 2 votes->9 10
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(9), 5)
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(10), indexToHash(8), 2, 2))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(10), indexToHash(8), 2, 2, true))
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
require.NoError(t, err)
@@ -249,8 +248,8 @@ func TestVotes_CanFindHead(t *testing.T) {
// Verify pruning below the prune threshold does not affect head.
f.store.pruneThreshold = 1000
require.NoError(t, f.store.prune(context.Background(), indexToHash(5), syncedTips))
assert.Equal(t, 11, len(f.store.nodes), "Incorrect nodes length after prune")
require.NoError(t, f.store.prune(context.Background(), indexToHash(5)))
assert.Equal(t, 11, len(f.store.nodeByRoot), "Incorrect nodes length after prune")
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
require.NoError(t, err)
@@ -273,8 +272,8 @@ func TestVotes_CanFindHead(t *testing.T) {
// / \
// 9 10
f.store.pruneThreshold = 1
require.NoError(t, f.store.prune(context.Background(), indexToHash(5), syncedTips))
assert.Equal(t, 5, len(f.store.nodes), "Incorrect nodes length after prune")
require.NoError(t, f.store.prune(context.Background(), indexToHash(5)))
assert.Equal(t, 5, len(f.store.nodeByRoot), "Incorrect nodes length after prune")
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
require.NoError(t, err)
@@ -290,7 +289,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 9 10
// |
// head-> 11
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(11), indexToHash(9), 2, 2))
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(11), indexToHash(9), 2, 2, true))
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
require.NoError(t, err)


@@ -24,7 +24,6 @@ go_library(
"//beacon-chain/db/slasherkv:go_default_library",
"//beacon-chain/deterministic-genesis:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",
"//beacon-chain/gateway:go_default_library",
"//beacon-chain/monitor:go_default_library",


@@ -35,7 +35,7 @@ func configureHistoricalSlasher(cliCtx *cli.Context) {
params.OverrideBeaconConfig(c)
cmdConfig := cmd.Get()
// Allow up to 4096 attestations at a time to be requested from the beacon node.
cmdConfig.MaxRPCPageSize = int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxAttestations)) // lint:ignore uintcast -- Page size should not exceed int64 with these constants.
cmdConfig.MaxRPCPageSize = int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxAttestations))
cmd.Init(cmdConfig)
log.Warnf(
"Setting %d slots per archive point and %d max RPC page size for historical slasher usage. This requires additional storage",


@@ -27,7 +27,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/db/slasherkv"
interopcoldstart "github.com/prysmaticlabs/prysm/beacon-chain/deterministic-genesis"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/gateway"
"github.com/prysmaticlabs/prysm/beacon-chain/monitor"
@@ -120,7 +119,6 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
configureEth1Config(cliCtx)
configureNetwork(cliCtx)
configureInteropConfig(cliCtx)
configureExecutionSetting(cliCtx)
// Initializes any forks here.
params.BeaconConfig().InitializeForkSchedule()
@@ -312,11 +310,8 @@ func (b *BeaconNode) Close() {
}
func (b *BeaconNode) startForkChoice() {
if features.Get().EnableForkChoiceDoublyLinkedTree {
b.forkChoiceStore = doublylinkedtree.New(0, 0)
} else {
b.forkChoiceStore = protoarray.New(0, 0, params.BeaconConfig().ZeroHash)
}
f := protoarray.New(0, 0)
b.forkChoiceStore = f
}
func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
@@ -559,7 +554,6 @@ func (b *BeaconNode) registerBlockchainService() error {
blockchain.WithDatabase(b.db),
blockchain.WithDepositCache(b.depositCache),
blockchain.WithChainStartFetcher(web3Service),
blockchain.WithExecutionEngineCaller(web3Service.EngineAPIClient()),
blockchain.WithAttestationPool(b.attestationPool),
blockchain.WithExitPool(b.exitPool),
blockchain.WithSlashingPool(b.slashingsPool),
@@ -786,7 +780,6 @@ func (b *BeaconNode) registerRPCService() error {
StateGen: b.stateGen,
EnableDebugRPCEndpoints: enableDebugRPCEndpoints,
MaxMsgSize: maxMsgSize,
ExecutionEngineCaller: web3Service.EngineAPIClient(),
})
return b.services.RegisterService(rpcService)
@@ -839,7 +832,6 @@ func (b *BeaconNode) registerGRPCGateway() error {
selfCert := b.cliCtx.String(flags.CertFlag.Name)
maxCallSize := b.cliCtx.Uint64(cmd.GrpcMaxCallRecvMsgSizeFlag.Name)
httpModules := b.cliCtx.String(flags.HTTPModules.Name)
timeout := b.cliCtx.Int(cmd.ApiTimeoutFlag.Name)
if enableDebugRPCEndpoints {
maxCallSize = uint64(math.Max(float64(maxCallSize), debugGrpcMaxMsgSize))
}
@@ -861,7 +853,6 @@ func (b *BeaconNode) registerGRPCGateway() error {
apigateway.WithRemoteCert(selfCert),
apigateway.WithMaxCallRecvMsgSize(maxCallSize),
apigateway.WithAllowedOrigins(allowedOrigins),
apigateway.WithTimeout(uint64(timeout)),
}
if flags.EnableHTTPEthAPI(httpModules) {
opts = append(opts, apigateway.WithApiMiddleware(&apimiddleware.BeaconEndpointFactory{}))


@@ -104,8 +104,8 @@ func (s *Service) broadcastAttestation(ctx context.Context, subnet uint64, att *
span.AddAttributes(
trace.BoolAttribute("hasPeer", hasPeer),
trace.Int64Attribute("slot", int64(att.Data.Slot)), // lint:ignore uintcast -- It's safe to do this for tracing.
trace.Int64Attribute("subnet", int64(subnet)), // lint:ignore uintcast -- It's safe to do this for tracing.
trace.Int64Attribute("slot", int64(att.Data.Slot)),
trace.Int64Attribute("subnet", int64(subnet)),
)
if !hasPeer {
@@ -160,8 +160,8 @@ func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMs
span.AddAttributes(
trace.BoolAttribute("hasPeer", hasPeer),
trace.Int64Attribute("slot", int64(sMsg.Slot)), // lint:ignore uintcast -- It's safe to do this for tracing.
trace.Int64Attribute("subnet", int64(subnet)), // lint:ignore uintcast -- It's safe to do this for tracing.
trace.Int64Attribute("slot", int64(sMsg.Slot)),
trace.Int64Attribute("subnet", int64(subnet)),
)
if !hasPeer {
@@ -213,9 +213,7 @@ func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic
if span.IsRecordingEvents() {
id := hash.FastSum64(buf.Bytes())
messageLen := int64(buf.Len())
// lint:ignore uintcast -- It's safe to do this for tracing.
iid := int64(id)
span.AddMessageSendEvent(iid, messageLen /*uncompressed*/, messageLen /*compressed*/)
span.AddMessageSendEvent(int64(id), messageLen /*uncompressed*/, messageLen /*compressed*/)
}
if err := s.PublishToTopic(ctx, topic+s.Encoding().ProtocolSuffix(), buf.Bytes()); err != nil {
err := errors.Wrap(err, "could not publish message")

Some files were not shown because too many files have changed in this diff.
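The tracing hunks above cast uint64 slot and subnet values to int64 for trace.Int64Attribute, with lint:ignore uintcast comments marking the casts as reviewed. When a value is not known to be small, a guarded conversion avoids silently flipping sign on overflow; the helper below is a generic sketch, not an existing Prysm function.

package main

import (
	"fmt"
	"math"
)

// int64OrError converts a uint64 to int64, refusing values that would overflow
// and flip sign.
func int64OrError(v uint64) (int64, error) {
	if v > math.MaxInt64 {
		return 0, fmt.Errorf("value %d overflows int64", v)
	}
	return int64(v), nil
}

func main() {
	if slot, err := int64OrError(12345); err == nil {
		fmt.Println(slot) // 12345, safe to hand to an int64 tracing attribute
	}
	if _, err := int64OrError(math.MaxUint64); err != nil {
		fmt.Println(err) // overflow is reported instead of going negative
	}
}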