Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-11 06:18:05 -05:00)

Compare commits: process-op...checkpoint (50 commits)
| SHA1 |
|---|
| 77f96f76b5 |
| 6516e95ebc |
| 4790430fb7 |
| 2daded945c |
| e53d9dfc15 |
| 83e29dcabb |
| a16afff75a |
| fb8ac308ec |
| 9341b7d167 |
| 386e747c74 |
| 44f4fea2e3 |
| 0b1a777d62 |
| d99aae264a |
| 82fbf93720 |
| a8ddf23e6a |
| 5461c5b84f |
| 6b5e042865 |
| 03e7acbf9f |
| 6cc88e6454 |
| 65cea17268 |
| 78926f0729 |
| aa86c94a91 |
| 75bb25d515 |
| 1e845bc276 |
| 58f7e942f2 |
| 3f1e3cf82f |
| cbab0b63df |
| 5c838747e7 |
| 86ca70d84f |
| 60003c481b |
| c1197d7881 |
| 693cc79cc9 |
| 23778959eb |
| 564d0213fc |
| 0ec276b7c2 |
| a18cf8d59b |
| 784a349fff |
| 92278e2255 |
| c658c08f8f |
| 213031b4b6 |
| a3f0a53f02 |
| 154b451754 |
| eb6ad5266c |
| d817099f72 |
| c38844a891 |
| c0384ec30a |
| 5d66e57d65 |
| 2186ec4671 |
| c712a40a22 |
| 1ee09fc88c |
3  .bazelrc
@@ -217,3 +217,6 @@ build:remote --remote_local_fallback
# Ignore GoStdLib with remote caching
build --modify_execution_info='GoStdlib.*=+no-remote-cache'

# Set bazel gotag
build --define gotags=bazel

@@ -1 +1 @@
4.2.2
5.0.0
@@ -10,26 +10,26 @@

# Prysm specific remote-cache properties.
#build:remote-cache --disk_cache=
build:remote-cache --remote_download_toplevel
build:remote-cache --remote_download_minimal
build:remote-cache --remote_cache=grpc://bazel-remote-cache:9092
build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
build:remote-cache --remote_local_fallback
build:remote-cache --experimental_remote_cache_async
build:remote-cache --experimental_remote_merkle_tree_cache
build:remote-cache --experimental_action_cache_store_output_metadata
build:remote-cache --experimental_remote_cache_compression
# Enforce stricter environment rules, which eliminates some non-hermetic
# behavior and therefore improves both the remote cache hit rate and the
# correctness and repeatability of the build.
build:remote-cache --incompatible_strict_action_env=true

build --experimental_use_hermetic_linux_sandbox

# Import workspace options.
import %workspace%/.bazelrc

startup --host_jvm_args=-Xmx2g --host_jvm_args=-Xms2g
query --repository_cache=/tmp/repositorycache
query --experimental_repository_cache_hardlinks
build --repository_cache=/tmp/repositorycache
build --experimental_repository_cache_hardlinks
startup --host_jvm_args=-Xmx4g --host_jvm_args=-Xms2g
build --experimental_strict_action_env
build --disk_cache=/tmp/bazelbuilds
build --experimental_multi_threaded_digest
build --sandbox_tmpfs_path=/tmp
build --verbose_failures
build --announce_rc
@@ -3,7 +3,7 @@ load("@com_github_atlassian_bazel_tools//gometalinter:def.bzl", "gometalinter")
load("@com_github_atlassian_bazel_tools//goimports:def.bzl", "goimports")
load("@io_kubernetes_build//defs:run_in_workspace.bzl", "workspace_binary")
load("@io_bazel_rules_go//go:def.bzl", "nogo")
load("@graknlabs_bazel_distribution//common:rules.bzl", "assemble_targz", "assemble_versioned")
load("@vaticle_bazel_distribution//common:rules.bzl", "assemble_targz", "assemble_versioned")
load("@bazel_skylib//rules:common_settings.bzl", "string_setting")

prefix = "github.com/prysmaticlabs/prysm"

@@ -16,6 +16,8 @@ exports_files([
# gazelle:map_kind go_library go_library @prysm//tools/go:def.bzl
# gazelle:map_kind go_test go_test @prysm//tools/go:def.bzl
# gazelle:map_kind go_repository go_repository @prysm//tools/go:def.bzl
# gazelle:build_tags bazel
# gazelle:exclude tools/analyzers/**/testdata/**
gazelle(
    name = "gazelle",
    prefix = prefix,

@@ -125,6 +127,7 @@ nogo(
        "//tools/analyzers/ineffassign:go_default_library",
        "//tools/analyzers/properpermissions:go_default_library",
        "//tools/analyzers/recursivelock:go_default_library",
        "//tools/analyzers/uintcast:go_default_library",
    ] + select({
        # nogo checks that fail with coverage enabled.
        ":coverage_enabled": [],
@@ -117,13 +117,6 @@ http_archive(
    urls = ["https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.76/fuzzit_Linux_x86_64.zip"],
)

git_repository(
    name = "graknlabs_bazel_distribution",
    commit = "962f3a7e56942430c0ec120c24f9e9f2a9c2ce1a",
    remote = "https://github.com/graknlabs/bazel-distribution",
    shallow_since = "1569509514 +0300",
)

load(
    "@io_bazel_rules_docker//repositories:repositories.bzl",
    container_repositories = "repositories",
42  api/client/beacon/BUILD.bazel (new file)
@@ -0,0 +1,42 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "checkpoint.go",
        "client.go",
        "doc.go",
        "errors.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/api/client/beacon",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/rpc/apimiddleware:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//config/params:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/detect:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/block:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prysmaticlabs_eth2_types//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@org_golang_x_mod//semver:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "checkpoint_test.go",
        "client_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//testing/require:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
)
279  api/client/beacon/checkpoint.go (new file)
@@ -0,0 +1,279 @@
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/proto/detect"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"golang.org/x/mod/semver"
|
||||
)
|
||||
|
||||
// WeakSubjectivityData represents the state root, block root and epoch of the BeaconState + SignedBeaconBlock
|
||||
// that falls at the beginning of the current weak subjectivity period. These values can be used to construct
|
||||
// a weak subjectivity checkpoint, or to download a BeaconState+SignedBeaconBlock pair that can be used to bootstrap
|
||||
// a new Beacon Node using Checkpoint Sync.
|
||||
type WeakSubjectivityData struct {
|
||||
BlockRoot [32]byte
|
||||
StateRoot [32]byte
|
||||
Epoch types.Epoch
|
||||
}
|
||||
|
||||
// CheckpointString returns the standard string representation of a Checkpoint for the block root and epoch to which the
|
||||
// WeakSubjectivityData value refers.
|
||||
// The format is a hex-encoded block root, followed by the epoch of the block, separated by a colon. For example:
|
||||
// "0x1c35540cac127315fabb6bf29181f2ae0de1a3fc909d2e76ba771e61312cc49a:74888"
|
||||
func (wsd *WeakSubjectivityData) CheckpointString() string {
|
||||
return fmt.Sprintf("%#x:%d", wsd.BlockRoot, wsd.Epoch)
|
||||
}
|
||||
|
||||
// OriginData represents the BeaconState and SignedBeaconBlock necessary to start an empty Beacon Node
|
||||
// using Checkpoint Sync.
|
||||
type OriginData struct {
|
||||
wsd *WeakSubjectivityData
|
||||
sb []byte
|
||||
bb []byte
|
||||
st state.BeaconState
|
||||
b block.SignedBeaconBlock
|
||||
cf *detect.ConfigFork
|
||||
}
|
||||
|
||||
// WeakSubjectivity returns the WeakSubjectivityData determined by DownloadOriginData.
|
||||
func (od *OriginData) WeakSubjectivity() *WeakSubjectivityData {
|
||||
return od.wsd
|
||||
}
|
||||
|
||||
// SaveBlock saves the downloaded block to a unique file in the given path.
|
||||
// For readability and collision avoidance, the file name includes: type, config name, slot and root
|
||||
func (od *OriginData) SaveBlock(dir string) (string, error) {
|
||||
statePath := path.Join(dir, fname("state", od.cf, od.st.Slot(), od.wsd.BlockRoot))
|
||||
return statePath, os.WriteFile(statePath, od.sb, 0600)
|
||||
}
|
||||
|
||||
// SaveState saves the downloaded state to a unique file in the given path.
|
||||
// For readability and collision avoidance, the file name includes: type, config name, slot and root
|
||||
func (od *OriginData) SaveState(dir string) (string, error) {
|
||||
statePath := path.Join(dir, fname("state", od.cf, od.st.Slot(), od.wsd.StateRoot))
|
||||
return statePath, os.WriteFile(statePath, od.sb, 0600)
|
||||
}
|
||||
|
||||
// StateBytes returns the ssz-encoded bytes of the downloaded BeaconState value.
|
||||
func (od *OriginData) StateBytes() []byte {
|
||||
return od.sb
|
||||
}
|
||||
|
||||
// BlockBytes returns the ssz-encoded bytes of the downloaded SignedBeaconBlock value.
|
||||
func (od *OriginData) BlockBytes() []byte {
|
||||
return od.bb
|
||||
}
|
||||
|
||||
func fname(prefix string, cf *detect.ConfigFork, slot types.Slot, root [32]byte) string {
|
||||
return fmt.Sprintf("%s_%s_%s_%d-%#x.ssz", prefix, cf.ConfigName.String(), cf.Fork.String(), slot, root)
|
||||
}
|
||||
|
||||
// this method downloads the head state, which can be used to find the correct chain config
|
||||
// and use prysm's helper methods to compute the latest weak subjectivity epoch.
|
||||
func getWeakSubjectivityEpochFromHead(ctx context.Context, client *Client) (types.Epoch, error) {
|
||||
headBytes, err := client.GetState(ctx, IdHead)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
cf, err := detect.ByState(headBytes)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "error detecting chain config for beacon state")
|
||||
}
|
||||
log.Printf("detected supported config in remote head state, name=%s, fork=%s", cf.ConfigName.String(), cf.Fork)
|
||||
headState, err := cf.UnmarshalBeaconState(headBytes)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "error unmarshaling state to correct version")
|
||||
}
|
||||
|
||||
// LatestWeakSubjectivityEpoch uses package-level vars from the params package, so we need to override it
|
||||
params.OverrideBeaconConfig(cf.Config)
|
||||
epoch, err := helpers.LatestWeakSubjectivityEpoch(ctx, headState)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "error computing the weak subjectivity epoch from head state")
|
||||
}
|
||||
|
||||
log.Printf("(computed client-side) weak subjectivity epoch = %d", epoch)
|
||||
return epoch, nil
|
||||
}
|
||||
|
||||
const (
|
||||
prysmMinimumVersion = "v2.0.7"
|
||||
prysmImplementationName = "Prysm"
|
||||
)
|
||||
|
||||
// ErrUnsupportedPrysmCheckpointVersion indicates remote beacon node can't be used for checkpoint retrieval.
|
||||
var ErrUnsupportedPrysmCheckpointVersion = errors.New("node does not meet minimum version requirements for checkpoint retrieval")
|
||||
|
||||
// for older endpoints or clients that do not support the weak_subjectivity api method (only prysm at release time)
|
||||
// we gather the necessary data for a checkpoint sync by:
|
||||
// - inspecting the remote server's head state and computing the weak subjectivity epoch locally
|
||||
// - requesting the state at the first slot of the epoch
|
||||
// - using hash_tree_root(state.latest_block_header) to compute the block the state integrates
|
||||
// - requesting that block by its root
|
||||
func downloadBackwardsCompatible(ctx context.Context, client *Client) (*OriginData, error) {
|
||||
log.Print("falling back to generic checkpoint derivation, weak_subjectivity API not supported by server")
|
||||
nv, err := client.GetNodeVersion(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to proceed with fallback method without confirming node version")
|
||||
}
|
||||
if nv.implementation == prysmImplementationName && semver.Compare(nv.semver, prysmMinimumVersion) < 0 {
|
||||
return nil, errors.Wrapf(ErrUnsupportedPrysmCheckpointVersion, "%s < minimum (%s)", nv.semver, prysmMinimumVersion)
|
||||
}
|
||||
epoch, err := getWeakSubjectivityEpochFromHead(ctx, client)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error computing weak subjectivity epoch via head state inspection")
|
||||
}
|
||||
|
||||
// use first slot of the epoch for the state slot
|
||||
slot, err := slots.EpochStart(epoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Error computing first slot of epoch=%d", epoch)
|
||||
}
|
||||
|
||||
log.Printf("requesting checkpoint state at slot %d", slot)
|
||||
// get the state at the first slot of the epoch
|
||||
stateBytes, err := client.GetState(ctx, IdFromSlot(slot))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to request state by slot from api, slot=%d", slot)
|
||||
}
|
||||
|
||||
// ConfigFork is used to unmarshal the BeaconState so we can read the block root in latest_block_header
|
||||
cf, err := detect.ByState(stateBytes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error detecting chain config for beacon state")
|
||||
}
|
||||
log.Printf("detected supported config in checkpoint state, name=%s, fork=%s", cf.ConfigName.String(), cf.Fork)
|
||||
|
||||
st, err := cf.UnmarshalBeaconState(stateBytes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error using detected config fork to unmarshal state bytes")
|
||||
}
|
||||
|
||||
// compute state and block roots
|
||||
stateRoot, err := st.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error computing hash_tree_root of state")
|
||||
}
|
||||
|
||||
header := st.LatestBlockHeader()
|
||||
header.StateRoot = stateRoot[:]
|
||||
computedBlockRoot, err := header.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error while computing block root using state data")
|
||||
}
|
||||
|
||||
blockBytes, err := client.GetBlock(ctx, IdFromRoot(computedBlockRoot))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error requesting block by slot = %d", slot)
|
||||
}
|
||||
block, err := cf.UnmarshalBeaconBlock(blockBytes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
|
||||
}
|
||||
blockRoot, err := block.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error computing hash_tree_root for block obtained via root")
|
||||
}
|
||||
|
||||
log.Printf("BeaconState slot=%d, Block slot=%d", st.Slot(), block.Block().Slot())
|
||||
log.Printf("BeaconState htr=%#xd, Block state_root=%#x", stateRoot, block.Block().StateRoot())
|
||||
log.Printf("BeaconBlock root computed from state=%#x, Block HTR=%#x", computedBlockRoot, blockRoot)
|
||||
|
||||
return &OriginData{
|
||||
wsd: &WeakSubjectivityData{
|
||||
BlockRoot: blockRoot,
|
||||
StateRoot: stateRoot,
|
||||
Epoch: epoch,
|
||||
},
|
||||
st: st,
|
||||
sb: stateBytes,
|
||||
b: block,
|
||||
bb: blockBytes,
|
||||
cf: cf,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DownloadOriginData attempts to use the proposed weak_subjectivity beacon node api
|
||||
// to obtain the weak_subjectivity metadata (epoch, block_root, state_root) needed to sync
|
||||
// a beacon node from the canonical weak subjectivity checkpoint. As this is a proposed API
|
||||
// that will only be supported by prysm at first, in the event of a 404 we fall back to using a
|
||||
// different technique where we first download the head state which can be used to compute the
|
||||
// weak subjectivity epoch on the client side.
|
||||
func DownloadOriginData(ctx context.Context, client *Client) (*OriginData, error) {
|
||||
ws, err := client.GetWeakSubjectivity(ctx)
|
||||
if err != nil {
|
||||
// a 404 is expected if querying an endpoint that doesn't support the weak subjectivity checkpoint api
|
||||
if !errors.Is(err, ErrNotFound) {
|
||||
return nil, errors.Wrap(err, "unexpected API response for prysm-only weak subjectivity checkpoint API")
|
||||
}
|
||||
// ok so it's a 404, use the head state method
|
||||
return downloadBackwardsCompatible(ctx, client)
|
||||
}
|
||||
log.Printf("server weak subjectivity checkpoint response - epoch=%d, block_root=%#x, state_root=%#x", ws.Epoch, ws.BlockRoot, ws.StateRoot)
|
||||
|
||||
// use first slot of the epoch for the block slot
|
||||
slot, err := slots.EpochStart(ws.Epoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Error computing first slot of epoch=%d", ws.Epoch)
|
||||
}
|
||||
log.Printf("requesting checkpoint state at slot %d", slot)
|
||||
|
||||
stateBytes, err := client.GetState(ctx, IdFromSlot(slot))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to request state by slot from api, slot=%d", slot)
|
||||
}
|
||||
cf, err := detect.ByState(stateBytes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error detecting chain config for beacon state")
|
||||
}
|
||||
log.Printf("detected supported config in checkpoint state, name=%s, fork=%s", cf.ConfigName.String(), cf.Fork)
|
||||
|
||||
state, err := cf.UnmarshalBeaconState(stateBytes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error using detected config fork to unmarshal state bytes")
|
||||
}
|
||||
stateRoot, err := state.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to compute htr for state at slot=%d", slot)
|
||||
}
|
||||
|
||||
blockRoot, err := state.LatestBlockHeader().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error computing hash_tree_root of latest_block_header")
|
||||
}
|
||||
blockBytes, err := client.GetBlock(ctx, IdFromRoot(ws.BlockRoot))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error requesting block by slot = %d", slot)
|
||||
}
|
||||
block, err := cf.UnmarshalBeaconBlock(blockBytes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
|
||||
}
|
||||
realBlockRoot, err := block.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block")
|
||||
}
|
||||
log.Printf("BeaconState slot=%d, Block slot=%d", state.Slot(), block.Block().Slot())
|
||||
log.Printf("BeaconState htr=%#xd, Block state_root=%#x", stateRoot, block.Block().StateRoot())
|
||||
log.Printf("BeaconState latest_block_header htr=%#xd, block htr=%#x", blockRoot, realBlockRoot)
|
||||
return &OriginData{
|
||||
wsd: ws,
|
||||
st: state,
|
||||
b: block,
|
||||
sb: stateBytes,
|
||||
bb: blockBytes,
|
||||
cf: cf,
|
||||
}, nil
|
||||
}
|
||||
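The checkpoint.go helpers above are designed to be driven by a small caller: construct a Client, let DownloadOriginData pick the derivation path (the prysm-only weak_subjectivity endpoint, or the head-state fallback on a 404), then persist the ssz-encoded state and block. A minimal, hypothetical usage sketch follows; it is not part of this diff, and the host, timeout, and output directory are placeholder values.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/prysmaticlabs/prysm/api/client/beacon"
)

func main() {
	ctx := context.Background()
	// NewClient accepts a bare host:port and assumes http unless a full URL is given.
	client, err := beacon.NewClient("localhost:3500", beacon.WithTimeout(2*time.Minute))
	if err != nil {
		log.Fatal(err)
	}
	// DownloadOriginData tries the prysm-only weak_subjectivity endpoint first and
	// falls back to client-side head-state derivation when it gets a 404.
	od, err := beacon.DownloadOriginData(ctx, client)
	if err != nil {
		log.Fatal(err)
	}
	statePath, err := od.SaveState(".")
	if err != nil {
		log.Fatal(err)
	}
	blockPath, err := od.SaveBlock(".")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("saved %s and %s, checkpoint=%s",
		statePath, blockPath, od.WeakSubjectivity().CheckpointString())
}
```

Per the fname helper's comment, the saved files are named by type, config name, slot, and root, so repeated runs against different networks should not collide.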
84  api/client/beacon/checkpoint_test.go (new file)
@@ -0,0 +1,84 @@
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
|
||||
type testRT struct {
|
||||
rt func(*http.Request) (*http.Response, error)
|
||||
}
|
||||
|
||||
func (rt *testRT) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
if rt.rt != nil {
|
||||
return rt.rt(req)
|
||||
}
|
||||
return nil, errors.New("RoundTripper not implemented")
|
||||
}
|
||||
|
||||
var _ http.RoundTripper = &testRT{}
|
||||
|
||||
func marshalToEnvelope(val interface{}) ([]byte, error) {
|
||||
raw, err := json.Marshal(val)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error marsaling value to place in data envelope")
|
||||
}
|
||||
env := struct {
|
||||
Data json.RawMessage `json:"data"`
|
||||
}{
|
||||
Data: raw,
|
||||
}
|
||||
return json.Marshal(env)
|
||||
}
|
||||
|
||||
func TestMarshalToEnvelope(t *testing.T) {
|
||||
d := struct {
|
||||
Version string `json:"version"`
|
||||
}{
|
||||
Version: "Prysm/v2.0.5 (linux amd64)",
|
||||
}
|
||||
encoded, err := marshalToEnvelope(d)
|
||||
require.NoError(t, err)
|
||||
expected := `{"data":{"version":"Prysm/v2.0.5 (linux amd64)"}}`
|
||||
require.Equal(t, expected, string(encoded))
|
||||
}
|
||||
|
||||
func TestFallbackVersionCheck(t *testing.T) {
|
||||
c := &Client{
|
||||
hc: &http.Client{},
|
||||
host: "localhost:3500",
|
||||
scheme: "http",
|
||||
}
|
||||
c.hc.Transport = &testRT{rt: func(req *http.Request) (*http.Response, error) {
|
||||
res := &http.Response{Request: req}
|
||||
switch req.URL.Path {
|
||||
case get_node_version_path:
|
||||
res.StatusCode = http.StatusOK
|
||||
b := bytes.NewBuffer(nil)
|
||||
d := struct {
|
||||
Version string `json:"version"`
|
||||
}{
|
||||
Version: "Prysm/v2.0.5 (linux amd64)",
|
||||
}
|
||||
encoded, err := marshalToEnvelope(d)
|
||||
require.NoError(t, err)
|
||||
b.Write(encoded)
|
||||
res.Body = io.NopCloser(b)
|
||||
case get_weak_subjectivity_path:
|
||||
res.StatusCode = http.StatusNotFound
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}}
|
||||
|
||||
ctx := context.Background()
|
||||
_, err := DownloadOriginData(ctx, c)
|
||||
require.ErrorIs(t, err, ErrUnsupportedPrysmCheckpointVersion)
|
||||
}
|
||||
418  api/client/beacon/client.go (new file)
@@ -0,0 +1,418 @@
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/rpc/apimiddleware"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
get_signed_block_path = "/eth/v2/beacon/blocks"
|
||||
get_block_root_path = "/eth/v1/beacon/blocks/{{.Id}}/root"
|
||||
get_fork_for_state_path = "/eth/v1/beacon/states/{{.Id}}/fork"
|
||||
get_weak_subjectivity_path = "/eth/v1/beacon/weak_subjectivity"
|
||||
get_fork_schedule_path = "/eth/v1/config/fork_schedule"
|
||||
get_state_path = "/eth/v2/debug/beacon/states"
|
||||
get_node_version_path = "/eth/v1/node/version"
|
||||
)
|
||||
|
||||
// StateOrBlockId represents the block_id / state_id parameters that several of the Eth Beacon API methods accept.
|
||||
// StateOrBlockId constants are defined for named identifiers, and helper methods are provided
|
||||
// for slot and root identifiers. Example text from the Eth Beacon Node API documentation:
|
||||
//
|
||||
// "Block identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
|
||||
// <slot>, <hex encoded blockRoot with 0x prefix>."
|
||||
type StateOrBlockId string
|
||||
|
||||
const (
|
||||
IdFinalized StateOrBlockId = "finalized"
|
||||
IdGenesis StateOrBlockId = "genesis"
|
||||
IdHead StateOrBlockId = "head"
|
||||
IdJustified StateOrBlockId = "finalized"
|
||||
)
|
||||
|
||||
// IdFromRoot encodes a block root in the format expected by the API in places where a root can be used to identify
|
||||
// a BeaconState or SignedBeaconBlock.
|
||||
func IdFromRoot(r [32]byte) StateOrBlockId {
|
||||
return StateOrBlockId(fmt.Sprintf("%#x", r))
|
||||
}
|
||||
|
||||
// IdFromSlot encodes a Slot in the format expected by the API in places where a slot can be used to identify
|
||||
// a BeaconState or SignedBeaconBlock.
|
||||
func IdFromSlot(s types.Slot) StateOrBlockId {
|
||||
return StateOrBlockId(strconv.FormatUint(uint64(s), 10))
|
||||
}
|
||||
|
||||
// idTemplate is used to create template functions that can interpolate StateOrBlockId values.
|
||||
func idTemplate(ts string) func(StateOrBlockId) string {
|
||||
t := template.Must(template.New("").Parse(ts))
|
||||
f := func(id StateOrBlockId) string {
|
||||
b := bytes.NewBuffer(nil)
|
||||
err := t.Execute(b, struct{ Id string }{Id: string(id)})
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("invalid idTemplate: %s", ts))
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
// run the template to ensure that it is valid
|
||||
// this should happen at load time (using package-scoped vars) to ensure runtime errors aren't possible
|
||||
_ = f(IdGenesis)
|
||||
return f
|
||||
}
|
||||
|
||||
// ClientOpt is a functional option for the Client type (http.Client wrapper)
|
||||
type ClientOpt func(*Client)
|
||||
|
||||
// WithTimeout sets the .Timeout attribute of the wrapped http.Client.
|
||||
func WithTimeout(timeout time.Duration) ClientOpt {
|
||||
return func(c *Client) {
|
||||
c.hc.Timeout = timeout
|
||||
}
|
||||
}
|
||||
|
||||
// Client provides a collection of helper methods for calling the Eth Beacon Node API endpoints.
|
||||
type Client struct {
|
||||
hc *http.Client
|
||||
host string
|
||||
scheme string
|
||||
}
|
||||
|
||||
// NewClient constructs a new client with the provided options (ex WithTimeout).
|
||||
// `host` is the base host + port used to construct request urls. This value can be
|
||||
// a URL string, or NewClient will assume an http endpoint if just `host:port` is used.
|
||||
func NewClient(host string, opts ...ClientOpt) (*Client, error) {
|
||||
host, err := validHostname(host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c := &Client{
|
||||
hc: &http.Client{},
|
||||
scheme: "http",
|
||||
host: host,
|
||||
}
|
||||
for _, o := range opts {
|
||||
o(c)
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func validHostname(h string) (string, error) {
|
||||
// try to parse as url (being permissive)
|
||||
u, err := url.Parse(h)
|
||||
if err == nil && u.Host != "" {
|
||||
return u.Host, nil
|
||||
}
|
||||
// try to parse as host:port
|
||||
host, port, err := net.SplitHostPort(h)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return fmt.Sprintf("%s:%s", host, port), nil
|
||||
}
|
||||
|
||||
func (c *Client) urlForPath(methodPath string) *url.URL {
|
||||
u := &url.URL{
|
||||
Scheme: c.scheme,
|
||||
Host: c.host,
|
||||
}
|
||||
u.Path = path.Join(u.Path, methodPath)
|
||||
return u
|
||||
}
|
||||
|
||||
type reqOption func(*http.Request)
|
||||
|
||||
func withSSZEncoding() reqOption {
|
||||
return func(req *http.Request) {
|
||||
req.Header.Set("Accept", "application/octet-stream")
|
||||
}
|
||||
}
|
||||
|
||||
// get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
|
||||
func (c *Client) get(ctx context.Context, path string, opts ...reqOption) ([]byte, error) {
|
||||
u := c.urlForPath(path)
|
||||
log.Printf("requesting %s", u.String())
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, o := range opts {
|
||||
o(req)
|
||||
}
|
||||
r, err := c.hc.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
err = r.Body.Close()
|
||||
}()
|
||||
if r.StatusCode != http.StatusOK {
|
||||
return nil, non200Err(r)
|
||||
}
|
||||
b, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error reading http response body from GetBlock")
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// GetBlock retrieves the SignedBeaconBlock for the given block id.
|
||||
// Block identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
|
||||
// <slot>, <hex encoded blockRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
|
||||
// for the named identifiers.
|
||||
// The return value contains the ssz-encoded bytes.
|
||||
func (c *Client) GetBlock(ctx context.Context, blockId StateOrBlockId) ([]byte, error) {
|
||||
blockPath := path.Join(get_signed_block_path, string(blockId))
|
||||
b, err := c.get(ctx, blockPath, withSSZEncoding())
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error requesting state by id = %s", blockId)
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
var getBlockRootTpl = idTemplate(get_block_root_path)
|
||||
|
||||
// GetBlockRoot retrieves the hash_tree_root of the BeaconBlock for the given block id.
|
||||
// Block identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
|
||||
// <slot>, <hex encoded blockRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
|
||||
// for the named identifiers.
|
||||
func (c *Client) GetBlockRoot(ctx context.Context, blockId StateOrBlockId) ([32]byte, error) {
|
||||
rootPath := getBlockRootTpl(blockId)
|
||||
b, err := c.get(ctx, rootPath)
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrapf(err, "error requesting block root by id = %s", blockId)
|
||||
}
|
||||
jsonr := &struct{ Data struct{ Root string } }{}
|
||||
err = json.Unmarshal(b, jsonr)
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "error decoding json data from get block root response")
|
||||
}
|
||||
rs, err := hexutil.Decode(jsonr.Data.Root)
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, fmt.Sprintf("error decoding hex-encoded value %s", jsonr.Data.Root))
|
||||
}
|
||||
return bytesutil.ToBytes32(rs), nil
|
||||
}
|
||||
|
||||
var getForkTpl = idTemplate(get_fork_for_state_path)
|
||||
|
||||
// GetFork queries the Beacon Node API for the Fork from the state identified by stateId.
|
||||
// Block identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
|
||||
// <slot>, <hex encoded blockRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
|
||||
// for the named identifiers.
|
||||
func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fork, error) {
|
||||
body, err := c.get(ctx, getForkTpl(stateId))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error requesting fork by state id = %s", stateId)
|
||||
}
|
||||
fr := &forkResponse{}
|
||||
dataWrapper := &struct{ Data *forkResponse }{Data: fr}
|
||||
err = json.Unmarshal(body, dataWrapper)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error decoding json response in GetFork")
|
||||
}
|
||||
|
||||
return fr.Fork()
|
||||
}
|
||||
|
||||
// GetForkSchedule retrieves all forks, past, present and future, of which this node is aware.
|
||||
func (c *Client) GetForkSchedule(ctx context.Context) (params.OrderedForkSchedule, error) {
|
||||
body, err := c.get(ctx, get_fork_schedule_path)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error requesting fork schedule")
|
||||
}
|
||||
fsr := &forkScheduleResponse{}
|
||||
err = json.Unmarshal(body, fsr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ofs, err := fsr.OrderedForkSchedule()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, fmt.Sprintf("problem unmarshaling %s response", get_fork_schedule_path))
|
||||
}
|
||||
return ofs, nil
|
||||
}
|
||||
|
||||
type NodeVersion struct {
|
||||
implementation string
|
||||
semver string
|
||||
systemInfo string
|
||||
}
|
||||
|
||||
var versionRE = regexp.MustCompile(`^(\w+)\/(v\d+\.\d+\.\d+) \((.*)\)$`)
|
||||
|
||||
func parseNodeVersion(v string) (*NodeVersion, error) {
|
||||
groups := versionRE.FindStringSubmatch(v)
|
||||
if len(groups) != 4 {
|
||||
return nil, errors.Wrapf(ErrInvalidNodeVersion, "could not be parsed: %s", v)
|
||||
}
|
||||
return &NodeVersion{
|
||||
implementation: groups[1],
|
||||
semver: groups[2],
|
||||
systemInfo: groups[3],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetNodeVersion requests that the beacon node identify information about its implementation in a format
|
||||
// similar to an HTTP User-Agent field. ex: Lighthouse/v0.1.5 (Linux x86_64)
|
||||
func (c *Client) GetNodeVersion(ctx context.Context) (*NodeVersion, error) {
|
||||
b, err := c.get(ctx, get_node_version_path)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error requesting node version")
|
||||
}
|
||||
d := struct {
|
||||
Data struct {
|
||||
Version string `json:"version"`
|
||||
} `json:"data"`
|
||||
}{}
|
||||
err = json.Unmarshal(b, &d)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error unmarshaling response body: %s", string(b))
|
||||
}
|
||||
return parseNodeVersion(d.Data.Version)
|
||||
}
|
||||
|
||||
// GetState retrieves the BeaconState for the given state id.
|
||||
// State identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
|
||||
// <slot>, <hex encoded stateRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
|
||||
// for the named identifiers.
|
||||
// The return value contains the ssz-encoded bytes.
|
||||
func (c *Client) GetState(ctx context.Context, stateId StateOrBlockId) ([]byte, error) {
|
||||
statePath := path.Join(get_state_path, string(stateId))
|
||||
b, err := c.get(ctx, statePath, withSSZEncoding())
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error requesting state by id = %s", stateId)
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// GetWeakSubjectivity calls a proposed API endpoint that is unique to prysm
|
||||
// This api method does the following:
|
||||
// - computes weak subjectivity epoch
|
||||
// - finds the highest non-skipped block preceding the epoch
|
||||
// - returns the htr of the found block and returns this + the value of state_root from the block
|
||||
func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData, error) {
|
||||
body, err := c.get(ctx, get_weak_subjectivity_path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
v := &apimiddleware.WeakSubjectivityResponse{}
|
||||
err = json.Unmarshal(body, v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
epoch, err := strconv.ParseUint(v.Data.Checkpoint.Epoch, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blockRoot, err := hexutil.Decode(v.Data.Checkpoint.Root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stateRoot, err := hexutil.Decode(v.Data.StateRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &WeakSubjectivityData{
|
||||
Epoch: types.Epoch(epoch),
|
||||
BlockRoot: bytesutil.ToBytes32(blockRoot),
|
||||
StateRoot: bytesutil.ToBytes32(stateRoot),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func non200Err(response *http.Response) error {
|
||||
bodyBytes, err := ioutil.ReadAll(response.Body)
|
||||
var body string
|
||||
if err != nil {
|
||||
body = "(Unable to read response body.)"
|
||||
} else {
|
||||
body = "response body:\n" + string(bodyBytes)
|
||||
}
|
||||
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
|
||||
switch response.StatusCode {
|
||||
case 404:
|
||||
return errors.Wrap(ErrNotFound, msg)
|
||||
default:
|
||||
return errors.Wrap(ErrNotOK, msg)
|
||||
}
|
||||
}
|
||||
|
||||
type forkResponse struct {
|
||||
PreviousVersion string `json:"previous_version"`
|
||||
CurrentVersion string `json:"current_version"`
|
||||
Epoch string `json:"epoch"`
|
||||
}
|
||||
|
||||
func (f *forkResponse) Fork() (*ethpb.Fork, error) {
|
||||
epoch, err := strconv.ParseUint(f.Epoch, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cSlice, err := hexutil.Decode(f.CurrentVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(cSlice) != 4 {
|
||||
return nil, fmt.Errorf("got %d byte version for CurrentVersion, expected 4 bytes. hex=%s", len(cSlice), f.CurrentVersion)
|
||||
}
|
||||
pSlice, err := hexutil.Decode(f.PreviousVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(pSlice) != 4 {
|
||||
return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(pSlice), f.PreviousVersion)
|
||||
}
|
||||
return &ethpb.Fork{
|
||||
CurrentVersion: cSlice,
|
||||
PreviousVersion: pSlice,
|
||||
Epoch: types.Epoch(epoch),
|
||||
}, nil
|
||||
}
|
||||
|
||||
type forkScheduleResponse struct {
|
||||
Data []forkResponse
|
||||
}
|
||||
|
||||
func (fsr *forkScheduleResponse) OrderedForkSchedule() (params.OrderedForkSchedule, error) {
|
||||
ofs := make(params.OrderedForkSchedule, 0)
|
||||
for _, d := range fsr.Data {
|
||||
epoch, err := strconv.Atoi(d.Epoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vSlice, err := hexutil.Decode(d.CurrentVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(vSlice) != 4 {
|
||||
return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(vSlice), d.CurrentVersion)
|
||||
}
|
||||
version := bytesutil.ToBytes4(vSlice)
|
||||
ofs = append(ofs, params.ForkScheduleEntry{
|
||||
Version: version,
|
||||
Epoch: types.Epoch(uint64(epoch)),
|
||||
})
|
||||
}
|
||||
sort.Sort(ofs)
|
||||
return ofs, nil
|
||||
}
|
||||
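Beyond checkpoint sync, client.go exposes typed getters over the standard Beacon API endpoints named in the path constants above. A short, hypothetical example of calling two of them (not part of this diff; the host is a placeholder):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/prysmaticlabs/prysm/api/client/beacon"
)

func main() {
	ctx := context.Background()
	client, err := beacon.NewClient("http://localhost:3500")
	if err != nil {
		log.Fatal(err)
	}
	// Named identifiers such as IdHead and IdFinalized are exported constants;
	// slots and roots can be converted with IdFromSlot / IdFromRoot.
	root, err := client.GetBlockRoot(ctx, beacon.IdFinalized)
	if err != nil {
		log.Fatal(err)
	}
	fork, err := client.GetFork(ctx, beacon.IdHead)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("finalized block root=%#x, head fork epoch=%d\n", root, fork.Epoch)
}
```

GetState and GetBlock request ssz bytes by setting the Accept: application/octet-stream header via withSSZEncoding, while the JSON endpoints are decoded into the small response structs defined at the bottom of the file.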
58  api/client/beacon/client_test.go (new file)
@@ -0,0 +1,58 @@
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
|
||||
func TestParseNodeVersion(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
v string
|
||||
err error
|
||||
nv *NodeVersion
|
||||
}{
|
||||
{
|
||||
name: "empty string",
|
||||
v: "",
|
||||
err: ErrInvalidNodeVersion,
|
||||
},
|
||||
{
|
||||
name: "Prysm as the version string",
|
||||
v: "Prysm",
|
||||
err: ErrInvalidNodeVersion,
|
||||
},
|
||||
{
|
||||
name: "semver only",
|
||||
v: "v2.0.6",
|
||||
err: ErrInvalidNodeVersion,
|
||||
},
|
||||
{
|
||||
name: "implementation and semver only",
|
||||
v: "Prysm/v2.0.6",
|
||||
err: ErrInvalidNodeVersion,
|
||||
},
|
||||
{
|
||||
name: "complete version",
|
||||
v: "Prysm/v2.0.6 (linux amd64)",
|
||||
nv: &NodeVersion{
|
||||
implementation: "Prysm",
|
||||
semver: "v2.0.6",
|
||||
systemInfo: "linux amd64",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
nv, err := parseNodeVersion(c.v)
|
||||
if c.err != nil {
|
||||
require.ErrorIs(t, err, c.err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, c.nv, nv)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
6  api/client/beacon/doc.go (new file)
@@ -0,0 +1,6 @@
/*
Package beacon provides a client for interacting with the standard Eth Beacon Node API.
Interactive swagger documentation for the API is available here: https://ethereum.github.io/beacon-APIs/

*/
package beacon
12  api/client/beacon/errors.go (new file)
@@ -0,0 +1,12 @@
package beacon

import "github.com/pkg/errors"

// ErrNotOK is used to indicate when an HTTP request to the Beacon Node API failed with any non-2xx response code.
// More specific errors may be returned, but an error in reaction to a non-2xx response will always wrap ErrNotOK.
var ErrNotOK = errors.New("did not receive 2xx response from API")

// ErrNotFound specifically means that a '404 - NOT FOUND' response was received from the API.
var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")

var ErrInvalidNodeVersion = errors.New("invalid node version response")
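Because ErrNotFound wraps ErrNotOK, callers can branch on either sentinel with errors.Is, which is how DownloadOriginData above decides when to fall back to head-state derivation. A hedged illustration of that pattern in hypothetical caller code (not part of this diff):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/prysmaticlabs/prysm/api/client/beacon"
)

// classify is a hypothetical helper showing the intended errors.Is pattern:
// ErrNotFound wraps ErrNotOK, so a 404 matches both sentinels.
func classify(err error) string {
	switch {
	case err == nil:
		return "ok"
	case errors.Is(err, beacon.ErrNotFound):
		return "endpoint missing (404) - fall back to another derivation"
	case errors.Is(err, beacon.ErrNotOK):
		return "other non-2xx response - give up"
	default:
		return "transport or decoding error"
	}
}

func main() {
	fmt.Println(classify(beacon.ErrNotFound))
}
```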
@@ -16,10 +16,13 @@ func TestHeadSlot_DataRace(t *testing.T) {
|
||||
s := &Service{
|
||||
cfg: &config{BeaconDB: beaconDB},
|
||||
}
|
||||
wait := make(chan struct{})
|
||||
go func() {
|
||||
defer close(wait)
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
|
||||
}()
|
||||
s.HeadSlot()
|
||||
<-wait
|
||||
}
|
||||
|
||||
func TestHeadRoot_DataRace(t *testing.T) {
|
||||
@@ -28,11 +31,14 @@ func TestHeadRoot_DataRace(t *testing.T) {
|
||||
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
|
||||
head: &head{root: [32]byte{'A'}},
|
||||
}
|
||||
wait := make(chan struct{})
|
||||
go func() {
|
||||
defer close(wait)
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
|
||||
}()
|
||||
_, err := s.HeadRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
<-wait
|
||||
}
|
||||
|
||||
func TestHeadBlock_DataRace(t *testing.T) {
|
||||
@@ -41,11 +47,14 @@ func TestHeadBlock_DataRace(t *testing.T) {
|
||||
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
|
||||
head: &head{block: wrapper.WrappedPhase0SignedBeaconBlock(&ethpb.SignedBeaconBlock{})},
|
||||
}
|
||||
wait := make(chan struct{})
|
||||
go func() {
|
||||
defer close(wait)
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
|
||||
}()
|
||||
_, err := s.HeadBlock(context.Background())
|
||||
require.NoError(t, err)
|
||||
<-wait
|
||||
}
|
||||
|
||||
func TestHeadState_DataRace(t *testing.T) {
|
||||
@@ -53,9 +62,12 @@ func TestHeadState_DataRace(t *testing.T) {
|
||||
s := &Service{
|
||||
cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
|
||||
}
|
||||
wait := make(chan struct{})
|
||||
go func() {
|
||||
defer close(wait)
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
|
||||
}()
|
||||
_, err := s.HeadState(context.Background())
|
||||
require.NoError(t, err)
|
||||
<-wait
|
||||
}
|
||||
|
||||
@@ -288,11 +288,11 @@ func TestService_ChainHeads_ProtoArray(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0,
|
||||
params.BeaconConfig().ZeroHash)}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, false))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, false))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 0, 0, false))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, 0, 0, false))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, 0, 0, false))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, 0, 0))
|
||||
|
||||
roots, slots := c.ChainHeads()
|
||||
require.DeepEqual(t, [][32]byte{{'c'}, {'d'}, {'e'}}, roots)
|
||||
@@ -302,11 +302,11 @@ func TestService_ChainHeads_ProtoArray(t *testing.T) {
|
||||
func TestService_ChainHeads_DoublyLinkedTree(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, false))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, false))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 0, 0, false))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, 0, 0, false))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, 0, 0, false))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, 0, 0))
|
||||
|
||||
roots, slots := c.ChainHeads()
|
||||
require.Equal(t, 3, len(roots))
|
||||
@@ -384,8 +384,8 @@ func TestService_IsOptimistic_ProtoArray(t *testing.T) {
|
||||
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, true))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, true))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0))
|
||||
|
||||
opt, err := c.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -400,8 +400,8 @@ func TestService_IsOptimistic_DoublyLinkedTree(t *testing.T) {
|
||||
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, true))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, true))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0))
|
||||
|
||||
opt, err := c.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -419,8 +419,8 @@ func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {
|
||||
func TestService_IsOptimisticForRoot_ProtoArray(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, true))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, true))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0))
|
||||
|
||||
opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
|
||||
require.NoError(t, err)
|
||||
@@ -430,8 +430,8 @@ func TestService_IsOptimisticForRoot_ProtoArray(t *testing.T) {
|
||||
func TestService_IsOptimisticForRoot_DoublyLinkedTree(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0, true))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0, true))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 0, 0))
|
||||
|
||||
opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
//go:build !develop
|
||||
// +build !develop
|
||||
|
||||
package blockchain
|
||||
|
||||
@@ -83,8 +83,12 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
|
||||
} else {
|
||||
s.cfg.ForkChoiceStore = protoarray.New(j.Epoch, f.Epoch, bytesutil.ToBytes32(f.Root))
|
||||
}
|
||||
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block(), headStartRoot, f, j); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO(10261) send optimistic status
|
||||
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block(), headStartRoot, f, j, false /* optimistic status */); err != nil {
|
||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, headStartRoot); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
@@ -25,11 +25,11 @@ func TestService_newSlot(t *testing.T) {
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
require.NoError(t, fcs.ProcessBlock(ctx, 0, [32]byte{}, [32]byte{}, 0, 0, false)) // genesis
|
||||
require.NoError(t, fcs.ProcessBlock(ctx, 32, [32]byte{'a'}, [32]byte{}, 0, 0, false)) // finalized
|
||||
require.NoError(t, fcs.ProcessBlock(ctx, 64, [32]byte{'b'}, [32]byte{'a'}, 0, 0, false)) // justified
|
||||
require.NoError(t, fcs.ProcessBlock(ctx, 96, [32]byte{'c'}, [32]byte{'a'}, 0, 0, false)) // best justified
|
||||
require.NoError(t, fcs.ProcessBlock(ctx, 97, [32]byte{'d'}, [32]byte{}, 0, 0, false)) // bad
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, 0, 0)) // genesis
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 32, [32]byte{'a'}, [32]byte{}, 0, 0)) // finalized
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 64, [32]byte{'b'}, [32]byte{'a'}, 0, 0)) // justified
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 96, [32]byte{'c'}, [32]byte{'a'}, 0, 0)) // best justified
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 97, [32]byte{'d'}, [32]byte{}, 0, 0)) // bad
|
||||
|
||||
type args struct {
|
||||
slot types.Slot
|
||||
|
||||
@@ -76,6 +76,9 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.Beac
|
||||
return nil, errors.Wrap(err, "could not notify forkchoice update from execution engine")
|
||||
}
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, s.headRoot()); err != nil {
|
||||
return nil, errors.Wrap(err, "could not set block to valid")
|
||||
}
|
||||
return payloadID, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -36,7 +36,6 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, altairBlk))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, bellatrixBlk))
|
||||
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
@@ -45,6 +44,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, 0, 0))
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
|
||||
@@ -256,7 +256,7 @@ func TestStore_OnAttestation_Ok_ProtoArray(t *testing.T) {
|
||||
copied, err = transition.ProcessSlots(ctx, copied, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, 1, 1, false))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, 1, 1))
|
||||
require.NoError(t, service.OnAttestation(ctx, att[0]))
|
||||
}
|
||||
|
||||
@@ -282,7 +282,7 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
|
||||
copied, err = transition.ProcessSlots(ctx, copied, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, 1, 1, false))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, 1, 1))
require.NoError(t, service.OnAttestation(ctx, att[0]))
}

@@ -571,8 +571,8 @@ func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)

require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, b32.Block.Slot, r32, [32]byte{}, 0, 0, false))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, b33.Block.Slot, r33, r32, 0, 0, false))
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, b32.Block.Slot, r32, [32]byte{}, 0, 0))
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, b33.Block.Slot, r33, r32, 0, 0))

_, err = service.cfg.ForkChoiceStore.Head(ctx, 0, r32, []uint64{}, 0)
require.NoError(t, err)

@@ -110,13 +110,11 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
if err := s.notifyNewPayload(ctx, preStateVersion, preStateHeader, postState, signed); err != nil {
return errors.Wrap(err, "could not verify new payload")
}
// TODO(10261) Check optimistic status
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */, false /*optimistic sync*/); err != nil {
return err
}
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil {

if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
return err
}

// We add a proposer score boost to fork choice for the block root if applicable, right after
// running a successful state transition for the block.
if err := s.cfg.ForkChoiceStore.BoostProposerRoot(

@@ -275,33 +273,32 @@ func getStateVersionAndPayload(preState state.BeaconState) (int, *ethpb.Executio
}

func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlock,
blockRoots [][32]byte) ([]*ethpb.Checkpoint, []*ethpb.Checkpoint, []bool, error) {
blockRoots [][32]byte) ([]*ethpb.Checkpoint, []*ethpb.Checkpoint, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
defer span.End()

if len(blks) == 0 || len(blockRoots) == 0 {
return nil, nil, nil, errors.New("no blocks provided")
return nil, nil, errors.New("no blocks provided")
}
if err := helpers.BeaconBlockIsNil(blks[0]); err != nil {
return nil, nil, nil, err
return nil, nil, err
}
b := blks[0].Block()

// Retrieve incoming block's pre state.
if err := s.verifyBlkPreState(ctx, b); err != nil {
return nil, nil, nil, err
return nil, nil, err
}
preState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, bytesutil.ToBytes32(b.ParentRoot()))
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
if preState == nil || preState.IsNil() {
return nil, nil, nil, fmt.Errorf("nil pre state for slot %d", b.Slot())
return nil, nil, fmt.Errorf("nil pre state for slot %d", b.Slot())
}

jCheckpoints := make([]*ethpb.Checkpoint, len(blks))
fCheckpoints := make([]*ethpb.Checkpoint, len(blks))
optimistic := make([]bool, len(blks))
sigSet := &bls.SignatureBatch{
Signatures: [][]byte{},
PublicKeys: []bls.PublicKey{},

@@ -310,67 +307,60 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
var set *bls.SignatureBatch
boundaries := make(map[[32]byte]state.BeaconState)
for i, b := range blks {
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
if err != nil {
return nil, nil, nil, err
}
set, preState, err = transition.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, b)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
if err := s.notifyNewPayload(ctx, preStateVersion, preStateHeader, preState, b); err != nil {
return nil, nil, nil, err
}

// Save potential boundary states.
if slots.IsEpochStart(preState.Slot()) {
boundaries[blockRoots[i]] = preState.Copy()
if err := s.handleEpochBoundary(ctx, preState); err != nil {
return nil, nil, nil, errors.Wrap(err, "could not handle epoch boundary state")
return nil, nil, errors.Wrap(err, "could not handle epoch boundary state")
}
}
jCheckpoints[i] = preState.CurrentJustifiedCheckpoint()
fCheckpoints[i] = preState.FinalizedCheckpoint()

sigSet.Join(set)
}
verify, err := sigSet.Verify()
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
if !verify {
return nil, nil, nil, errors.New("batch block signature verification failed")
return nil, nil, errors.New("batch block signature verification failed")
}
for r, st := range boundaries {
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
return nil, nil, nil, err
return nil, nil, err
}
}
// Also saves the last post state which to be used as pre state for the next batch.
lastB := blks[len(blks)-1]
lastBR := blockRoots[len(blockRoots)-1]
if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
return nil, nil, nil, err
return nil, nil, err
}
if err := s.saveHeadNoDB(ctx, lastB, lastBR, preState); err != nil {
return nil, nil, nil, err
return nil, nil, err
}
return fCheckpoints, jCheckpoints, optimistic, nil
return fCheckpoints, jCheckpoints, nil
}

// handles a block after the block's batch has been verified, where we can save blocks
// their state summaries and split them off to relative hot/cold storage.
func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed block.SignedBeaconBlock,
blockRoot [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint, optimistic bool) error {
blockRoot [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
b := signed.Block()

s.saveInitSyncBlock(blockRoot, signed)
if err := s.insertBlockToForkChoiceStore(ctx, b, blockRoot, fCheckpoint, jCheckpoint, optimistic); err != nil {
if err := s.insertBlockToForkChoiceStore(ctx, b, blockRoot, fCheckpoint, jCheckpoint); err != nil {
return err
}
if _, err := s.notifyForkchoiceUpdate(ctx, b, bytesutil.ToBytes32(fCheckpoint.Root)); err != nil {
// TODO(10261) send optimistic status
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil {
return err
}

if err := s.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
Slot: signed.Block().Slot(),
Root: blockRoot[:],

@@ -455,13 +445,13 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
// This feeds in the block and block's attestations to fork choice store. It's allows fork choice store
// to gain information on the most current chain.
func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Context, blk block.BeaconBlock, root [32]byte,
st state.BeaconState, optimistic bool) error {
st state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.insertBlockAndAttestationsToForkChoiceStore")
defer span.End()

fCheckpoint := st.FinalizedCheckpoint()
jCheckpoint := st.CurrentJustifiedCheckpoint()
if err := s.insertBlockToForkChoiceStore(ctx, blk, root, fCheckpoint, jCheckpoint, optimistic); err != nil {
if err := s.insertBlockToForkChoiceStore(ctx, blk, root, fCheckpoint, jCheckpoint); err != nil {
return err
}
// Feed in block's attestations to fork choice store.

@@ -480,24 +470,24 @@ func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Contex
}

func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk block.BeaconBlock,
root [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint, optimistic bool) error {
//TODO(10261) check if the blocks are optimistic or not when filling fork choice
root [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
return err
}
// Feed in block to fork choice store.
if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
if err := s.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx,
blk.Slot(), root, bytesutil.ToBytes32(blk.ParentRoot()),
jCheckpoint.Epoch,
fCheckpoint.Epoch, optimistic); err != nil {
fCheckpoint.Epoch); err != nil {
return errors.Wrap(err, "could not process block for proto array fork choice")
}
return nil
// TODO(10261) send optimistic status
return s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, root)
}

// This saves post state info to DB or cache. This also saves post state info to fork choice store.
// Post state info consists of processed block and state. Do not call this method unless the block and state are verified.
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.SignedBeaconBlock, st state.BeaconState, initSync bool, optimistic bool) error {
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.SignedBeaconBlock, st state.BeaconState, initSync bool) error {
ctx, span := trace.StartSpan(ctx, "blockChain.savePostStateInfo")
defer span.End()
if initSync {

@@ -508,7 +498,7 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.Sig
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
return errors.Wrap(err, "could not save state")
}
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block(), r, st, optimistic); err != nil {
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block(), r, st); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", b.Block().Slot())
}
return nil
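Editor's note: the recurring pattern in this change is to replace ProcessBlock(..., optimistic) with InsertOptimisticBlock followed by an explicit SetOptimisticToValid call. The sketch below is illustrative only; the forkChoicer interface is an assumption that merely mirrors the two calls used in the hunks above and below, not the repository's real interface definition.

package blockchainsketch

import (
	"context"

	types "github.com/prysmaticlabs/eth2-types"
)

// forkChoicer is an assumed, trimmed-down view of the fork choice store used above.
type forkChoicer interface {
	InsertOptimisticBlock(ctx context.Context, slot types.Slot, root, parent [32]byte, justifiedEpoch, finalizedEpoch types.Epoch) error
	SetOptimisticToValid(ctx context.Context, root [32]byte) error
}

// insertThenMarkValid shows the two-step pattern: insert the block as optimistic,
// then flip it to valid. Per TODO(10261) in the diff, the valid flag is forced for now.
func insertThenMarkValid(ctx context.Context, fc forkChoicer, slot types.Slot, root, parent [32]byte, jEpoch, fEpoch types.Epoch) error {
	if err := fc.InsertOptimisticBlock(ctx, slot, root, parent, jEpoch, fEpoch); err != nil {
		return err
	}
	return fc.SetOptimisticToValid(ctx, root)
}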
@@ -366,14 +366,17 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk block.B
for i := len(pendingNodes) - 1; i >= 0; i-- {
b := pendingNodes[i]
r := pendingRoots[i]
if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
if err := s.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx,
b.Slot(), r, bytesutil.ToBytes32(b.ParentRoot()),
jCheckpoint.Epoch,
fCheckpoint.Epoch, false /* optimistic status */); err != nil {
fCheckpoint.Epoch); err != nil {
return errors.Wrap(err, "could not process block for proto array fork choice")
}
// TODO(10261) send optimistic status
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, r); err != nil {
return err
}
}

return nil
}

@@ -295,7 +295,7 @@ func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
rBlock.Block.ParentRoot = gRoot[:]
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[0]))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[0], firstState))
_, _, _, err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
_, _, err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
require.NoError(t, err)
}

@@ -1035,7 +1035,7 @@ func TestAncestor_CanUseForkchoice(t *testing.T) {
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), 0, 0, false)) // Saves blocks to fork choice store.
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), 0, 0)) // Saves blocks to fork choice store.
}

r, err := service.ancestor(context.Background(), r200[:], 150)

@@ -1080,7 +1080,7 @@ func TestAncestor_CanUseDB(t *testing.T) {
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(beaconBlock))) // Saves blocks to DB.
}

require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(context.Background(), 200, r200, r200, 0, 0, false))
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(context.Background(), 200, r200, r200, 0, 0))

r, err := service.ancestor(context.Background(), r200[:], 150)
require.NoError(t, err)

@@ -111,7 +111,7 @@ func TestProcessAttestations_Ok(t *testing.T) {
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, 1, 1, false))
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, 1, 1))
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
service.processAttestations(ctx)
require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations()))

@@ -77,7 +77,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []block.SignedBe
defer span.End()

// Apply state transition on the incoming newly received blockCopy without verifying its BLS contents.
fCheckpoints, jCheckpoints, optimistic, err := s.onBlockBatch(ctx, blocks, blkRoots)
fCheckpoints, jCheckpoints, err := s.onBlockBatch(ctx, blocks, blkRoots)
if err != nil {
err := errors.Wrap(err, "could not process block in batch")
tracing.AnnotateError(span, err)

@@ -87,20 +87,10 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []block.SignedBe
for i, b := range blocks {
blockCopy := b.Copy()
// TODO(10261) check optimistic status
if err = s.handleBlockAfterBatchVerify(ctx, blockCopy, blkRoots[i], fCheckpoints[i], jCheckpoints[i], false /*optimistic status*/); err != nil {
if err = s.handleBlockAfterBatchVerify(ctx, blockCopy, blkRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
tracing.AnnotateError(span, err)
return err
}
if !optimistic[i] {
root, err := b.Block().HashTreeRoot()
if err != nil {
return err
}
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, root); err != nil {
return err
}
}

// Send notification of the processed block to the state feed.
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,

@@ -166,7 +166,7 @@ func (s *Service) Status() error {

func (s *Service) startFromSavedState(saved state.BeaconState) error {
log.Info("Blockchain data already exists in DB, initializing...")
s.genesisTime = time.Unix(int64(saved.GenesisTime()), 0)
s.genesisTime = time.Unix(int64(saved.GenesisTime()), 0) // lint:ignore uintcast -- Genesis time will not exceed int64 in your lifetime.
s.cfg.AttService.SetGenesisTime(saved.GenesisTime())

originRoot, err := s.originRootFromSavedState(s.ctx)

@@ -233,7 +233,7 @@ func (s *Service) startFromSavedState(saved state.BeaconState) error {

func (s *Service) originRootFromSavedState(ctx context.Context) ([32]byte, error) {
// first check if we have started from checkpoint sync and have a root
originRoot, err := s.cfg.BeaconDB.OriginBlockRoot(ctx)
originRoot, err := s.cfg.BeaconDB.OriginCheckpointBlockRoot(ctx)
if err == nil {
return originRoot, nil
}

@@ -241,7 +241,7 @@ func (s *Service) originRootFromSavedState(ctx context.Context) ([32]byte, error
return originRoot, errors.Wrap(err, "could not retrieve checkpoint sync chain origin data from db")
}

// we got here because OriginBlockRoot gave us an ErrNotFound. this means the node was started from a genesis state,
// we got here because OriginCheckpointBlockRoot gave us an ErrNotFound. this means the node was started from a genesis state,
// so we should have a value for GenesisBlock
genesisBlock, err := s.cfg.BeaconDB.GenesisBlock(ctx)
if err != nil {

@@ -449,14 +449,17 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
genesisCheckpoint := genesisState.FinalizedCheckpoint()
s.store = store.New(genesisCheckpoint, genesisCheckpoint)

if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
if err := s.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx,
genesisBlk.Block().Slot(),
genesisBlkRoot,
params.BeaconConfig().ZeroHash,
genesisCheckpoint.Epoch,
genesisCheckpoint.Epoch, false /* optimistic status */); err != nil {
genesisCheckpoint.Epoch); err != nil {
log.Fatalf("Could not process genesis block for fork choice: %v", err)
}
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, genesisBlkRoot); err != nil {
log.Fatalf("Could not set optimistic status of genesis block to false: %v", err)
}

s.setHead(genesisBlkRoot, genesisBlk, genesisState)
return nil

@@ -460,7 +460,7 @@ func TestHasBlock_ForkChoiceAndDB_ProtoArray(t *testing.T) {
require.NoError(t, err)
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState, false))
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))

assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")

@@ -479,7 +479,7 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
require.NoError(t, err)
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState, false))
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))

assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")

@@ -547,7 +547,7 @@ func BenchmarkHasBlockForkChoiceStore_ProtoArray(b *testing.B) {
bs := &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
beaconState, err := v1.InitializeFromProto(bs)
require.NoError(b, err)
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState, false))
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))

b.ResetTimer()
for i := 0; i < b.N; i++ {

@@ -568,7 +568,7 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
bs := &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
beaconState, err := v1.InitializeFromProto(bs)
require.NoError(b, err)
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState, false))
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))

b.ResetTimer()
for i := 0; i < b.N; i++ {

@@ -36,6 +36,7 @@ func NewWeakSubjectivityVerifier(wsc *ethpb.Checkpoint, db weakSubjectivityDB) (
// per 7342, a nil checkpoint, zero-root or zero-epoch should all fail validation
// and return an error instead of creating a WeakSubjectivityVerifier that permits any chain history.
if wsc == nil || len(wsc.Root) == 0 || wsc.Epoch == 0 {
log.Warn("No valid weak subjectivity checkpoint specified, running without weak subjectivity verification")
return &WeakSubjectivityVerifier{
enabled: false,
}, nil

@@ -81,13 +82,14 @@ func (v *WeakSubjectivityVerifier) VerifyWeakSubjectivity(ctx context.Context, f
}
filter := filters.NewFilter().SetStartSlot(v.slot).SetEndSlot(v.slot + params.BeaconConfig().SlotsPerEpoch)
// A node should have the weak subjectivity block corresponds to the correct epoch in the DB.
log.Infof("Searching block roots index for weak subjectivity root=%#x", v.root)
roots, err := v.db.BlockRoots(ctx, filter)
if err != nil {
return errors.Wrap(err, "error while retrieving block roots to verify weak subjectivity")
}
for _, root := range roots {
if v.root == root {
log.Info("Weak subjectivity check has passed")
log.Info("Weak subjectivity check has passed!!")
v.verified = true
return nil
}
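Editor's note: the filter built in VerifyWeakSubjectivity above scans exactly one epoch's worth of slots starting at the checkpoint slot. A spec-level sketch of that window follows, using plain integers; the helper name is made up and 32 slots per epoch is mainnet's value.

package main

import "fmt"

// wsSlotWindow returns the slot range the verifier searches for the weak subjectivity
// block root: the first slot of the checkpoint epoch through one epoch after it.
func wsSlotWindow(epoch, slotsPerEpoch uint64) (uint64, uint64) {
	start := epoch * slotsPerEpoch
	return start, start + slotsPerEpoch
}

func main() {
	// Slot 1792480 (used in the test below) sits in epoch 56015 on mainnet.
	fmt.Println(wsSlotWindow(56015, 32)) // 1792480 1792512
}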
@@ -14,58 +14,62 @@ import (
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time/slots"
)

func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
beaconDB := testDB.SetupDB(t)

b := util.NewBeaconBlock()
b.Block.Slot = 32
b.Block.Slot = 1792480
blockEpoch := slots.ToEpoch(b.Block.Slot)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
tests := []struct {
wsVerified bool
disabled bool
wantErr error
checkpt *ethpb.Checkpoint
finalizedEpoch types.Epoch
name string
}{
{
name: "nil root and epoch",
},
{
name: "already verified",
checkpt: &ethpb.Checkpoint{Epoch: 2},
finalizedEpoch: 2,
wsVerified: true,
name: "nil root and epoch",
disabled: true,
},
{
name: "not yet to verify, ws epoch higher than finalized epoch",
checkpt: &ethpb.Checkpoint{Epoch: 2},
finalizedEpoch: 1,
checkpt: &ethpb.Checkpoint{Root: bytesutil.PadTo([]byte{'a'}, 32), Epoch: blockEpoch},
finalizedEpoch: blockEpoch - 1,
},
{
name: "can't find the block in DB",
checkpt: &ethpb.Checkpoint{Root: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength), Epoch: 1},
finalizedEpoch: 3,
finalizedEpoch: blockEpoch + 1,
wantErr: errWSBlockNotFound,
},
{
name: "can't find the block corresponds to ws epoch in DB",
checkpt: &ethpb.Checkpoint{Root: r[:], Epoch: 2}, // Root belongs in epoch 1.
finalizedEpoch: 3,
checkpt: &ethpb.Checkpoint{Root: r[:], Epoch: blockEpoch - 2}, // Root belongs in epoch 1.
finalizedEpoch: blockEpoch - 1,
wantErr: errWSBlockNotFoundInEpoch,
},
{
name: "can verify and pass",
checkpt: &ethpb.Checkpoint{Root: r[:], Epoch: 1},
finalizedEpoch: 3,
checkpt: &ethpb.Checkpoint{Root: r[:], Epoch: blockEpoch},
finalizedEpoch: blockEpoch + 1,
},
{
name: "equal epoch",
checkpt: &ethpb.Checkpoint{Root: r[:], Epoch: blockEpoch},
finalizedEpoch: blockEpoch,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
wv, err := NewWeakSubjectivityVerifier(tt.checkpt, beaconDB)
require.Equal(t, !tt.disabled, wv.enabled)
require.NoError(t, err)
s := &Service{
cfg: &config{BeaconDB: beaconDB, WeakSubjectivityCheckpt: tt.checkpt},

beacon-chain/cache/active_balance.go (vendored)

@@ -1,3 +1,4 @@
//go:build !fuzz
// +build !fuzz

package cache

@@ -17,10 +18,12 @@ import (
"github.com/prysmaticlabs/prysm/config/params"
)

var (
const (
// maxBalanceCacheSize defines the max number of active balances can cache.
maxBalanceCacheSize = uint64(4)
maxBalanceCacheSize = int(4)
)

var (
// BalanceCacheMiss tracks the number of balance requests that aren't present in the cache.
balanceCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "total_effective_balance_cache_miss",

@@ -42,7 +45,7 @@ type BalanceCache struct {
// NewEffectiveBalanceCache creates a new effective balance cache for storing/accessing total balance by epoch.
func NewEffectiveBalanceCache() *BalanceCache {
return &BalanceCache{
cache: lruwrpr.New(int(maxBalanceCacheSize)),
cache: lruwrpr.New(maxBalanceCacheSize),
}
}

@@ -1,3 +1,4 @@
//go:build fuzz
// +build fuzz

package cache

beacon-chain/cache/committee.go (vendored)

@@ -1,3 +1,4 @@
//go:build !fuzz
// +build !fuzz

package cache
@@ -19,11 +20,13 @@ import (
mathutil "github.com/prysmaticlabs/prysm/math"
)

var (
const (
// maxCommitteesCacheSize defines the max number of shuffled committees on per randao basis can cache.
// Due to reorgs and long finality, it's good to keep the old cache around for quickly switch over.
maxCommitteesCacheSize = uint64(32)
maxCommitteesCacheSize = int(32)
)

var (
// CommitteeCacheMiss tracks the number of committee requests that aren't present in the cache.
CommitteeCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "committee_cache_miss",

@@ -55,7 +58,7 @@ func committeeKeyFn(obj interface{}) (string, error) {
// NewCommitteesCache creates a new committee cache for storing/accessing shuffled indices of a committee.
func NewCommitteesCache() *CommitteeCache {
return &CommitteeCache{
CommitteeCache: lruwrpr.New(int(maxCommitteesCacheSize)),
CommitteeCache: lruwrpr.New(maxCommitteesCacheSize),
inProgress: make(map[string]bool),
}
}

beacon-chain/cache/committee_disabled.go (vendored)

@@ -1,3 +1,4 @@
//go:build fuzz
// +build fuzz

// This file is used in fuzzer builds to bypass global committee caches.

beacon-chain/cache/committee_fuzz_test.go (vendored)

@@ -33,7 +33,7 @@ func TestCommitteeCache_FuzzCommitteesByEpoch(t *testing.T) {
require.NoError(t, err)
}

assert.Equal(t, maxCommitteesCacheSize, uint64(len(cache.CommitteeCache.Keys())), "Incorrect key size")
assert.Equal(t, maxCommitteesCacheSize, len(cache.CommitteeCache.Keys()), "Incorrect key size")
}

func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {

@@ -50,5 +50,5 @@ func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
assert.DeepEqual(t, c.SortedIndices, indices)
}

assert.Equal(t, maxCommitteesCacheSize, uint64(len(cache.CommitteeCache.Keys())), "Incorrect key size")
assert.Equal(t, maxCommitteesCacheSize, len(cache.CommitteeCache.Keys()), "Incorrect key size")
}

beacon-chain/cache/committee_test.go (vendored)

@@ -102,7 +102,7 @@ func TestCommitteeCache_CanRotate(t *testing.T) {
}

k := cache.CommitteeCache.Keys()
assert.Equal(t, maxCommitteesCacheSize, uint64(len(k)))
assert.Equal(t, maxCommitteesCacheSize, len(k))

sort.Slice(k, func(i, j int) bool {
return k[i].(string) < k[j].(string)

beacon-chain/cache/proposer_indices.go (vendored)

@@ -1,3 +1,4 @@
//go:build !fuzz
// +build !fuzz

package cache

@@ -1,3 +1,4 @@
//go:build fuzz
// +build fuzz

// This file is used in fuzzer builds to bypass proposer indices caches.

beacon-chain/cache/subnet_ids.go (vendored)

@@ -27,7 +27,7 @@ var SubnetIDs = newSubnetIDs()
func newSubnetIDs() *subnetIDs {
// Given a node can calculate committee assignments of current epoch and next epoch.
// Max size is set to 2 epoch length.
cacheSize := int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxCommitteesPerSlot * 2))
cacheSize := int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxCommitteesPerSlot * 2)) // lint:ignore uintcast -- constant values that would panic on startup if negative.
attesterCache := lruwrpr.New(cacheSize)
aggregatorCache := lruwrpr.New(cacheSize)
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))

beacon-chain/cache/sync_committee.go (vendored)

@@ -1,3 +1,4 @@
//go:build !fuzz
// +build !fuzz

package cache

@@ -1,3 +1,4 @@
//go:build fuzz
// +build fuzz

package cache

@@ -225,7 +225,7 @@ func verifyDeposit(beaconState state.ReadOnlyBeaconState, deposit *ethpb.Deposit
if ok := trie.VerifyMerkleProofWithDepth(
receiptRoot,
leaf[:],
int(beaconState.Eth1DepositIndex()),
beaconState.Eth1DepositIndex(),
deposit.Proof,
params.BeaconConfig().DepositContractTreeDepth,
); !ok {

@@ -15,25 +15,7 @@ go_library(
"weak_subjectivity.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers",
visibility = [
"//beacon-chain:__subpackages__",
"//cmd/beacon-chain:__subpackages__",
"//contracts/deposit:__pkg__",
"//crypto/keystore:__pkg__",
"//network/forks:__pkg__",
"//proto/prysm/v1alpha1:__subpackages__",
"//proto/prysm/v1alpha1/attestation:__pkg__",
"//runtime/interop:__pkg__",
"//slasher:__subpackages__",
"//testing/altair:__pkg__",
"//testing/benchmark/benchmark_files:__subpackages__",
"//testing/endtoend/evaluators:__pkg__",
"//testing/slasher/simulator:__pkg__",
"//testing/spectest:__subpackages__",
"//testing/util:__pkg__",
"//tools:__subpackages__",
"//validator:__subpackages__",
],
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/time:go_default_library",

@@ -1,7 +1,6 @@
package helpers

import (
"bytes"
"context"
"encoding/hex"
"errors"

@@ -13,8 +12,9 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/math"
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
v1alpha1 "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/time/slots"
)

@@ -122,20 +122,20 @@ func ComputeWeakSubjectivityPeriod(ctx context.Context, st state.ReadOnlyBeaconS
// current_epoch = compute_epoch_at_slot(get_current_slot(store))
// return current_epoch <= ws_state_epoch + ws_period
func IsWithinWeakSubjectivityPeriod(
ctx context.Context, currentEpoch types.Epoch, wsState state.ReadOnlyBeaconState, wsCheckpoint *eth.WeakSubjectivityCheckpoint) (bool, error) {
ctx context.Context, currentEpoch types.Epoch, wsState state.ReadOnlyBeaconState, wsStateRoot [32]byte, wsEpoch types.Epoch) (bool, error) {
// Make sure that incoming objects are not nil.
if wsState == nil || wsState.IsNil() || wsState.LatestBlockHeader() == nil || wsCheckpoint == nil {
if wsState == nil || wsState.IsNil() || wsState.LatestBlockHeader() == nil {
return false, errors.New("invalid weak subjectivity state or checkpoint")
}

// Assert that state and checkpoint have the same root and epoch.
if !bytes.Equal(wsState.LatestBlockHeader().StateRoot, wsCheckpoint.StateRoot) {
if bytesutil.ToBytes32(wsState.LatestBlockHeader().StateRoot) != wsStateRoot {
return false, fmt.Errorf("state (%#x) and checkpoint (%#x) roots do not match",
wsState.LatestBlockHeader().StateRoot, wsCheckpoint.StateRoot)
wsState.LatestBlockHeader().StateRoot, wsStateRoot)
}
if slots.ToEpoch(wsState.Slot()) != wsCheckpoint.Epoch {
if slots.ToEpoch(wsState.Slot()) != wsEpoch {
return false, fmt.Errorf("state (%v) and checkpoint (%v) epochs do not match",
slots.ToEpoch(wsState.Slot()), wsCheckpoint.Epoch)
slots.ToEpoch(wsState.Slot()), wsEpoch)
}

// Compare given epoch to state epoch + weak subjectivity period.
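Editor's note: callers now pass the checkpoint's state root and epoch directly instead of a *WeakSubjectivityCheckpoint. A hedged caller-side sketch follows; checkWSPeriod and its error strings are made up, and only the helpers.IsWithinWeakSubjectivityPeriod signature is taken from the hunk above.

import (
	"context"

	"github.com/pkg/errors"
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
)

// checkWSPeriod is a hypothetical wrapper showing the new call shape.
func checkWSPeriod(ctx context.Context, cur types.Epoch, wsState state.ReadOnlyBeaconState, wsStateRoot [32]byte, wsEpoch types.Epoch) error {
	ok, err := helpers.IsWithinWeakSubjectivityPeriod(ctx, cur, wsState, wsStateRoot, wsEpoch)
	if err != nil {
		return errors.Wrap(err, "could not check weak subjectivity period")
	}
	if !ok {
		return errors.New("weak subjectivity checkpoint is outside the current period")
	}
	return nil
}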
@@ -164,7 +164,7 @@ func LatestWeakSubjectivityEpoch(ctx context.Context, st state.ReadOnlyBeaconSta
}

// ParseWeakSubjectivityInputString parses "blocks_root:epoch_number" string into a checkpoint.
func ParseWeakSubjectivityInputString(wsCheckpointString string) (*eth.Checkpoint, error) {
func ParseWeakSubjectivityInputString(wsCheckpointString string) (*v1alpha1.Checkpoint, error) {
if wsCheckpointString == "" {
return nil, nil
}

@@ -197,7 +197,7 @@ func ParseWeakSubjectivityInputString(wsCheckpointString string) (*eth.Checkpoin
return nil, err
}

return &eth.Checkpoint{
return &v1alpha1.Checkpoint{
Epoch: types.Epoch(epoch),
Root: bRoot,
}, nil

@@ -54,12 +54,15 @@ func TestWeakSubjectivity_ComputeWeakSubjectivityPeriod(t *testing.T) {
})
}
}

type mockWsCheckpoint func() (stateRoot [32]byte, blockRoot [32]byte, e types.Epoch)

func TestWeakSubjectivity_IsWithinWeakSubjectivityPeriod(t *testing.T) {
tests := []struct {
name string
epoch types.Epoch
genWsState func() state.ReadOnlyBeaconState
genWsCheckpoint func() *ethpb.WeakSubjectivityCheckpoint
genWsCheckpoint mockWsCheckpoint
want bool
wantedErr string
}{

@@ -68,22 +71,8 @@ func TestWeakSubjectivity_IsWithinWeakSubjectivityPeriod(t *testing.T) {
genWsState: func() state.ReadOnlyBeaconState {
return nil
},
genWsCheckpoint: func() *ethpb.WeakSubjectivityCheckpoint {
return &ethpb.WeakSubjectivityCheckpoint{
BlockRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
Epoch: 42,
}
},
wantedErr: "invalid weak subjectivity state or checkpoint",
},
{
name: "nil weak subjectivity checkpoint",
genWsState: func() state.ReadOnlyBeaconState {
return genState(t, 128, 32)
},
genWsCheckpoint: func() *ethpb.WeakSubjectivityCheckpoint {
return nil
genWsCheckpoint: func() ([32]byte, [32]byte, types.Epoch) {
return [32]byte{}, [32]byte{}, 42
},
wantedErr: "invalid weak subjectivity state or checkpoint",
},

@@ -99,11 +88,10 @@ func TestWeakSubjectivity_IsWithinWeakSubjectivityPeriod(t *testing.T) {
require.NoError(t, err)
return beaconState
},
genWsCheckpoint: func() *ethpb.WeakSubjectivityCheckpoint {
return &ethpb.WeakSubjectivityCheckpoint{
StateRoot: bytesutil.PadTo([]byte("stateroot2"), 32),
Epoch: 42,
}
genWsCheckpoint: func() ([32]byte, [32]byte, types.Epoch) {
var sr [32]byte
copy(sr[:], bytesutil.PadTo([]byte("stateroot2"), 32))
return sr, [32]byte{}, 42
},
wantedErr: fmt.Sprintf("state (%#x) and checkpoint (%#x) roots do not match",
bytesutil.PadTo([]byte("stateroot1"), 32), bytesutil.PadTo([]byte("stateroot2"), 32)),

@@ -120,11 +108,10 @@ func TestWeakSubjectivity_IsWithinWeakSubjectivityPeriod(t *testing.T) {
require.NoError(t, err)
return beaconState
},
genWsCheckpoint: func() *ethpb.WeakSubjectivityCheckpoint {
return &ethpb.WeakSubjectivityCheckpoint{
StateRoot: bytesutil.PadTo([]byte("stateroot"), 32),
Epoch: 43,
}
genWsCheckpoint: func() ([32]byte, [32]byte, types.Epoch) {
var sr [32]byte
copy(sr[:], bytesutil.PadTo([]byte("stateroot"), 32))
return sr, [32]byte{}, 43
},
wantedErr: "state (42) and checkpoint (43) epochs do not match",
},

@@ -140,11 +127,10 @@ func TestWeakSubjectivity_IsWithinWeakSubjectivityPeriod(t *testing.T) {
require.NoError(t, err)
return beaconState
},
genWsCheckpoint: func() *ethpb.WeakSubjectivityCheckpoint {
return &ethpb.WeakSubjectivityCheckpoint{
StateRoot: bytesutil.PadTo([]byte("stateroot"), 32),
Epoch: 42,
}
genWsCheckpoint: func() ([32]byte, [32]byte, types.Epoch) {
var sr [32]byte
copy(sr[:], bytesutil.PadTo([]byte("stateroot"), 32))
return sr, [32]byte{}, 42
},
wantedErr: "cannot compute weak subjectivity period: no active validators found",
},

@@ -161,11 +147,10 @@ func TestWeakSubjectivity_IsWithinWeakSubjectivityPeriod(t *testing.T) {
require.NoError(t, err)
return beaconState
},
genWsCheckpoint: func() *ethpb.WeakSubjectivityCheckpoint {
return &ethpb.WeakSubjectivityCheckpoint{
StateRoot: bytesutil.PadTo([]byte("stateroot"), 32),
Epoch: 42,
}
genWsCheckpoint: func() ([32]byte, [32]byte, types.Epoch) {
var sr [32]byte
copy(sr[:], bytesutil.PadTo([]byte("stateroot"), 32))
return sr, [32]byte{}, 42
},
want: false,
},

@@ -182,18 +167,18 @@ func TestWeakSubjectivity_IsWithinWeakSubjectivityPeriod(t *testing.T) {
require.NoError(t, err)
return beaconState
},
genWsCheckpoint: func() *ethpb.WeakSubjectivityCheckpoint {
return &ethpb.WeakSubjectivityCheckpoint{
StateRoot: bytesutil.PadTo([]byte("stateroot"), 32),
Epoch: 42,
}
genWsCheckpoint: func() ([32]byte, [32]byte, types.Epoch) {
var sr [32]byte
copy(sr[:], bytesutil.PadTo([]byte("stateroot"), 32))
return sr, [32]byte{}, 42
},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := helpers.IsWithinWeakSubjectivityPeriod(context.Background(), tt.epoch, tt.genWsState(), tt.genWsCheckpoint())
sr, _, e := tt.genWsCheckpoint()
got, err := helpers.IsWithinWeakSubjectivityPeriod(context.Background(), tt.epoch, tt.genWsState(), sr, e)
if tt.wantedErr != "" {
assert.Equal(t, false, got)
assert.ErrorContains(t, tt.wantedErr, err)

@@ -17,13 +17,15 @@ go_library(
go_test(
name = "go_default_test",
srcs = ["slot_epoch_test.go"],
embed = [":go_default_library"],
deps = [
":go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//config/params:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
],

@@ -46,9 +46,9 @@ func NextEpoch(state state.ReadOnlyBeaconState) types.Epoch {
return slots.ToEpoch(state.Slot()) + 1
}

// AltairCompatible returns if the input state `s` is altair compatible and input epoch `e` is higher equal than fork epoch.
func AltairCompatible(s state.BeaconState, e types.Epoch) bool {
return s.Version() == version.Altair && e >= params.BeaconConfig().AltairForkEpoch
// HigherEqualThanAltairVersionAndEpoch returns if the input state `s` has a higher version number than Altair state and input epoch `e` is higher equal than fork epoch.
func HigherEqualThanAltairVersionAndEpoch(s state.BeaconState, e types.Epoch) bool {
return s.Version() >= version.Altair && e >= params.BeaconConfig().AltairForkEpoch
}

// CanUpgradeToAltair returns true if the input `slot` can upgrade to Altair.

@@ -1,14 +1,17 @@
package time
package time_test

import (
"testing"

types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/config/params"
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
"github.com/prysmaticlabs/prysm/time/slots"
)

@@ -42,7 +45,7 @@ func TestCurrentEpoch_OK(t *testing.T) {
for _, tt := range tests {
state, err := v1.InitializeFromProto(&eth.BeaconState{Slot: tt.slot})
require.NoError(t, err)
assert.Equal(t, tt.epoch, CurrentEpoch(state), "ActiveCurrentEpoch(%d)", state.Slot())
assert.Equal(t, tt.epoch, time.CurrentEpoch(state), "ActiveCurrentEpoch(%d)", state.Slot())
}
}

@@ -58,7 +61,7 @@ func TestPrevEpoch_OK(t *testing.T) {
for _, tt := range tests {
state, err := v1.InitializeFromProto(&eth.BeaconState{Slot: tt.slot})
require.NoError(t, err)
assert.Equal(t, tt.epoch, PrevEpoch(state), "ActivePrevEpoch(%d)", state.Slot())
assert.Equal(t, tt.epoch, time.PrevEpoch(state), "ActivePrevEpoch(%d)", state.Slot())
}
}

@@ -76,7 +79,7 @@ func TestNextEpoch_OK(t *testing.T) {
for _, tt := range tests {
state, err := v1.InitializeFromProto(&eth.BeaconState{Slot: tt.slot})
require.NoError(t, err)
assert.Equal(t, tt.epoch, NextEpoch(state), "NextEpoch(%d)", state.Slot())
assert.Equal(t, tt.epoch, time.NextEpoch(state), "NextEpoch(%d)", state.Slot())
}
}

@@ -108,7 +111,7 @@ func TestCanUpgradeToAltair(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := CanUpgradeToAltair(tt.slot); got != tt.want {
if got := time.CanUpgradeToAltair(tt.slot); got != tt.want {
t.Errorf("canUpgradeToAltair() = %v, want %v", got, tt.want)
}
})

@@ -143,7 +146,7 @@ func TestCanUpgradeBellatrix(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := CanUpgradeToBellatrix(tt.slot); got != tt.want {
if got := time.CanUpgradeToBellatrix(tt.slot); got != tt.want {
t.Errorf("CanUpgradeToBellatrix() = %v, want %v", got, tt.want)
}
})

@@ -178,6 +181,85 @@ func TestCanProcessEpoch_TrueOnEpochsLastSlot(t *testing.T) {
b := &eth.BeaconState{Slot: tt.slot}
s, err := v1.InitializeFromProto(b)
require.NoError(t, err)
assert.Equal(t, tt.canProcessEpoch, CanProcessEpoch(s), "CanProcessEpoch(%d)", tt.slot)
assert.Equal(t, tt.canProcessEpoch, time.CanProcessEpoch(s), "CanProcessEpoch(%d)", tt.slot)
}
}

func TestAltairCompatible(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.AltairForkEpoch = 1
cfg.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(cfg)

type args struct {
s state.BeaconState
e types.Epoch
}
tests := []struct {
name string
args args
want bool
}{
{
name: "phase0 state",
args: args{
s: func() state.BeaconState {
st, _ := util.DeterministicGenesisState(t, 1)
return st
}(),
},
want: false,
},
{
name: "altair state, altair epoch",
args: args{
s: func() state.BeaconState {
st, _ := util.DeterministicGenesisStateAltair(t, 1)
return st
}(),
e: params.BeaconConfig().AltairForkEpoch,
},
want: true,
},
{
name: "bellatrix state, bellatrix epoch",
args: args{
s: func() state.BeaconState {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
return st
}(),
e: params.BeaconConfig().BellatrixForkEpoch,
},
want: true,
},
{
name: "bellatrix state, altair epoch",
args: args{
s: func() state.BeaconState {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
return st
}(),
e: params.BeaconConfig().AltairForkEpoch,
},
want: true,
},
{
name: "bellatrix state, phase0 epoch",
args: args{
s: func() state.BeaconState {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
return st
}(),
},
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := time.HigherEqualThanAltairVersionAndEpoch(tt.args.s, tt.args.e); got != tt.want {
t.Errorf("HigherEqualThanAltairVersionAndEpoch() = %v, want %v", got, tt.want)
}
})
}
}

@@ -92,7 +92,7 @@ func ExecuteStateTransition(
func ProcessSlot(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "core.state.ProcessSlot")
defer span.End()
span.AddAttributes(trace.Int64Attribute("slot", int64(state.Slot())))
span.AddAttributes(trace.Int64Attribute("slot", int64(state.Slot()))) // lint:ignore uintcast -- This is OK for tracing.

prevStateRoot, err := state.HashTreeRoot(ctx)
if err != nil {

@@ -190,7 +190,7 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
if state == nil || state.IsNil() {
return nil, errors.New("nil state")
}
span.AddAttributes(trace.Int64Attribute("slots", int64(slot)-int64(state.Slot())))
span.AddAttributes(trace.Int64Attribute("slots", int64(slot)-int64(state.Slot()))) // lint:ignore uintcast -- This is OK for tracing.

// The block must have a higher slot than parent state.
if state.Slot() >= slot {

@@ -354,7 +354,7 @@ func VerifyOperationLengths(_ context.Context, state state.BeaconState, b block.
func ProcessEpochPrecompute(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "core.state.ProcessEpochPrecompute")
defer span.End()
span.AddAttributes(trace.Int64Attribute("epoch", int64(time.CurrentEpoch(state))))
span.AddAttributes(trace.Int64Attribute("epoch", int64(time.CurrentEpoch(state)))) // lint:ignore uintcast -- This is OK for tracing.

if state == nil || state.IsNil() {
return nil, errors.New("nil state")

@@ -50,9 +50,10 @@ type ReadOnlyDatabase interface {
// Powchain operations.
PowchainData(ctx context.Context) (*ethpb.ETH1ChainData, error)
// Fee reicipients operations.
FeeRecipientByValidatorID(ctx context.Context, id uint64) (common.Address, error)
FeeRecipientByValidatorID(ctx context.Context, id types.ValidatorIndex) (common.Address, error)
// origin checkpoint sync support
OriginBlockRoot(ctx context.Context) ([32]byte, error)
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
LowestSyncedBlockSlot(ctx context.Context) (types.Slot, error)
}

// NoHeadAccessDatabase defines a struct without access to chain head data.

@@ -81,7 +82,7 @@ type NoHeadAccessDatabase interface {
// Run any required database migrations.
RunMigrations(ctx context.Context) error
// Fee reicipients operations.
SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []uint64, addrs []common.Address) error
SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []types.ValidatorIndex, addrs []common.Address) error

CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint types.Slot) error
}

@@ -100,7 +101,7 @@ type HeadAccessDatabase interface {
EnsureEmbeddedGenesis(ctx context.Context) error

// initialization method needed for origin checkpoint sync
SaveOrigin(ctx context.Context, state io.Reader, block io.Reader) error
SaveOrigin(ctx context.Context, state []byte, block []byte) error
}

// SlasherDatabase interface for persisting data related to detecting slashable offenses on Ethereum.

@@ -49,6 +49,7 @@ go_library(
"//io/file:go_default_library",
"//monitoring/progress:go_default_library",
"//monitoring/tracing:go_default_library",
"//proto/detect:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/block:go_default_library",
"//proto/prysm/v1alpha1/wrapper:go_default_library",

@@ -24,6 +24,7 @@ import (

// used to represent errors for inconsistent slot ranges.
var errInvalidSlotRange = errors.New("invalid end slot and start slot provided")
var errEmptyBucket = errors.New("unexpectedly empty bucket")

// Block retrieval by root.
func (s *Store) Block(ctx context.Context, blockRoot [32]byte) (block.SignedBeaconBlock, error) {

@@ -47,18 +48,18 @@ func (s *Store) Block(ctx context.Context, blockRoot [32]byte) (block.SignedBeac
return blk, err
}

// OriginBlockRoot returns the value written to the db in SaveOriginBlockRoot
// OriginCheckpointBlockRoot returns the value written to the db in SaveOriginCheckpointBlockRoot
// This is the root of a finalized block within the weak subjectivity period
// at the time the chain was started, used to initialize the database and chain
// without syncing from genesis.
func (s *Store) OriginBlockRoot(ctx context.Context) ([32]byte, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.OriginBlockRoot")
func (s *Store) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.OriginCheckpointBlockRoot")
defer span.End()

var root [32]byte
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
rootSlice := bkt.Get(originBlockRootKey)
rootSlice := bkt.Get(originCheckpointBlockRootKey)
if rootSlice == nil {
return ErrNotFoundOriginBlockRoot
}

@@ -69,6 +70,26 @@ func (s *Store) OriginBlockRoot(ctx context.Context) ([32]byte, error) {
return root, err
}

// LowestSyncedBlockSlot returns the lowest slot number in the block root index
func (s *Store) LowestSyncedBlockSlot(ctx context.Context) (types.Slot, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.LowestSyncedBlockSlot")
defer span.End()
// start scan at zero, which will yield the lowest key in the collection.
var slot types.Slot
err := s.db.View(func(tx *bolt.Tx) error {
// Iterate through the index, which is in byte sorted order.
bkt := tx.Bucket(blockSlotIndicesBucket)
c := bkt.Cursor()
k, _ := c.First()
if k == nil {
return errors.Wrapf(errEmptyBucket, "'%s' bucket is empty", string(blockSlotIndicesBucket))
}
slot = bytesutil.BytesToSlotBigEndian(k)
return nil
})
return slot, err
}
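Editor's note: LowestSyncedBlockSlot above relies on the slot index keys being big-endian encoded, so bolt's byte-sorted cursor order matches numeric slot order and Cursor().First() returns the lowest slot. A small, self-contained sketch of that property using plain encoding/binary (the bytesutil helpers used above wrap the same encoding):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// slotKey encodes a slot the way the index bucket keys are assumed to be encoded: big-endian.
func slotKey(slot uint64) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, slot)
	return b
}

func main() {
	// Big-endian keys compare in the same order as the slots themselves,
	// which is what lets the bucket cursor return the lowest slot first.
	fmt.Println(bytes.Compare(slotKey(20), slotKey(1<<32)) < 0) // true
}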
// HeadBlock returns the latest canonical block in the Ethereum Beacon Chain.
func (s *Store) HeadBlock(ctx context.Context) (block.SignedBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HeadBlock")

@@ -334,16 +355,16 @@ func (s *Store) SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) er
})
}

// SaveOriginBlockRoot is used to keep track of the block root used for origin sync.
// SaveOriginCheckpointBlockRoot is used to keep track of the block root used for syncing from a checkpoint origin.
// This should be a finalized block from within the current weak subjectivity period.
// This value is used by a running beacon chain node to locate the state at the beginning
// of the chain history, in places where genesis would typically be used.
func (s *Store) SaveOriginBlockRoot(ctx context.Context, blockRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveOriginBlockRoot")
func (s *Store) SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "BeaconDB.SaveOriginCheckpointBlockRoot")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
return bucket.Put(originBlockRootKey, blockRoot[:])
return bucket.Put(originCheckpointBlockRootKey, blockRoot[:])
})
}

@@ -395,13 +416,13 @@ func (s *Store) HighestSlotBlocksBelow(ctx context.Context, slot types.Slot) ([]

// FeeRecipientByValidatorID returns the fee recipient for a validator id.
// `ErrNotFoundFeeRecipient` is returned if the validator id is not found.
func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id uint64) (common.Address, error) {
func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id types.ValidatorIndex) (common.Address, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.FeeRecipientByValidatorID")
defer span.End()
var addr []byte
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(feeRecipientBucket)
addr = bkt.Get(bytesutil.Uint64ToBytesBigEndian(id))
addr = bkt.Get(bytesutil.Uint64ToBytesBigEndian(uint64(id)))
if addr == nil {
return errors.Wrapf(ErrNotFoundFeeRecipient, "validator id %d", id)
}
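Editor's note: with the switch to types.ValidatorIndex, a round trip through the two fee-recipient accessors looks like the hypothetical helper below; everything except the two Store method signatures shown in this diff is assumed.

import (
	"context"

	"github.com/ethereum/go-ethereum/common"
	types "github.com/prysmaticlabs/eth2-types"
)

// saveAndLoadFeeRecipient is a made-up helper exercising the retyped accessors.
func saveAndLoadFeeRecipient(ctx context.Context, db *Store) (common.Address, error) {
	idx := types.ValidatorIndex(7)
	if err := db.SaveFeeRecipientsByValidatorIDs(ctx, []types.ValidatorIndex{idx}, []common.Address{{0xaa}}); err != nil {
		return common.Address{}, err
	}
	return db.FeeRecipientByValidatorID(ctx, idx)
}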
@@ -412,7 +433,7 @@ func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id uint64) (commo
|
||||
|
||||
// SaveFeeRecipientsByValidatorIDs saves the fee recipients for validator ids.
|
||||
// Error is returned if `ids` and `recipients` are not the same length.
|
||||
func (s *Store) SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []uint64, feeRecipients []common.Address) error {
|
||||
func (s *Store) SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []types.ValidatorIndex, feeRecipients []common.Address) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.SaveFeeRecipientByValidatorID")
|
||||
defer span.End()
|
||||
|
||||
@@ -423,7 +444,7 @@ func (s *Store) SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []uint6
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(feeRecipientBucket)
|
||||
for i, id := range ids {
|
||||
if err := bkt.Put(bytesutil.Uint64ToBytesBigEndian(id), feeRecipients[i].Bytes()); err != nil {
|
||||
if err := bkt.Put(bytesutil.Uint64ToBytesBigEndian(uint64(id)), feeRecipients[i].Bytes()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
@@ -58,6 +58,30 @@ var blockTests = []struct {
|
||||
},
|
||||
}
|
||||
|
||||
func TestStore_LowestSyncedBlockSlot(t *testing.T) {
|
||||
// test case: make sure expected error is returned w/ an empty bucket
|
||||
db := setupDB(t)
|
||||
ctx := context.Background()
|
||||
_, err := db.LowestSyncedBlockSlot(ctx)
|
||||
require.ErrorIs(t, err, errEmptyBucket)
|
||||
|
||||
// test case: make sure that the right value is returned using a non-zero key
|
||||
block := util.NewBeaconBlock()
|
||||
block.Block.Slot = types.Slot(1 << 32)
|
||||
require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block)))
|
||||
slot, err := db.LowestSyncedBlockSlot(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, block.Block.Slot)
|
||||
|
||||
// test case: write a lower value and make sure that it is returned instead of the existing value
|
||||
zero := util.NewBeaconBlock()
|
||||
zero.Block.Slot = types.Slot(0)
|
||||
require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(zero)))
|
||||
zslot, err := db.LowestSyncedBlockSlot(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, zslot, zero.Block.Slot)
|
||||
}
|
||||
|
||||
func TestStore_SaveBlock_NoDuplicates(t *testing.T) {
|
||||
BlockCacheSize = 1
|
||||
slot := types.Slot(20)
|
||||
@@ -595,11 +619,11 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
|
||||
func TestStore_FeeRecipientByValidatorID(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
ctx := context.Background()
|
||||
ids := []uint64{0, 0, 0}
|
||||
ids := []types.ValidatorIndex{0, 0, 0}
|
||||
feeRecipients := []common.Address{{}, {}, {}, {}}
|
||||
require.ErrorContains(t, "validatorIDs and feeRecipients must be the same length", db.SaveFeeRecipientsByValidatorIDs(ctx, ids, feeRecipients))
|
||||
|
||||
ids = []uint64{0, 1, 2}
|
||||
ids = []types.ValidatorIndex{0, 1, 2}
|
||||
feeRecipients = []common.Address{{'a'}, {'b'}, {'c'}}
|
||||
require.NoError(t, db.SaveFeeRecipientsByValidatorIDs(ctx, ids, feeRecipients))
|
||||
f, err := db.FeeRecipientByValidatorID(ctx, 0)
|
||||
|
||||
@@ -46,6 +46,7 @@ func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, chec
root := checkpoint.Root
var previousRoot []byte
genesisRoot := tx.Bucket(blocksBucket).Get(genesisBlockRootKey)
initCheckpointRoot := tx.Bucket(blocksBucket).Get(originCheckpointBlockRootKey)

// De-index recent finalized block roots, to be re-indexed.
previousFinalizedCheckpoint := &ethpb.Checkpoint{}
@@ -74,7 +75,7 @@ func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, chec
// Walk up the ancestry chain until we reach a block root present in the finalized block roots
// index bucket or genesis block root.
for {
if bytes.Equal(root, genesisRoot) {
if bytes.Equal(root, genesisRoot) || bytes.Equal(root, initCheckpointRoot) {
break
}
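The checkpoint-origin root added above gives the ancestry walk a second stopping point. A small hedged sketch of that termination rule (walkToIndexedRoot, parentOf and indexed are hypothetical stand-ins, not this file's code): a checkpoint-synced node never indexed anything below its origin block, so the loop must treat the origin root like genesis instead of walking past the start of its known chain.

// walkToIndexedRoot is illustrative only; parentOf and indexed stand in for the
// bucket lookups the real updateFinalizedBlockRoots performs inside the bolt transaction.
func walkToIndexedRoot(root, genesisRoot, originRoot [32]byte, indexed map[[32]byte]bool, parentOf func([32]byte) [32]byte) [32]byte {
	for {
		// Stop at either sentinel root, or at the first finalized root that is already indexed.
		if root == genesisRoot || root == originRoot || indexed[root] {
			return root
		}
		root = parentOf(root)
	}
}
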
@@ -51,7 +51,7 @@ var (
altairKey = []byte("altair")
bellatrixKey = []byte("merge")
// block root included in the beacon state used by weak subjectivity initial sync
originBlockRootKey = []byte("origin-block-root")
originCheckpointBlockRootKey = []byte("origin-checkpoint-block-root")

// Deprecated: This index key was migrated in PR 6461. Do not use, except for migrations.
lastArchivedIndexKey = []byte("last-archived")

@@ -2,56 +2,58 @@ package kv

import (
"context"
"io"
"io/ioutil"
"fmt"

"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
statev2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/proto/detect"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
)

// SaveOrigin loads an ssz serialized Block & BeaconState from an io.Reader
// (ex: an open file) prepares the database so that the beacon node can begin
// syncing, using the provided values as their point of origin. This is an alternative
// to syncing from genesis, and should only be run on an empty database.
func (s *Store) SaveOrigin(ctx context.Context, stateReader, blockReader io.Reader) error {
// unmarshal both block and state before trying to save anything
// so that we fail early if there is any issue with the ssz data
blk := &ethpb.SignedBeaconBlockAltair{}
bb, err := ioutil.ReadAll(blockReader)
func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error {
cf, err := detect.ByState(serState)
if err != nil {
return errors.Wrap(err, "error reading block given to SaveOrigin")
return errors.Wrap(err, "could not sniff config+fork for origin state bytes")
}
if err := blk.UnmarshalSSZ(bb); err != nil {
return errors.Wrap(err, "could not unmarshal checkpoint block")
}
wblk, err := wrapper.WrappedAltairSignedBeaconBlock(blk)
if err != nil {
return errors.Wrap(err, "could not wrap checkpoint block")
}
bs, err := statev2.InitializeFromSSZReader(stateReader)
if err != nil {
return errors.Wrap(err, "could not initialize checkpoint state from reader")
_, ok := params.BeaconConfig().ForkVersionSchedule[cf.Version]
if !ok {
return fmt.Errorf("config mismatch, beacon node configured to connect to %s, detected state is for %s", params.BeaconConfig().ConfigName, cf.ConfigName.String())
}

// save block
if err := s.SaveBlock(ctx, wblk); err != nil {
return errors.Wrap(err, "could not save checkpoint block")
log.Printf("detected supported config for state & block version detection, name=%s, fork=%s", cf.ConfigName.String(), cf.Fork)
state, err := cf.UnmarshalBeaconState(serState)
if err != nil {
return errors.Wrap(err, "failed to initialize origin state w/ bytes + config+fork")
}
blockRoot, err := blk.Block.HashTreeRoot()

wblk, err := cf.UnmarshalBeaconBlock(serBlock)
if err != nil {
return errors.Wrap(err, "failed to initialize origin block w/ bytes + config+fork")
}
blk := wblk.Block()

// save block
blockRoot, err := blk.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not compute HashTreeRoot of checkpoint block")
}
log.Infof("saving checkpoint block to db, w/ root=%#x", blockRoot)
if err := s.SaveBlock(ctx, wblk); err != nil {
return errors.Wrap(err, "could not save checkpoint block")
}

// save state
if err = s.SaveState(ctx, bs, blockRoot); err != nil {
log.Infof("calling SaveState w/ blockRoot=%x", blockRoot)
if err = s.SaveState(ctx, state, blockRoot); err != nil {
return errors.Wrap(err, "could not save state")
}
if err = s.SaveStateSummary(ctx, &ethpb.StateSummary{
Slot: bs.Slot(),
Slot: state.Slot(),
Root: blockRoot[:],
}); err != nil {
return errors.Wrap(err, "could not save state summary")
@@ -59,7 +61,7 @@ func (s *Store) SaveOrigin(ctx context.Context, stateReader, blockReader io.Read

// save origin block root in special key, to be used when the canonical
// origin (start of chain, ie alternative to genesis) block or state is needed
if err = s.SaveOriginBlockRoot(ctx, blockRoot); err != nil {
if err = s.SaveOriginCheckpointBlockRoot(ctx, blockRoot); err != nil {
return errors.Wrap(err, "could not save origin block root")
}

@@ -70,7 +72,7 @@ func (s *Store) SaveOrigin(ctx context.Context, stateReader, blockReader io.Read

// rebuild the checkpoint from the block
// use it to mark the block as justified and finalized
slotEpoch, err := blk.Block.Slot.SafeDivSlot(params.BeaconConfig().SlotsPerEpoch)
slotEpoch, err := blk.Slot().SafeDivSlot(params.BeaconConfig().SlotsPerEpoch)
if err != nil {
return err
}
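A hedged sketch of how a caller might drive the new []byte-based signature, now that SaveOrigin no longer takes io.Readers: the caller reads the checkpoint SSZ files itself and hands over raw bytes. The originSaver interface, file paths, and wiring below are illustrative assumptions, not code from this PR.

package checkpointdemo

import (
	"context"
	"os"

	"github.com/pkg/errors"
)

// originSaver mirrors the new SaveOrigin signature so the sketch stands alone.
type originSaver interface {
	SaveOrigin(ctx context.Context, serState, serBlock []byte) error
}

func saveCheckpointOrigin(ctx context.Context, db originSaver, statePath, blockPath string) error {
	serState, err := os.ReadFile(statePath) // raw SSZ-encoded BeaconState
	if err != nil {
		return errors.Wrap(err, "could not read origin state ssz")
	}
	serBlock, err := os.ReadFile(blockPath) // raw SSZ-encoded SignedBeaconBlock
	if err != nil {
		return errors.Wrap(err, "could not read origin block ssz")
	}
	// SaveOrigin sniffs config+fork from the state bytes, unmarshals both objects,
	// then persists the block, state, state summary, and origin checkpoint root.
	return db.SaveOrigin(ctx, serState, serBlock)
}
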
@@ -27,9 +27,9 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
|
||||
// 2 <- justified: 1, finalized: 0
|
||||
// |
|
||||
// 3 <- justified: 2, finalized: 1
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 1, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(3), indexToHash(2), 2, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 1, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(3), indexToHash(2), 2, 1))
|
||||
|
||||
// With starting justified epoch at 0, the head should be 3:
|
||||
// 0 <- start
|
||||
@@ -89,17 +89,17 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
|
||||
// | |
|
||||
// justified: 2, finalized: 0 -> 9 10 <- justified: 2, finalized: 0
|
||||
// Left branch.
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(3), indexToHash(1), 1, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(5), indexToHash(3), 1, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(7), indexToHash(5), 1, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(9), indexToHash(7), 2, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(3), indexToHash(1), 1, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(5), indexToHash(3), 1, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(7), indexToHash(5), 1, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(9), indexToHash(7), 2, 0))
|
||||
// Right branch.
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(4), indexToHash(2), 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(6), indexToHash(4), 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(8), indexToHash(6), 1, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(10), indexToHash(8), 2, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(4), indexToHash(2), 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(6), indexToHash(4), 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(8), indexToHash(6), 1, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(10), indexToHash(8), 2, 0))
|
||||
|
||||
// With start at 0, the head should be 10:
|
||||
// 0 <-- start
|
||||
@@ -185,7 +185,7 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
|
||||
func setup(justifiedEpoch, finalizedEpoch types.Epoch) *ForkChoice {
|
||||
ctx := context.Background()
|
||||
f := New(justifiedEpoch, finalizedEpoch)
|
||||
err := f.ProcessBlock(ctx, 0, params.BeaconConfig().ZeroHash, [32]byte{}, justifiedEpoch, finalizedEpoch, false)
|
||||
err := f.InsertOptimisticBlock(ctx, 0, params.BeaconConfig().ZeroHash, [32]byte{}, justifiedEpoch, finalizedEpoch)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -102,17 +102,17 @@ func (f *ForkChoice) ProcessAttestation(ctx context.Context, validatorIndices []
processedAttestationCount.Inc()
}

// ProcessBlock processes a new block by inserting it to the fork choice store.
func (f *ForkChoice) ProcessBlock(
// InsertOptimisticBlock processes a new block by inserting it to the fork choice store.
func (f *ForkChoice) InsertOptimisticBlock(
ctx context.Context,
slot types.Slot,
blockRoot, parentRoot [fieldparams.RootLength]byte,
justifiedEpoch, finalizedEpoch types.Epoch, optimistic bool,
justifiedEpoch, finalizedEpoch types.Epoch,
) error {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.ProcessBlock")
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.InsertOptimisticBlock")
defer span.End()

return f.store.insert(ctx, slot, blockRoot, parentRoot, justifiedEpoch, finalizedEpoch, optimistic)
return f.store.insert(ctx, slot, blockRoot, parentRoot, justifiedEpoch, finalizedEpoch)
}

// Prune prunes the fork choice store with the new finalized root. The store is only pruned if the input
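The rename above is more than cosmetic: with the optimistic flag gone from the signature, every inserted block starts out optimistic and is upgraded later. A hedged, in-package sketch of the resulting two-step flow (illustrative, not test code from this PR); it relies only on InsertOptimisticBlock and SetOptimisticToValid, both of which appear elsewhere in this diff.

// insertThenValidate inserts a block optimistically and later marks it (and its
// ancestors) as fully validated, e.g. once the execution payload has been verified.
func insertThenValidate(ctx context.Context, f *ForkChoice, slot types.Slot, root, parent [32]byte, justified, finalized types.Epoch) error {
	// The store records the node with optimistic = true unconditionally.
	if err := f.InsertOptimisticBlock(ctx, slot, root, parent, justified, finalized); err != nil {
		return err
	}
	// Flip the node and its ancestry to validated once the block is known to be valid.
	return f.SetOptimisticToValid(ctx, root)
}
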
@@ -15,9 +15,9 @@ import (
|
||||
func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0))
|
||||
|
||||
f.votes = []Vote{
|
||||
{indexToHash(1), indexToHash(1), 0},
|
||||
@@ -37,9 +37,9 @@ func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) {
|
||||
func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0))
|
||||
s := f.store
|
||||
s.nodeByRoot[indexToHash(1)].balance = 100
|
||||
s.nodeByRoot[indexToHash(2)].balance = 100
|
||||
@@ -61,12 +61,12 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
|
||||
func TestForkChoice_IsCanonical(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(1), 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 4, indexToHash(4), indexToHash(2), 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 5, indexToHash(5), indexToHash(4), 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 6, indexToHash(6), indexToHash(5), 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(1), 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 4, indexToHash(4), indexToHash(2), 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, indexToHash(5), indexToHash(4), 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 6, indexToHash(6), indexToHash(5), 1, 1))
|
||||
|
||||
require.Equal(t, true, f.IsCanonical(params.BeaconConfig().ZeroHash))
|
||||
require.Equal(t, false, f.IsCanonical(indexToHash(1)))
|
||||
@@ -80,12 +80,12 @@ func TestForkChoice_IsCanonical(t *testing.T) {
|
||||
func TestForkChoice_IsCanonicalReorg(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 2, [32]byte{'2'}, params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 3, [32]byte{'3'}, [32]byte{'1'}, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 4, [32]byte{'4'}, [32]byte{'2'}, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 5, [32]byte{'5'}, [32]byte{'4'}, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 6, [32]byte{'6'}, [32]byte{'5'}, 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, [32]byte{'2'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, [32]byte{'3'}, [32]byte{'1'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 4, [32]byte{'4'}, [32]byte{'2'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, [32]byte{'5'}, [32]byte{'4'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 6, [32]byte{'6'}, [32]byte{'5'}, 1, 1))
|
||||
|
||||
f.store.nodesLock.Lock()
|
||||
f.store.nodeByRoot[[32]byte{'3'}].balance = 10
|
||||
@@ -114,9 +114,9 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
|
||||
func TestForkChoice_AncestorRoot(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 5, indexToHash(3), indexToHash(2), 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, indexToHash(3), indexToHash(2), 1, 1))
|
||||
f.store.treeRootNode = f.store.nodeByRoot[indexToHash(1)]
|
||||
f.store.treeRootNode.parent = nil
|
||||
|
||||
@@ -140,8 +140,8 @@ func TestForkChoice_AncestorRoot(t *testing.T) {
|
||||
func TestForkChoice_AncestorEqualSlot(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'3'}, [32]byte{'1'}, 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'3'}, [32]byte{'1'}, 1, 1))
|
||||
|
||||
r, err := f.AncestorRoot(ctx, [32]byte{'3'}, 100)
|
||||
require.NoError(t, err)
|
||||
@@ -152,8 +152,8 @@ func TestForkChoice_AncestorEqualSlot(t *testing.T) {
|
||||
func TestForkChoice_AncestorLowerSlot(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 200, [32]byte{'3'}, [32]byte{'1'}, 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 200, [32]byte{'3'}, [32]byte{'1'}, 1, 1))
|
||||
|
||||
r, err := f.AncestorRoot(ctx, [32]byte{'3'}, 150)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -48,10 +48,10 @@ var (
Help: "The number of times pruning happened.",
},
)
optimisticCount = promauto.NewCounter(
prometheus.CounterOpts{
Name: "doublylinkedtree_optimistic_count",
Help: "The number of blocks that have been optimistically synced.",
validatedCount = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "doublylinkedtree_validated_count",
Help: "The number of blocks that have been fully validated.",
},
)
)

@@ -24,7 +24,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
|
||||
// 0
|
||||
// /
|
||||
// 2 <- head
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
|
||||
@@ -33,7 +33,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
|
||||
// 0
|
||||
// / \
|
||||
// head -> 2 1
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
|
||||
@@ -44,7 +44,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
|
||||
// head -> 2 1
|
||||
// |
|
||||
// 3
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1))
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
|
||||
@@ -55,7 +55,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
|
||||
// 2 1
|
||||
// | |
|
||||
// head -> 4 3
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(4), indexToHash(2), 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(4), indexToHash(2), 1, 1))
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
|
||||
@@ -68,7 +68,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
|
||||
// head -> 4 3
|
||||
// |
|
||||
// 5 <- justified epoch = 2
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 1))
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
|
||||
@@ -107,7 +107,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
|
||||
// 5
|
||||
// |
|
||||
// 6 <- head
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(6), indexToHash(5), 2, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(6), indexToHash(5), 2, 1))
|
||||
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 2")
|
||||
|
||||
@@ -120,6 +120,7 @@ func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
}

n.optimistic = false
validatedCount.Inc()
return n.parent.setNodeAndParentValidated(ctx)
}

@@ -13,9 +13,9 @@ import (
|
||||
func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0))
|
||||
|
||||
// The updated balances of each node is 100
|
||||
s := f.store
|
||||
@@ -36,9 +36,9 @@ func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
|
||||
func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), 0, 0))
|
||||
|
||||
// The updated balances of each node is 100
|
||||
s := f.store
|
||||
@@ -63,7 +63,7 @@ func TestNode_UpdateBestDescendant_NonViableChild(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
// Input child is not viable.
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 2, 3, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 2, 3))
|
||||
|
||||
// Verify parent's best child and best descendant are `none`.
|
||||
s := f.store
|
||||
@@ -76,7 +76,7 @@ func TestNode_UpdateBestDescendant_ViableChild(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
// Input child is best descendant
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
|
||||
s := f.store
|
||||
assert.Equal(t, 1, len(s.treeRootNode.children))
|
||||
@@ -87,8 +87,8 @@ func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
// Input child is best descendant
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
|
||||
s := f.store
|
||||
s.nodeByRoot[indexToHash(1)].weight = 100
|
||||
@@ -103,8 +103,8 @@ func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
// Input child is best descendant
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
|
||||
s := f.store
|
||||
s.nodeByRoot[indexToHash(1)].weight = 200
|
||||
@@ -119,9 +119,9 @@ func TestNode_TestDepth(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
// Input child is best descendant
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
|
||||
s := f.store
|
||||
require.Equal(t, s.nodeByRoot[indexToHash(2)].depth(), uint64(2))
|
||||
@@ -151,11 +151,11 @@ func TestNode_ViableForHead(t *testing.T) {
|
||||
func TestNode_LeadsToViableHead(t *testing.T) {
|
||||
f := setup(4, 3)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(1), 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 4, indexToHash(4), indexToHash(2), 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 5, indexToHash(5), indexToHash(3), 4, 3, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(1), 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 4, indexToHash(4), indexToHash(2), 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, indexToHash(5), indexToHash(3), 4, 3))
|
||||
|
||||
require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 3))
|
||||
require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 3))
|
||||
@@ -172,11 +172,13 @@ func TestNode_SetFullyValidated(t *testing.T) {
|
||||
// \
|
||||
// -- 5 (true)
|
||||
//
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 4, indexToHash(4), indexToHash(3), 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 5, indexToHash(5), indexToHash(1), 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.SetOptimisticToValid(ctx, params.BeaconConfig().ZeroHash))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1))
|
||||
require.NoError(t, f.SetOptimisticToValid(ctx, indexToHash(1)))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 4, indexToHash(4), indexToHash(3), 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, indexToHash(5), indexToHash(1), 1, 1))
|
||||
|
||||
opt, err := f.IsOptimistic(ctx, indexToHash(5))
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -51,18 +51,18 @@ func TestPruneInvalid(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1))
|
||||
|
||||
require.NoError(t, f.store.removeNode(context.Background(), tc.root))
|
||||
require.Equal(t, tc.wantedNodeNumber, f.NodeCount())
|
||||
|
||||
@@ -41,14 +41,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
slot := types.Slot(1)
|
||||
newRoot := indexToHash(1)
|
||||
require.NoError(t,
|
||||
f.ProcessBlock(
|
||||
f.InsertOptimisticBlock(
|
||||
ctx,
|
||||
slot,
|
||||
newRoot,
|
||||
headRoot,
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
true,
|
||||
),
|
||||
)
|
||||
f.ProcessAttestation(ctx, []uint64{0}, newRoot, fEpoch)
|
||||
@@ -65,14 +64,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
slot = types.Slot(2)
|
||||
newRoot = indexToHash(2)
|
||||
require.NoError(t,
|
||||
f.ProcessBlock(
|
||||
f.InsertOptimisticBlock(
|
||||
ctx,
|
||||
slot,
|
||||
newRoot,
|
||||
headRoot,
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
true,
|
||||
),
|
||||
)
|
||||
f.ProcessAttestation(ctx, []uint64{1}, newRoot, fEpoch)
|
||||
@@ -91,14 +89,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
slot = types.Slot(3)
|
||||
newRoot = indexToHash(3)
|
||||
require.NoError(t,
|
||||
f.ProcessBlock(
|
||||
f.InsertOptimisticBlock(
|
||||
ctx,
|
||||
slot,
|
||||
newRoot,
|
||||
headRoot,
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
true,
|
||||
),
|
||||
)
|
||||
f.ProcessAttestation(ctx, []uint64{2}, newRoot, fEpoch)
|
||||
@@ -117,14 +114,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
slot = types.Slot(3)
|
||||
newRoot = indexToHash(4)
|
||||
require.NoError(t,
|
||||
f.ProcessBlock(
|
||||
f.InsertOptimisticBlock(
|
||||
ctx,
|
||||
slot,
|
||||
newRoot,
|
||||
headRoot,
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
true,
|
||||
),
|
||||
)
|
||||
f.ProcessAttestation(ctx, []uint64{3}, newRoot, fEpoch)
|
||||
@@ -187,14 +183,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
honestBlockSlot := types.Slot(2)
|
||||
honestBlock := indexToHash(2)
|
||||
require.NoError(t,
|
||||
f.ProcessBlock(
|
||||
f.InsertOptimisticBlock(
|
||||
ctx,
|
||||
honestBlockSlot,
|
||||
honestBlock,
|
||||
zeroHash,
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
true,
|
||||
),
|
||||
)
|
||||
r, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
|
||||
@@ -204,14 +199,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
maliciouslyWithheldBlockSlot := types.Slot(1)
|
||||
maliciouslyWithheldBlock := indexToHash(1)
|
||||
require.NoError(t,
|
||||
f.ProcessBlock(
|
||||
f.InsertOptimisticBlock(
|
||||
ctx,
|
||||
maliciouslyWithheldBlockSlot,
|
||||
maliciouslyWithheldBlock,
|
||||
zeroHash,
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
true,
|
||||
),
|
||||
)
|
||||
|
||||
@@ -253,14 +247,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
honestBlockSlot := types.Slot(2)
|
||||
honestBlock := indexToHash(2)
|
||||
require.NoError(t,
|
||||
f.ProcessBlock(
|
||||
f.InsertOptimisticBlock(
|
||||
ctx,
|
||||
honestBlockSlot,
|
||||
honestBlock,
|
||||
zeroHash,
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
true,
|
||||
),
|
||||
)
|
||||
|
||||
@@ -272,14 +265,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
maliciouslyWithheldBlockSlot := types.Slot(1)
|
||||
maliciouslyWithheldBlock := indexToHash(1)
|
||||
require.NoError(t,
|
||||
f.ProcessBlock(
|
||||
f.InsertOptimisticBlock(
|
||||
ctx,
|
||||
maliciouslyWithheldBlockSlot,
|
||||
maliciouslyWithheldBlock,
|
||||
zeroHash,
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
true,
|
||||
),
|
||||
)
|
||||
|
||||
@@ -328,14 +320,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
cSlot := types.Slot(2)
|
||||
c := indexToHash(2)
|
||||
require.NoError(t,
|
||||
f.ProcessBlock(
|
||||
f.InsertOptimisticBlock(
|
||||
ctx,
|
||||
cSlot,
|
||||
c,
|
||||
a, // parent
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
true,
|
||||
),
|
||||
)
|
||||
|
||||
@@ -352,14 +343,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
bSlot := types.Slot(1)
|
||||
b := indexToHash(1)
|
||||
require.NoError(t,
|
||||
f.ProcessBlock(
|
||||
f.InsertOptimisticBlock(
|
||||
ctx,
|
||||
bSlot,
|
||||
b,
|
||||
a, // parent
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
true,
|
||||
),
|
||||
)
|
||||
|
||||
@@ -376,14 +366,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
dSlot := types.Slot(3)
|
||||
d := indexToHash(3)
|
||||
require.NoError(t,
|
||||
f.ProcessBlock(
|
||||
f.InsertOptimisticBlock(
|
||||
ctx,
|
||||
dSlot,
|
||||
d,
|
||||
b, // parent
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
true,
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
@@ -97,7 +97,7 @@ func (s *Store) head(ctx context.Context, justifiedRoot [32]byte) ([32]byte, err
func (s *Store) insert(ctx context.Context,
slot types.Slot,
root, parentRoot [fieldparams.RootLength]byte,
justifiedEpoch, finalizedEpoch types.Epoch, optimistic bool) error {
justifiedEpoch, finalizedEpoch types.Epoch) error {
_, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.insert")
defer span.End()

@@ -117,7 +117,7 @@ func (s *Store) insert(ctx context.Context,
parent: parent,
justifiedEpoch: justifiedEpoch,
finalizedEpoch: finalizedEpoch,
optimistic: optimistic,
optimistic: true,
}

s.nodeByRoot[root] = n
@@ -128,14 +128,6 @@ func (s *Store) insert(ctx context.Context,
}
}

if !optimistic {
if err := n.setNodeAndParentValidated(ctx); err != nil {
return err
}
} else {
optimisticCount.Inc()
}

// Set the node as root if the store was empty
if s.treeRootNode == nil {
s.treeRootNode = n

@@ -33,14 +33,14 @@ func TestStore_FinalizedEpoch(t *testing.T) {
|
||||
|
||||
func TestStore_NodeCount(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.Equal(t, 2, f.NodeCount())
|
||||
}
|
||||
|
||||
func TestStore_NodeByRoot(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 0, 0))
|
||||
node0 := f.store.treeRootNode
|
||||
node1 := node0.children[0]
|
||||
node2 := node1.children[0]
|
||||
@@ -61,7 +61,7 @@ func TestStore_NodeByRoot(t *testing.T) {
|
||||
|
||||
func TestForkChoice_HasNode(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.Equal(t, true, f.HasNode(indexToHash(1)))
|
||||
}
|
||||
|
||||
@@ -74,7 +74,7 @@ func TestStore_Head_UnknownJustifiedRoot(t *testing.T) {
|
||||
|
||||
func TestStore_Head_Itself(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
|
||||
// Since the justified node does not have a best descendant so the best node
|
||||
// is itself.
|
||||
@@ -85,10 +85,10 @@ func TestStore_Head_Itself(t *testing.T) {
|
||||
|
||||
func TestStore_Head_BestDescendant(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(3), indexToHash(1), 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(4), indexToHash(2), 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(3), indexToHash(1), 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(4), indexToHash(2), 0, 0))
|
||||
h, err := f.store.head(context.Background(), indexToHash(1))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, h, indexToHash(4))
|
||||
@@ -97,9 +97,9 @@ func TestStore_Head_BestDescendant(t *testing.T) {
|
||||
func TestStore_UpdateBestDescendant_ContextCancelled(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
f := setup(0, 0)
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
cancel()
|
||||
err := f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0, false)
|
||||
err := f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 0, 0)
|
||||
require.ErrorContains(t, "context canceled", err)
|
||||
}
|
||||
|
||||
@@ -108,7 +108,7 @@ func TestStore_Insert(t *testing.T) {
|
||||
treeRootNode := &Node{slot: 0, root: indexToHash(0)}
|
||||
nodeByRoot := map[[32]byte]*Node{indexToHash(0): treeRootNode}
|
||||
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode}
|
||||
require.NoError(t, s.insert(context.Background(), 100, indexToHash(100), indexToHash(0), 1, 1, false))
|
||||
require.NoError(t, s.insert(context.Background(), 100, indexToHash(100), indexToHash(0), 1, 1))
|
||||
assert.Equal(t, 2, len(s.nodeByRoot), "Did not insert block")
|
||||
assert.Equal(t, (*Node)(nil), treeRootNode.parent, "Incorrect parent")
|
||||
assert.Equal(t, 1, len(treeRootNode.children), "Incorrect children number")
|
||||
@@ -132,9 +132,9 @@ func TestStore_Prune_LessThanThreshold(t *testing.T) {
|
||||
numOfNodes := uint64(100)
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
for i := uint64(2); i < numOfNodes; i++ {
|
||||
require.NoError(t, f.ProcessBlock(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), 0, 0))
|
||||
}
|
||||
|
||||
s := f.store
|
||||
@@ -151,9 +151,9 @@ func TestStore_Prune_MoreThanThreshold(t *testing.T) {
|
||||
numOfNodes := uint64(100)
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
for i := uint64(2); i < numOfNodes; i++ {
|
||||
require.NoError(t, f.ProcessBlock(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), 0, 0))
|
||||
}
|
||||
|
||||
s := f.store
|
||||
@@ -169,9 +169,9 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
|
||||
numOfNodes := uint64(100)
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
for i := uint64(2); i < numOfNodes; i++ {
|
||||
require.NoError(t, f.ProcessBlock(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), 0, 0))
|
||||
}
|
||||
|
||||
s := f.store
|
||||
@@ -196,8 +196,8 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
|
||||
func TestStore_Prune_NoDanglingBranch(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
f.store.pruneThreshold = 0
|
||||
|
||||
s := f.store
|
||||
@@ -221,18 +221,18 @@ func TestStore_tips(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1))
|
||||
expectedMap := map[[32]byte]types.Slot{
|
||||
[32]byte{'f'}: 105,
|
||||
[32]byte{'i'}: 106,
|
||||
@@ -250,8 +250,8 @@ func TestStore_tips(t *testing.T) {
|
||||
func TestStore_PruneMapsNodes(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
|
||||
s := f.store
|
||||
s.pruneThreshold = 0
|
||||
@@ -263,9 +263,9 @@ func TestStore_PruneMapsNodes(t *testing.T) {
|
||||
func TestStore_HasParent(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.ProcessBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1, false))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 3, indexToHash(3), indexToHash(2), 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), 1, 1))
|
||||
require.Equal(t, false, f.HasParent(params.BeaconConfig().ZeroHash))
|
||||
require.Equal(t, true, f.HasParent(indexToHash(1)))
|
||||
require.Equal(t, true, f.HasParent(indexToHash(2)))
|
||||
|
||||
@@ -22,7 +22,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// 0
|
||||
// /
|
||||
// 2 <- head
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
@@ -32,7 +32,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// 0
|
||||
// / \
|
||||
// head -> 2 1
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
@@ -62,7 +62,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// head -> 2 1
|
||||
// |
|
||||
// 3
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1))
|
||||
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
@@ -98,7 +98,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// 3
|
||||
// |
|
||||
// 4 <- head
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(4), indexToHash(3), 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(4), indexToHash(3), 1, 1))
|
||||
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
@@ -114,7 +114,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// 4 <- head
|
||||
// /
|
||||
// 5 <- justified epoch = 2
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 2, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 2))
|
||||
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
@@ -130,7 +130,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// 4 <- head
|
||||
// / \
|
||||
// 5 6 <- justified epoch = 0
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1))
|
||||
|
||||
// Moved 2 votes to block 5:
|
||||
// 0
|
||||
@@ -142,7 +142,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// 4
|
||||
// / \
|
||||
// 2 votes-> 5 6
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1))
|
||||
|
||||
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(5), 4)
|
||||
|
||||
@@ -163,9 +163,9 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// 8
|
||||
// |
|
||||
// 9
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(7), indexToHash(5), 2, 2, true))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(8), indexToHash(7), 2, 2, true))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(9), indexToHash(8), 2, 2, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(7), indexToHash(5), 2, 2))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(8), indexToHash(7), 2, 2))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(9), indexToHash(8), 2, 2))
|
||||
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
@@ -210,7 +210,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// / \
|
||||
// 2 votes->9 10
|
||||
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(9), 5)
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(10), indexToHash(8), 2, 2, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(10), indexToHash(8), 2, 2))
|
||||
|
||||
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
|
||||
require.NoError(t, err)
|
||||
@@ -289,7 +289,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// 9 10
|
||||
// |
|
||||
// head-> 11
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(11), indexToHash(9), 2, 2, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(11), indexToHash(9), 2, 2))
|
||||
|
||||
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -29,13 +29,13 @@ type HeadRetriever interface {

// BlockProcessor processes the block that's used for accounting fork choice.
type BlockProcessor interface {
ProcessBlock(ctx context.Context,
InsertOptimisticBlock(ctx context.Context,
slot types.Slot,
root [32]byte,
parentRoot [32]byte,
justifiedEpoch types.Epoch,
finalizedEpoch types.Epoch,
optimisticStatus bool) error
) error
}

// AttestationProcessor processes the attestation that's used for accounting fork choice.
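For code that consumes fork choice through this interface, the change simply drops the trailing optimistic flag. A hedged sketch of coding against the narrowed shape; the local blockProcessor mirror and blockInfo struct are assumptions so the example stands alone, while the real interface is the one in the hunk above.

package forkchoicedemo

import (
	"context"

	types "github.com/prysmaticlabs/eth2-types"
)

// blockProcessor mirrors the BlockProcessor shape shown above.
type blockProcessor interface {
	InsertOptimisticBlock(ctx context.Context, slot types.Slot, root, parentRoot [32]byte,
		justifiedEpoch, finalizedEpoch types.Epoch) error
}

type blockInfo struct {
	slot                 types.Slot
	root, parentRoot     [32]byte
	justified, finalized types.Epoch
}

// insertChain inserts a batch of blocks; callers no longer decide optimistic status here,
// fork choice tracks validation separately.
func insertChain(ctx context.Context, bp blockProcessor, blocks []blockInfo) error {
	for _, b := range blocks {
		if err := bp.InsertOptimisticBlock(ctx, b.slot, b.root, b.parentRoot, b.justified, b.finalized); err != nil {
			return err
		}
	}
	return nil
}
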
@@ -21,6 +21,7 @@ go_library(
|
||||
deps = [
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
|
||||
@@ -27,9 +27,9 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
|
||||
// 2 <- justified: 1, finalized: 0
|
||||
// |
|
||||
// 3 <- justified: 2, finalized: 1
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 1, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(3), indexToHash(2), 2, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(2), indexToHash(1), 1, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(3), indexToHash(2), 2, 1))
|
||||
|
||||
// With starting justified epoch at 0, the head should be 3:
|
||||
// 0 <- start
|
||||
@@ -89,17 +89,17 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
|
||||
// | |
|
||||
// justified: 2, finalized: 0 -> 9 10 <- justified: 2, finalized: 0
|
||||
// Left branch.
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(3), indexToHash(1), 1, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(5), indexToHash(3), 1, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(7), indexToHash(5), 1, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(9), indexToHash(7), 2, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(3), indexToHash(1), 1, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(5), indexToHash(3), 1, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(7), indexToHash(5), 1, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(9), indexToHash(7), 2, 0))
|
||||
// Right branch.
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 1, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 2, indexToHash(4), indexToHash(2), 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 3, indexToHash(6), indexToHash(4), 0, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(8), indexToHash(6), 1, 0, false))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 4, indexToHash(10), indexToHash(8), 2, 0, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(4), indexToHash(2), 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(6), indexToHash(4), 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(8), indexToHash(6), 1, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(10), indexToHash(8), 2, 0))
|
||||
|
||||
// With start at 0, the head should be 10:
|
||||
// 0 <-- start
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
 	"context"

 	"github.com/prysmaticlabs/prysm/config/params"
+	pmath "github.com/prysmaticlabs/prysm/math"
 	"go.opencensus.io/trace"
 )

@@ -46,19 +47,27 @@ func computeDeltas(
 		if ok {
 			// Protection against out of bound, the `nextDeltaIndex` which defines
 			// the block location in the dag can not exceed the total `delta` length.
-			if int(nextDeltaIndex) >= len(deltas) {
+			if nextDeltaIndex >= uint64(len(deltas)) {
 				return nil, nil, errInvalidNodeDelta
 			}
-			deltas[nextDeltaIndex] += int(newBalance)
+			delta, err := pmath.Int(newBalance)
+			if err != nil {
+				return nil, nil, err
+			}
+			deltas[nextDeltaIndex] += delta
 		}

 		currentDeltaIndex, ok := blockIndices[vote.currentRoot]
 		if ok {
 			// Protection against out of bound (same as above)
-			if int(currentDeltaIndex) >= len(deltas) {
+			if currentDeltaIndex >= uint64(len(deltas)) {
 				return nil, nil, errInvalidNodeDelta
 			}
-			deltas[currentDeltaIndex] -= int(oldBalance)
+			delta, err := pmath.Int(oldBalance)
+			if err != nil {
+				return nil, nil, err
+			}
+			deltas[currentDeltaIndex] -= delta
 		}
 	}

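The computeDeltas hunk above replaces bare int(...) casts with the checked conversion from prysm's math package, so a balance that cannot be represented as an int surfaces as an error instead of a silently wrapped delta. A minimal, self-contained sketch of that pattern (intFromUint64 below is a hypothetical stand-in for pmath.Int):

package main

import (
	"errors"
	"fmt"
	"math"
)

// intFromUint64 mirrors the checked conversion used in the diff: it refuses to
// convert a uint64 that would overflow the platform int instead of wrapping.
func intFromUint64(v uint64) (int, error) {
	if v > math.MaxInt {
		return 0, errors.New("value does not fit in int")
	}
	return int(v), nil
}

func main() {
	deltas := make([]int, 4)
	newBalance := uint64(32_000_000_000) // 32 ETH in Gwei
	d, err := intFromUint64(newBalance)
	if err != nil {
		panic(err)
	}
	deltas[2] += d
	fmt.Println(deltas) // [0 0 32000000000 0]
}
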
@@ -24,7 +24,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
|
||||
// 0
|
||||
// /
|
||||
// 2 <- head
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
|
||||
@@ -33,7 +33,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
|
||||
// 0
|
||||
// / \
|
||||
// head -> 2 1
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
|
||||
@@ -44,7 +44,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
|
||||
// head -> 2 1
|
||||
// |
|
||||
// 3
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1))
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
|
||||
@@ -55,7 +55,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
|
||||
// 2 1
|
||||
// | |
|
||||
// head -> 4 3
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(4), indexToHash(2), 1, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(4), indexToHash(2), 1, 1))
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
|
||||
@@ -68,7 +68,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
|
||||
// head -> 4 3
|
||||
// |
|
||||
// 5 <- justified epoch = 2
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 1))
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
|
||||
@@ -107,7 +107,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
|
||||
// 5
|
||||
// |
|
||||
// 6 <- head
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(6), indexToHash(5), 2, 1, false))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(6), indexToHash(5), 2, 1))
|
||||
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 2")
|
||||
|
||||
@@ -220,18 +220,18 @@ func TestSetOptimisticToValid(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1))
|
||||
tests := []struct {
|
||||
root [32]byte // the root of the new VALID block
|
||||
tips map[[32]byte]types.Slot // the old synced tips
|
||||
@@ -409,18 +409,18 @@ func TestSetOptimisticToInvalid(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1))
|
||||
weights := []uint64{10, 10, 9, 7, 1, 6, 2, 3, 1, 1, 1, 0, 0}
|
||||
f.syncedTips.Lock()
|
||||
f.syncedTips.validatedTips = tc.tips
|
||||
@@ -467,18 +467,18 @@ func TestFindSyncedTip(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1))
|
||||
tests := []struct {
|
||||
root [32]byte // the root of the block
|
||||
tips map[[32]byte]types.Slot // the synced tips
|
||||
@@ -548,3 +548,45 @@ func TestFindSyncedTip(t *testing.T) {
|
||||
syncedTips.RUnlock()
|
||||
}
|
||||
}
|
||||
|
||||
// This is a regression test (10341)
|
||||
func TestIsOptimistic_DeadLock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 90, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
tips := map[[32]byte]types.Slot{
|
||||
[32]byte{'a'}: 100,
|
||||
[32]byte{'d'}: 102,
|
||||
}
|
||||
f.syncedTips.validatedTips = tips
|
||||
_, err := f.IsOptimistic(ctx, [32]byte{'a'})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Acquire a write lock, this should not hang
|
||||
f.store.nodesLock.Lock()
|
||||
f.store.nodesLock.Unlock()
|
||||
_, err = f.IsOptimistic(ctx, [32]byte{'e'})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Acquire a write lock, this should not hang
|
||||
f.store.nodesLock.Lock()
|
||||
f.store.nodesLock.Unlock()
|
||||
_, err = f.IsOptimistic(ctx, [32]byte{'b'})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Acquire a write lock, this should not hang
|
||||
f.store.nodesLock.Lock()
|
||||
f.store.nodesLock.Unlock()
|
||||
|
||||
_, err = f.IsOptimistic(ctx, [32]byte{'c'})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Acquire a write lock, this should not hang
|
||||
f.store.nodesLock.Lock()
|
||||
f.store.nodesLock.Unlock()
|
||||
|
||||
}
|
||||
|
||||
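The regression test above (issue 10341) guards against IsOptimistic hanging a later writer by holding the store's nodes lock after returning. A minimal sketch of the locking discipline it checks, using a toy store rather than Prysm's (hypothetical types):

package main

import (
	"fmt"
	"sync"
)

type store struct {
	nodesLock sync.RWMutex
	nodes     map[[32]byte]bool
}

// isOptimistic takes the read lock, reads what it needs, and releases the lock
// before returning, so a subsequent writer can always acquire the write lock.
func (s *store) isOptimistic(root [32]byte) bool {
	s.nodesLock.RLock()
	defer s.nodesLock.RUnlock()
	return s.nodes[root]
}

func main() {
	s := &store{nodes: map[[32]byte]bool{}}
	s.nodes[[32]byte{'a'}] = true

	fmt.Println(s.isOptimistic([32]byte{'a'})) // true

	// Mirrors the test: acquiring the write lock afterwards must not hang.
	s.nodesLock.Lock()
	s.nodesLock.Unlock()
	fmt.Println("write lock acquired and released")
}
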
@@ -41,14 +41,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
slot := types.Slot(1)
|
||||
newRoot := indexToHash(1)
|
||||
require.NoError(t,
|
||||
f.ProcessBlock(
|
||||
f.InsertOptimisticBlock(
|
||||
ctx,
|
||||
slot,
|
||||
newRoot,
|
||||
headRoot,
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
false,
|
||||
),
|
||||
)
|
||||
f.ProcessAttestation(ctx, []uint64{0}, newRoot, fEpoch)
|
||||
@@ -65,14 +64,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
slot = types.Slot(2)
|
||||
newRoot = indexToHash(2)
|
||||
require.NoError(t,
|
||||
f.ProcessBlock(
|
||||
f.InsertOptimisticBlock(
|
||||
ctx,
|
||||
slot,
|
||||
newRoot,
|
||||
headRoot,
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
false,
|
||||
),
|
||||
)
|
||||
f.ProcessAttestation(ctx, []uint64{1}, newRoot, fEpoch)
|
||||
@@ -91,14 +89,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
slot = types.Slot(2)
|
||||
newRoot = indexToHash(2)
|
||||
require.NoError(t,
|
||||
f.ProcessBlock(
|
||||
f.InsertOptimisticBlock(
|
||||
ctx,
|
||||
slot,
|
||||
newRoot,
|
||||
headRoot,
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
false,
|
||||
),
|
||||
)
|
||||
f.ProcessAttestation(ctx, []uint64{2}, newRoot, fEpoch)
|
||||
@@ -117,14 +114,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
slot = types.Slot(3)
|
||||
newRoot = indexToHash(4)
|
||||
require.NoError(t,
|
||||
f.ProcessBlock(
|
||||
f.InsertOptimisticBlock(
|
||||
ctx,
|
||||
slot,
|
||||
newRoot,
|
||||
headRoot,
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
false,
|
||||
),
|
||||
)
|
||||
f.ProcessAttestation(ctx, []uint64{3}, newRoot, fEpoch)
|
||||
@@ -184,14 +180,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
honestBlockSlot := types.Slot(2)
|
||||
honestBlock := indexToHash(2)
|
||||
require.NoError(t,
|
||||
f.ProcessBlock(
|
||||
f.InsertOptimisticBlock(
|
||||
ctx,
|
||||
honestBlockSlot,
|
||||
honestBlock,
|
||||
zeroHash,
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
false,
|
||||
),
|
||||
)
|
||||
r, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
|
||||
@@ -201,14 +196,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
maliciouslyWithheldBlockSlot := types.Slot(1)
|
||||
maliciouslyWithheldBlock := indexToHash(1)
|
||||
require.NoError(t,
|
||||
f.ProcessBlock(
|
||||
f.InsertOptimisticBlock(
|
||||
ctx,
|
||||
maliciouslyWithheldBlockSlot,
|
||||
maliciouslyWithheldBlock,
|
||||
zeroHash,
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
false,
|
||||
),
|
||||
)
|
||||
|
||||
@@ -250,14 +244,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
honestBlockSlot := types.Slot(2)
|
||||
honestBlock := indexToHash(2)
|
||||
require.NoError(t,
|
||||
f.ProcessBlock(
|
||||
f.InsertOptimisticBlock(
|
||||
ctx,
|
||||
honestBlockSlot,
|
||||
honestBlock,
|
||||
zeroHash,
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
false,
|
||||
),
|
||||
)
|
||||
|
||||
@@ -269,14 +262,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
maliciouslyWithheldBlockSlot := types.Slot(1)
|
||||
maliciouslyWithheldBlock := indexToHash(1)
|
||||
require.NoError(t,
|
||||
f.ProcessBlock(
|
||||
f.InsertOptimisticBlock(
|
||||
ctx,
|
||||
maliciouslyWithheldBlockSlot,
|
||||
maliciouslyWithheldBlock,
|
||||
zeroHash,
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
false,
|
||||
),
|
||||
)
|
||||
|
||||
@@ -325,14 +317,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
cSlot := types.Slot(2)
|
||||
c := indexToHash(2)
|
||||
require.NoError(t,
|
||||
f.ProcessBlock(
|
||||
f.InsertOptimisticBlock(
|
||||
ctx,
|
||||
cSlot,
|
||||
c,
|
||||
a, // parent
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
false,
|
||||
),
|
||||
)
|
||||
|
||||
@@ -349,14 +340,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
bSlot := types.Slot(1)
|
||||
b := indexToHash(1)
|
||||
require.NoError(t,
|
||||
f.ProcessBlock(
|
||||
f.InsertOptimisticBlock(
|
||||
ctx,
|
||||
bSlot,
|
||||
b,
|
||||
a, // parent
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
false,
|
||||
),
|
||||
)
|
||||
|
||||
@@ -373,14 +363,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
dSlot := types.Slot(3)
|
||||
d := indexToHash(3)
|
||||
require.NoError(t,
|
||||
f.ProcessBlock(
|
||||
f.InsertOptimisticBlock(
|
||||
ctx,
|
||||
dSlot,
|
||||
d,
|
||||
b, // parent
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
false,
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
 	types "github.com/prysmaticlabs/eth2-types"
 	fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
 	"github.com/prysmaticlabs/prysm/config/params"
+	pmath "github.com/prysmaticlabs/prysm/math"
 	pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
 	"go.opencensus.io/trace"
 )
@@ -143,23 +144,16 @@ func (f *ForkChoice) ProposerBoost() [fieldparams.RootLength]byte {
 	return f.store.proposerBoost()
 }

-// ProcessBlock processes a new block by inserting it to the fork choice store.
-func (f *ForkChoice) ProcessBlock(
+// InsertOptimisticBlock processes a new block by inserting it to the fork choice store.
+func (f *ForkChoice) InsertOptimisticBlock(
 	ctx context.Context,
 	slot types.Slot,
 	blockRoot, parentRoot [32]byte,
-	justifiedEpoch, finalizedEpoch types.Epoch, optimistic bool) error {
-	ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.ProcessBlock")
+	justifiedEpoch, finalizedEpoch types.Epoch) error {
+	ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.InsertOptimisticBlock")
 	defer span.End()

-	if err := f.store.insert(ctx, slot, blockRoot, parentRoot, justifiedEpoch, finalizedEpoch); err != nil {
-		return err
-	}
-
-	if !optimistic {
-		return f.SetOptimisticToValid(ctx, blockRoot)
-	}
-	return nil
+	return f.store.insert(ctx, slot, blockRoot, parentRoot, justifiedEpoch, finalizedEpoch)
 }

 // Prune prunes the fork choice store with the new finalized root. The store is only pruned if the input
@@ -446,7 +440,11 @@ func (s *Store) applyWeightChanges(
 			s.proposerBoostLock.Unlock()
 			return err
 		}
-		nodeDelta = nodeDelta + int(proposerScore)
+		iProposerScore, err := pmath.Int(proposerScore)
+		if err != nil {
+			return err
+		}
+		nodeDelta = nodeDelta + iProposerScore
 	}
 	s.proposerBoostLock.Unlock()

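With the rename above, the insert path no longer takes an optimistic flag: every block enters the proto-array store as optimistic, and a separate SetOptimisticToValid call flips it once its execution payload has been verified. A toy sketch of that split (not Prysm's store, just the state machine it implies):

package main

import "fmt"

// optimisticStore tracks which block roots are still awaiting payload verification.
type optimisticStore struct {
	optimistic map[[32]byte]bool
}

// insertOptimisticBlock always records the block as optimistic, mirroring InsertOptimisticBlock.
func (s *optimisticStore) insertOptimisticBlock(root [32]byte) {
	s.optimistic[root] = true
}

// setOptimisticToValid is called later, once the execution client has validated the payload.
func (s *optimisticStore) setOptimisticToValid(root [32]byte) {
	delete(s.optimistic, root)
}

func main() {
	s := &optimisticStore{optimistic: map[[32]byte]bool{}}
	root := [32]byte{'a'}

	s.insertOptimisticBlock(root)
	fmt.Println("optimistic:", s.optimistic[root]) // true until verification

	s.setOptimisticToValid(root)
	fmt.Println("optimistic:", s.optimistic[root]) // false once marked valid
}
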
@@ -491,18 +491,18 @@ func TestStore_PruneSyncedTips(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1, true))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, 1, 1))
|
||||
syncedTips := &optimisticStore{
|
||||
validatedTips: map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
|
||||
@@ -23,7 +23,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// 0
|
||||
// /
|
||||
// 2 <- head
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
@@ -33,7 +33,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// 0
|
||||
// / \
|
||||
// head -> 2 1
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
@@ -63,7 +63,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// head -> 2 1
|
||||
// |
|
||||
// 3
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(3), indexToHash(1), 1, 1))
|
||||
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
@@ -99,7 +99,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// 3
|
||||
// |
|
||||
// 4 <- head
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(4), indexToHash(3), 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(4), indexToHash(3), 1, 1))
|
||||
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
@@ -115,7 +115,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// 4 <- head
|
||||
// /
|
||||
// 5 <- justified epoch = 2
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 2, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(5), indexToHash(4), 2, 2))
|
||||
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
@@ -131,7 +131,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// 4 <- head
|
||||
// / \
|
||||
// 5 6 <- justified epoch = 0
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1))
|
||||
|
||||
// Moved 2 votes to block 5:
|
||||
// 0
|
||||
@@ -143,7 +143,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// 4
|
||||
// / \
|
||||
// 2 votes-> 5 6
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(6), indexToHash(4), 1, 1))
|
||||
|
||||
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(5), 4)
|
||||
|
||||
@@ -164,9 +164,9 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// 8
|
||||
// |
|
||||
// 9
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(7), indexToHash(5), 2, 2, true))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(8), indexToHash(7), 2, 2, true))
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(9), indexToHash(8), 2, 2, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(7), indexToHash(5), 2, 2))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(8), indexToHash(7), 2, 2))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(9), indexToHash(8), 2, 2))
|
||||
|
||||
r, err = f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
require.NoError(t, err)
|
||||
@@ -211,7 +211,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// / \
|
||||
// 2 votes->9 10
|
||||
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(9), 5)
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(10), indexToHash(8), 2, 2, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(10), indexToHash(8), 2, 2))
|
||||
|
||||
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
|
||||
require.NoError(t, err)
|
||||
@@ -290,7 +290,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// 9 10
|
||||
// |
|
||||
// head-> 11
|
||||
require.NoError(t, f.ProcessBlock(context.Background(), 0, indexToHash(11), indexToHash(9), 2, 2, true))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(11), indexToHash(9), 2, 2))
|
||||
|
||||
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -41,6 +41,7 @@ go_library(
         "//beacon-chain/state:go_default_library",
         "//beacon-chain/state/stategen:go_default_library",
         "//beacon-chain/sync:go_default_library",
+        "//beacon-chain/sync/checkpoint:go_default_library",
         "//beacon-chain/sync/initial-sync:go_default_library",
         "//cmd:go_default_library",
         "//cmd/beacon-chain/flags:go_default_library",

@@ -1,6 +1,8 @@
 package node

 import (
+	"fmt"
+
 	"github.com/ethereum/go-ethereum/common"
 	types "github.com/prysmaticlabs/eth2-types"
 	"github.com/prysmaticlabs/prysm/cmd"
@@ -23,7 +25,7 @@ func configureTracing(cliCtx *cli.Context) error {
 func configureChainConfig(cliCtx *cli.Context) {
 	if cliCtx.IsSet(cmd.ChainConfigFileFlag.Name) {
 		chainConfigFileName := cliCtx.String(cmd.ChainConfigFileFlag.Name)
-		params.LoadChainConfigFile(chainConfigFileName)
+		params.LoadChainConfigFile(chainConfigFileName, nil)
 	}
 }

@@ -35,7 +37,7 @@ func configureHistoricalSlasher(cliCtx *cli.Context) {
 	params.OverrideBeaconConfig(c)
 	cmdConfig := cmd.Get()
 	// Allow up to 4096 attestations at a time to be requested from the beacon nde.
-	cmdConfig.MaxRPCPageSize = int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxAttestations))
+	cmdConfig.MaxRPCPageSize = int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxAttestations)) // lint:ignore uintcast -- Page size should not exceed int64 with these constants.
 	cmd.Init(cmdConfig)
 	log.Warnf(
 		"Setting %d slots per archive point and %d max RPC page size for historical slasher usage. This requires additional storage",
@@ -105,10 +107,17 @@ func configureInteropConfig(cliCtx *cli.Context) {
 	}
 }

-func configureExecutionSetting(cliCtx *cli.Context) {
-	if cliCtx.IsSet(flags.FeeRecipient.Name) {
-		c := params.BeaconConfig()
-		c.FeeRecipient = common.HexToAddress(cliCtx.String(flags.FeeRecipient.Name))
-		params.OverrideBeaconConfig(c)
-	}
+func configureExecutionSetting(cliCtx *cli.Context) error {
+	if !cliCtx.IsSet(flags.FeeRecipient.Name) {
+		return nil
+	}
+
+	c := params.BeaconConfig()
+	ha := cliCtx.String(flags.FeeRecipient.Name)
+	if !common.IsHexAddress(ha) {
+		return fmt.Errorf("%s is not a valid fee recipient address", ha)
+	}
+	c.DefaultFeeRecipient = common.HexToAddress(ha)
+	params.OverrideBeaconConfig(c)
+	return nil
 }

@@ -93,9 +93,20 @@ func TestConfigureExecutionSetting(t *testing.T) {
 	set.String(flags.FeeRecipient.Name, "", "")
 	require.NoError(t, set.Set(flags.FeeRecipient.Name, "0xB"))
 	cliCtx := cli.NewContext(&app, set, nil)
-	configureExecutionSetting(cliCtx)
-	assert.Equal(t, common.HexToAddress("0xB"), params.BeaconConfig().FeeRecipient)
+	err := configureExecutionSetting(cliCtx)
+	require.ErrorContains(t, "0xB is not a valid fee recipient address", err)
+
+	require.NoError(t, set.Set(flags.FeeRecipient.Name, "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"))
+	cliCtx = cli.NewContext(&app, set, nil)
+	err = configureExecutionSetting(cliCtx)
+	require.NoError(t, err)
+	assert.Equal(t, common.HexToAddress("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"), params.BeaconConfig().DefaultFeeRecipient)
+
+	require.NoError(t, set.Set(flags.FeeRecipient.Name, "0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"))
+	cliCtx = cli.NewContext(&app, set, nil)
+	err = configureExecutionSetting(cliCtx)
+	require.NoError(t, err)
+	assert.Equal(t, common.HexToAddress("0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"), params.BeaconConfig().DefaultFeeRecipient)
 }

 func TestConfigureNetwork(t *testing.T) {

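configureExecutionSetting now rejects malformed fee recipients instead of silently storing whatever HexToAddress produces, which is what the "0xB" test case above exercises. A standalone sketch of the validation, using the same go-ethereum helpers the diff relies on:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// parseFeeRecipient mirrors the check added in the diff: the string must be a
// well-formed hex address before it is converted.
func parseFeeRecipient(ha string) (common.Address, error) {
	if !common.IsHexAddress(ha) {
		return common.Address{}, fmt.Errorf("%s is not a valid fee recipient address", ha)
	}
	return common.HexToAddress(ha), nil
}

func main() {
	for _, in := range []string{"0xB", "0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"} {
		addr, err := parseFeeRecipient(in)
		fmt.Println(in, "->", addr.Hex(), err)
	}
}
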
@@ -44,6 +44,7 @@ import (
 	"github.com/prysmaticlabs/prysm/beacon-chain/state"
 	"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
 	regularsync "github.com/prysmaticlabs/prysm/beacon-chain/sync"
+	"github.com/prysmaticlabs/prysm/beacon-chain/sync/checkpoint"
 	initialsync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync"
 	"github.com/prysmaticlabs/prysm/cmd"
 	"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
@@ -101,6 +102,8 @@ type BeaconNode struct {
 	slasherAttestationsFeed *event.Feed
 	finalizedStateAtStartUp state.BeaconState
 	serviceFlagOpts         *serviceFlagOpts
 	blockchainFlagOpts      []blockchain.Option
+	CheckpointInitializer   checkpoint.Initializer
 }

 // New creates a new node instance, sets up configuration options, and registers
@@ -120,7 +123,9 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
 	configureEth1Config(cliCtx)
 	configureNetwork(cliCtx)
 	configureInteropConfig(cliCtx)
-	configureExecutionSetting(cliCtx)
+	if err := configureExecutionSetting(cliCtx); err != nil {
+		return nil, err
+	}

 	// Initializes any forks here.
 	params.BeaconConfig().InitializeForkSchedule()
@@ -237,7 +242,7 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {

 	// db.DatabasePath is the path to the containing directory
 	// db.NewDBFilename expands that to the canonical full path using
-	// the same constuction as NewDB()
+	// the same construction as NewDB()
 	c, err := newBeaconNodePromCollector(db.NewDBFilename(beacon.db.DatabasePath()))
 	if err != nil {
 		return nil, err
@@ -394,6 +399,13 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
 	if err := b.db.EnsureEmbeddedGenesis(b.ctx); err != nil {
 		return err
 	}
+
+	if b.CheckpointInitializer != nil {
+		if err := b.CheckpointInitializer.Initialize(b.ctx, d); err != nil {
+			return err
+		}
+	}
+
 	knownContract, err := b.db.DepositContractAddress(b.ctx)
 	if err != nil {
 		return err
@@ -462,7 +474,29 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
 }

 func (b *BeaconNode) startStateGen() error {
-	b.stateGen = stategen.New(b.db)
+	o := make([]stategen.StateGenOption, 0)
+	_, err := b.db.OriginCheckpointBlockRoot(b.ctx)
+	if err == nil {
+		// we'll get a db.ErrNotFound if the db was not initialized with a checkpoint
+		// so if err == nil, the node was initialized with checkpoint init.
+		o = append(o, stategen.WithInitType(stategen.BeaconDBInitTypeCheckpoint))
+		lowestSlot, err := b.db.LowestSyncedBlockSlot(b.ctx)
+		if err != nil {
+			return errors.Wrap(err, "error searching for lowest block slot on checkpoint-initialized node")
+		}
+		o = append(o, stategen.WithMinimumSlot(lowestSlot))
+	} else {
+		if errors.Is(err, db.ErrNotFound) {
+			// error is ErrNotFound, so we can assume the node was initialized with a genesis state (or via powchain)
+			o = append(o, stategen.WithInitType(stategen.BeaconDBInitTypeGenesisState))
+		} else {
+			// if the error is not an ErrNotFound, we failed to query the db for some reason,
+			// can't be confident how it was initialized.
+			return errors.Wrap(err, "could not retrieve checkpoint sync chain origin data from db")
+		}
+	}
+
+	sg := stategen.New(b.db, o...)

 	cp, err := b.db.FinalizedCheckpoint(b.ctx)
 	if err != nil {
@@ -472,6 +506,12 @@ func (b *BeaconNode) startStateGen() error {
 	r := bytesutil.ToBytes32(cp.Root)
 	// Consider edge case where finalized root are zeros instead of genesis root hash.
 	if r == params.BeaconConfig().ZeroHash {
+		if sg.MinimumSlot() > 0 {
+			// using checkpoint sync the minimum slot will be > 0, but the checkpoint block is marked as finalized,
+			// so if it is equal to ZeroHash, this is likely an error (and the genesis block won't be available).
+			msg := fmt.Sprintf("unable to retrieve genesis block, no slots before %d in db", sg.MinimumSlot())
+			return errors.Wrap(err, msg)
+		}
 		genesisBlock, err := b.db.GenesisBlock(b.ctx)
 		if err != nil {
 			return err
@@ -484,10 +524,12 @@ func (b *BeaconNode) startStateGen() error {
 		}
 	}

-	b.finalizedStateAtStartUp, err = b.stateGen.StateByRoot(b.ctx, r)
+	b.finalizedStateAtStartUp, err = sg.StateByRoot(b.ctx, r)
 	if err != nil {
 		return err
 	}

+	b.stateGen = sg
 	return nil
 }

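startStateGen now branches on whether the database carries a checkpoint-sync origin root: a successful lookup means checkpoint initialization, ErrNotFound means a genesis (or powchain) start, and any other error aborts startup. A self-contained sketch of that three-way decision (the lookup below is a hypothetical stub, not Prysm's db API):

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// originRoot stands in for db.OriginCheckpointBlockRoot: it only succeeds when the
// node was initialized from a checkpoint state.
func originRoot(checkpointInitialized bool) ([32]byte, error) {
	if !checkpointInitialized {
		return [32]byte{}, errNotFound
	}
	return [32]byte{0x01}, nil
}

func initType(checkpointInitialized bool) (string, error) {
	_, err := originRoot(checkpointInitialized)
	switch {
	case err == nil:
		return "checkpoint", nil // origin root present: checkpoint-initialized node
	case errors.Is(err, errNotFound):
		return "genesis", nil // no origin root: genesis state or powchain initialization
	default:
		return "", fmt.Errorf("could not retrieve chain origin data: %w", err)
	}
}

func main() {
	fmt.Println(initType(true))
	fmt.Println(initType(false))
}
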
@@ -104,8 +104,8 @@ func (s *Service) broadcastAttestation(ctx context.Context, subnet uint64, att *

 	span.AddAttributes(
 		trace.BoolAttribute("hasPeer", hasPeer),
-		trace.Int64Attribute("slot", int64(att.Data.Slot)),
-		trace.Int64Attribute("subnet", int64(subnet)),
+		trace.Int64Attribute("slot", int64(att.Data.Slot)), // lint:ignore uintcast -- It's safe to do this for tracing.
+		trace.Int64Attribute("subnet", int64(subnet)), // lint:ignore uintcast -- It's safe to do this for tracing.
 	)

 	if !hasPeer {
@@ -160,8 +160,8 @@ func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMs

 	span.AddAttributes(
 		trace.BoolAttribute("hasPeer", hasPeer),
-		trace.Int64Attribute("slot", int64(sMsg.Slot)),
-		trace.Int64Attribute("subnet", int64(subnet)),
+		trace.Int64Attribute("slot", int64(sMsg.Slot)), // lint:ignore uintcast -- It's safe to do this for tracing.
+		trace.Int64Attribute("subnet", int64(subnet)), // lint:ignore uintcast -- It's safe to do this for tracing.
 	)

 	if !hasPeer {
@@ -213,7 +213,9 @@ func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic
 	if span.IsRecordingEvents() {
 		id := hash.FastSum64(buf.Bytes())
 		messageLen := int64(buf.Len())
-		span.AddMessageSendEvent(int64(id), messageLen /*uncompressed*/, messageLen /*compressed*/)
+		// lint:ignore uintcast -- It's safe to do this for tracing.
+		iid := int64(id)
+		span.AddMessageSendEvent(iid, messageLen /*uncompressed*/, messageLen /*compressed*/)
 	}
 	if err := s.PublishToTopic(ctx, topic+s.Encoding().ProtocolSuffix(), buf.Bytes()); err != nil {
 		err := errors.Wrap(err, "could not publish message")

@@ -14,6 +14,7 @@ go_library(
     ],
     deps = [
        "//config/params:go_default_library",
+        "//math:go_default_library",
        "@com_github_ferranbt_fastssz//:go_default_library",
        "@com_github_gogo_protobuf//proto:go_default_library",
        "@com_github_golang_snappy//:go_default_library",

@@ -3,7 +3,6 @@ package encoder
 import (
 	"fmt"
 	"io"
-	"math"
 	"sync"

 	fastssz "github.com/ferranbt/fastssz"
@@ -11,6 +10,7 @@ import (
 	"github.com/golang/snappy"
 	"github.com/pkg/errors"
 	"github.com/prysmaticlabs/prysm/config/params"
+	"github.com/prysmaticlabs/prysm/math"
 )

 var _ NetworkEncoding = (*SszNetworkEncoder)(nil)
@@ -145,11 +145,11 @@ func (_ SszNetworkEncoder) ProtocolSuffix() string {
 // MaxLength specifies the maximum possible length of an encoded
 // chunk of data.
 func (_ SszNetworkEncoder) MaxLength(length uint64) (int, error) {
-	// Defensive check to prevent potential issues when casting to int64.
-	if length > math.MaxInt64 {
-		return 0, errors.Errorf("invalid length provided: %d", length)
+	il, err := math.Int(length)
+	if err != nil {
+		return 0, errors.Wrap(err, "invalid length provided")
 	}
-	maxLen := snappy.MaxEncodedLen(int(length))
+	maxLen := snappy.MaxEncodedLen(il)
 	if maxLen < 0 {
 		return 0, errors.Errorf("max encoded length is negative: %d", maxLen)
 	}

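MaxLength now routes the uint64 length through the checked conversion in prysm's math package instead of the hand-rolled MaxInt64 guard. A standalone version of the hardened helper, assuming only the golang/snappy dependency (maxLength here is a sketch, not the exported encoder method):

package main

import (
	"errors"
	"fmt"
	"math"

	"github.com/golang/snappy"
)

// maxLength bounds-checks the requested chunk size before asking snappy for the
// worst-case encoded length, which can itself go negative on overflow.
func maxLength(length uint64) (int, error) {
	if length > math.MaxInt64 {
		return 0, errors.New("invalid length provided")
	}
	maxLen := snappy.MaxEncodedLen(int(length))
	if maxLen < 0 {
		return 0, fmt.Errorf("max encoded length is negative: %d", maxLen)
	}
	return maxLen, nil
}

func main() {
	fmt.Println(maxLength(1 << 20))        // a 1 MiB chunk
	fmt.Println(maxLength(math.MaxUint64)) // rejected before reaching snappy
}
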
@@ -7,6 +7,7 @@ import (
 	"github.com/prysmaticlabs/prysm/config/params"
 	"github.com/prysmaticlabs/prysm/crypto/hash"
 	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
+	"github.com/prysmaticlabs/prysm/math"
 	"github.com/prysmaticlabs/prysm/network/forks"
 )

@@ -72,8 +73,8 @@ func MsgID(genesisValidatorsRoot []byte, pmsg *pubsub_pb.Message) string {
 // the topic byte string, and the raw message data: i.e. SHA256(MESSAGE_DOMAIN_INVALID_SNAPPY + uint_to_bytes(uint64(len(message.topic))) + message.topic + message.data)[:20].
 func postAltairMsgID(pmsg *pubsub_pb.Message, fEpoch types.Epoch) string {
 	topic := *pmsg.Topic
-	topicLen := uint64(len(topic))
-	topicLenBytes := bytesutil.Uint64ToBytesLittleEndian(topicLen)
+	topicLen := len(topic)
+	topicLenBytes := bytesutil.Uint64ToBytesLittleEndian(uint64(topicLen)) // topicLen cannot be negative

 	// beyond Bellatrix epoch, allow 10 Mib gossip data size
 	gossipPubSubSize := params.BeaconNetworkConfig().GossipMaxSize
@@ -83,7 +84,25 @@ func postAltairMsgID(pmsg *pubsub_pb.Message, fEpoch types.Epoch) string {

 	decodedData, err := encoder.DecodeSnappy(pmsg.Data, gossipPubSubSize)
 	if err != nil {
-		totalLength := len(params.BeaconNetworkConfig().MessageDomainInvalidSnappy) + len(topicLenBytes) + int(topicLen) + len(pmsg.Data)
+		totalLength, err := math.AddInt(
+			len(params.BeaconNetworkConfig().MessageDomainValidSnappy),
+			len(topicLenBytes),
+			topicLen,
+			len(pmsg.Data),
+		)
+		if err != nil {
+			log.WithError(err).Error("Failed to sum lengths of message domain and topic")
+			// should never happen
+			msg := make([]byte, 20)
+			copy(msg, "invalid")
+			return string(msg)
+		}
+		if uint64(totalLength) > gossipPubSubSize {
+			// this should never happen
+			msg := make([]byte, 20)
+			copy(msg, "invalid")
+			return string(msg)
+		}
 		combinedData := make([]byte, 0, totalLength)
 		combinedData = append(combinedData, params.BeaconNetworkConfig().MessageDomainInvalidSnappy[:]...)
 		combinedData = append(combinedData, topicLenBytes...)
@@ -92,7 +111,19 @@ func postAltairMsgID(pmsg *pubsub_pb.Message, fEpoch types.Epoch) string {
 		h := hash.Hash(combinedData)
 		return string(h[:20])
 	}
-	totalLength := len(params.BeaconNetworkConfig().MessageDomainValidSnappy) + len(topicLenBytes) + int(topicLen) + len(decodedData)
+	totalLength, err := math.AddInt(
+		len(params.BeaconNetworkConfig().MessageDomainValidSnappy),
+		len(topicLenBytes),
+		topicLen,
+		len(decodedData),
+	)
+	if err != nil {
+		log.WithError(err).Error("Failed to sum lengths of message domain and topic")
+		// should never happen
+		msg := make([]byte, 20)
+		copy(msg, "invalid")
+		return string(msg)
+	}
 	combinedData := make([]byte, 0, totalLength)
 	combinedData = append(combinedData, params.BeaconNetworkConfig().MessageDomainValidSnappy[:]...)
 	combinedData = append(combinedData, topicLenBytes...)

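Both branches of postAltairMsgID now compute the preallocation size with the overflow-checked adder from prysm's math package rather than plain + on int casts. A sketch of that kind of adder (addInt below is a stand-in with the same contract as math.AddInt, assuming non-negative inputs):

package main

import (
	"errors"
	"fmt"
	"math"
)

// addInt sums non-negative lengths and fails instead of wrapping around on overflow.
func addInt(vals ...int) (int, error) {
	sum := 0
	for _, v := range vals {
		if v > math.MaxInt-sum {
			return 0, errors.New("integer overflow while summing lengths")
		}
		sum += v
	}
	return sum, nil
}

func main() {
	// domain bytes + topic-length prefix + topic + payload, as in the message-ID hash input.
	total, err := addInt(4, 8, 24, 512)
	fmt.Println(total, err) // 548 <nil>

	_, err = addInt(math.MaxInt, 1)
	fmt.Println(err) // overflow reported, not wrapped
}
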
@@ -2,7 +2,10 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")

 go_library(
     name = "go_default_library",
-    srcs = ["status.go"],
+    srcs = [
+        "log.go",
+        "status.go",
+    ],
     importpath = "github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers",
     visibility = ["//beacon-chain:__subpackages__"],
     deps = [
@@ -11,6 +14,7 @@ go_library(
        "//config/features:go_default_library",
        "//config/params:go_default_library",
        "//crypto/rand:go_default_library",
+        "//math:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/metadata:go_default_library",
        "//time:go_default_library",
@@ -22,6 +26,7 @@ go_library(
        "@com_github_multiformats_go_multiaddr//net:go_default_library",
        "@com_github_prysmaticlabs_eth2_types//:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
+        "@com_github_sirupsen_logrus//:go_default_library",
     ],
 )

beacon-chain/p2p/peers/log.go (new file, 5 lines)
@@ -0,0 +1,5 @@
+package peers
+
+import "github.com/sirupsen/logrus"
+
+var log = logrus.WithField("prefix", "peers")

@@ -40,6 +40,7 @@ import (
 	"github.com/prysmaticlabs/prysm/config/features"
 	"github.com/prysmaticlabs/prysm/config/params"
 	"github.com/prysmaticlabs/prysm/crypto/rand"
+	pmath "github.com/prysmaticlabs/prysm/math"
 	pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/metadata"
 	prysmTime "github.com/prysmaticlabs/prysm/time"
@@ -749,12 +750,12 @@ func (p *Status) PeersToPrune() []peer.ID {
 		return p.deprecatedPeersToPrune()
 	}
 	connLimit := p.ConnectedPeerLimit()
-	inBoundLimit := p.InboundLimit()
+	inBoundLimit := uint64(p.InboundLimit())
 	activePeers := p.Active()
-	numInboundPeers := len(p.InboundConnected())
+	numInboundPeers := uint64(len(p.InboundConnected()))
 	// Exit early if we are still below our max
 	// limit.
-	if len(activePeers) <= int(connLimit) {
+	if uint64(len(activePeers)) <= connLimit {
 		return []peer.ID{}
 	}
 	p.store.Lock()
@@ -784,10 +785,15 @@ func (p *Status) PeersToPrune() []peer.ID {

 	// Determine amount of peers to prune using our
 	// max connection limit.
-	amountToPrune := len(activePeers) - int(connLimit)
+	amountToPrune, err := pmath.Sub64(uint64(len(activePeers)), connLimit)
+	if err != nil {
+		// This should never happen.
+		log.WithError(err).Error("Failed to determine amount of peers to prune")
+		return []peer.ID{}
+	}

 	// Also check for inbound peers above our limit.
-	excessInbound := 0
+	excessInbound := uint64(0)
 	if numInboundPeers > inBoundLimit {
 		excessInbound = numInboundPeers - inBoundLimit
 	}
@@ -796,7 +802,7 @@ func (p *Status) PeersToPrune() []peer.ID {
 	if excessInbound > amountToPrune {
 		amountToPrune = excessInbound
 	}
-	if amountToPrune < len(peersToPrune) {
+	if amountToPrune < uint64(len(peersToPrune)) {
 		peersToPrune = peersToPrune[:amountToPrune]
 	}
 	ids := make([]peer.ID, 0, len(peersToPrune))
@@ -815,7 +821,7 @@ func (p *Status) deprecatedPeersToPrune() []peer.ID {
 	numInboundPeers := len(p.InboundConnected())
 	// Exit early if we are still below our max
 	// limit.
-	if len(activePeers) <= int(connLimit) {
+	if uint64(len(activePeers)) <= connLimit {
 		return []peer.ID{}
 	}
 	p.store.Lock()
@@ -845,18 +851,23 @@ func (p *Status) deprecatedPeersToPrune() []peer.ID {

 	// Determine amount of peers to prune using our
 	// max connection limit.
-	amountToPrune := len(activePeers) - int(connLimit)
+	amountToPrune, err := pmath.Sub64(uint64(len(activePeers)), connLimit)
+	if err != nil {
+		// This should never happen
+		log.WithError(err).Error("Failed to determine amount of peers to prune")
+		return []peer.ID{}
+	}
 	// Also check for inbound peers above our limit.
-	excessInbound := 0
+	excessInbound := uint64(0)
 	if numInboundPeers > inBoundLimit {
-		excessInbound = numInboundPeers - inBoundLimit
+		excessInbound = uint64(numInboundPeers - inBoundLimit)
 	}
 	// Prune the largest amount between excess peers and
 	// excess inbound peers.
 	if excessInbound > amountToPrune {
 		amountToPrune = excessInbound
 	}
-	if amountToPrune < len(peersToPrune) {
+	if amountToPrune < uint64(len(peersToPrune)) {
 		peersToPrune = peersToPrune[:amountToPrune]
 	}
 	ids := make([]peer.ID, 0, len(peersToPrune))

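PeersToPrune and its deprecated twin now compute the prune count with a checked uint64 subtraction, so an unexpected limit larger than the active set surfaces as an error instead of wrapping around. A sketch of the helper's contract (sub64 mirrors prysm's math.Sub64):

package main

import (
	"errors"
	"fmt"
)

// sub64 subtracts b from a and errors on underflow rather than wrapping around.
func sub64(a, b uint64) (uint64, error) {
	if b > a {
		return 0, errors.New("subtraction underflow")
	}
	return a - b, nil
}

func main() {
	activePeers, connLimit := uint64(80), uint64(75)
	amountToPrune, err := sub64(activePeers, connLimit)
	fmt.Println(amountToPrune, err) // 5 <nil>

	// With the limit above the active count, the checked form reports an error
	// and the caller can simply prune nothing.
	_, err = sub64(uint64(10), uint64(75))
	fmt.Println(err)
}
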
@@ -40,7 +40,7 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
 	ctx, span := trace.StartSpan(ctx, "p2p.FindPeersWithSubnet")
 	defer span.End()

-	span.AddAttributes(trace.Int64Attribute("index", int64(index)))
+	span.AddAttributes(trace.Int64Attribute("index", int64(index))) // lint:ignore uintcast -- It's safe to do this for tracing.

 	if s.dv5Listener == nil {
 		// return if discovery isn't set
@@ -138,7 +138,7 @@ func (s *Service) hasPeerWithSubnet(topic string) bool {
 	// In the event peer threshold is lower, we will choose the lower
 	// threshold.
 	minPeers := mathutil.Min(1, uint64(flags.Get().MinimumPeersPerSubnet))
-	return len(s.pubsub.ListPeers(topic+s.Encoding().ProtocolSuffix())) >= int(minPeers)
+	return len(s.pubsub.ListPeers(topic+s.Encoding().ProtocolSuffix())) >= int(minPeers) // lint:ignore uintcast -- Min peers can be safely cast to int.
 }

 // Updates the service's discv5 listener record's attestation subnet
@@ -195,6 +195,7 @@ func attSubnets(record *enr.Record) ([]uint64, error) {
 	if err != nil {
 		return nil, err
 	}
+	// lint:ignore uintcast -- subnet count can be safely cast to int.
 	if len(bitV) != byteCount(int(attestationSubnetCount)) {
 		return []uint64{}, errors.Errorf("invalid bitvector provided, it has a size of %d", len(bitV))
 	}
@@ -214,6 +215,7 @@ func syncSubnets(record *enr.Record) ([]uint64, error) {
 	if err != nil {
 		return nil, err
 	}
+	// lint:ignore uintcast -- subnet count can be safely cast to int.
 	if len(bitV) != byteCount(int(syncCommsSubnetCount)) {
 		return []uint64{}, errors.Errorf("invalid bitvector provided, it has a size of %d", len(bitV))
 	}

@@ -122,7 +122,7 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
 	}
 	// Initialize a pointer to eth1 chain's history to start our search
 	// from.
-	cursorNum := big.NewInt(int64(latestBlkHeight))
+	cursorNum := big.NewInt(0).SetUint64(latestBlkHeight)
 	cursorTime := latestBlkTime

 	numOfBlocks := uint64(0)
@@ -168,9 +168,9 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
 		return s.retrieveHeaderInfo(ctx, cursorNum.Uint64())
 	}
 	if cursorTime > time {
-		return s.findLessTargetEth1Block(ctx, big.NewInt(int64(estimatedBlk)), time)
+		return s.findLessTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
 	}
-	return s.findMoreTargetEth1Block(ctx, big.NewInt(int64(estimatedBlk)), time)
+	return s.findMoreTargetEth1Block(ctx, big.NewInt(0).SetUint64(estimatedBlk), time)
 }

 // Performs a search to find a target eth1 block which is earlier than or equal to the
@@ -214,7 +214,7 @@ func (s *Service) findMoreTargetEth1Block(ctx context.Context, startBlk *big.Int
 }

 func (s *Service) retrieveHeaderInfo(ctx context.Context, bNum uint64) (*types.HeaderInfo, error) {
-	bn := big.NewInt(int64(bNum))
+	bn := big.NewInt(0).SetUint64(bNum)
 	exists, info, err := s.headerCache.HeaderInfoByHeight(bn)
 	if err != nil {
 		return nil, err

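The eth1 block readers now build big.Int heights with SetUint64 instead of going through int64, which avoids the sign flip a cast would produce for values above MaxInt64 (not realistic for block heights, but it removes the uintcast lint finding). A small illustration of the difference:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	h := uint64(1) << 63 // a value that does not fit in int64

	// The old pattern: converting through int64 wraps to a negative number.
	viaInt64 := big.NewInt(int64(h))

	// The pattern the diff switches to: SetUint64 keeps the full unsigned range.
	viaSetUint64 := new(big.Int).SetUint64(h)

	fmt.Println(viaInt64)     // -9223372036854775808
	fmt.Println(viaSetUint64) // 9223372036854775808
}
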
@@ -109,7 +109,7 @@ func (s *Service) ProcessDepositLog(ctx context.Context, depositLog gethTypes.Lo
// This can happen sometimes when we receive the same log twice from the
// ETH1.0 network, and prevents us from updating our trie
// with the same log twice, causing an inconsistent state root.
index := int64(binary.LittleEndian.Uint64(merkleTreeIndex))
index := int64(binary.LittleEndian.Uint64(merkleTreeIndex)) // lint:ignore uintcast -- MerkleTreeIndex should not exceed int64 in your lifetime.
if index <= s.lastReceivedMerkleIndex {
return nil
}
@@ -211,7 +211,7 @@ func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte,
s.chainStartData.Chainstarted = true
s.chainStartData.GenesisBlock = blockNumber.Uint64()

chainStartTime := time.Unix(int64(genesisTime), 0)
chainStartTime := time.Unix(int64(genesisTime), 0) // lint:ignore uintcast -- Genesis time wont exceed int64 in your lifetime.

for i := range s.chainStartData.ChainstartDeposits {
proof, err := s.depositTrie.MerkleProof(i)
@@ -303,8 +303,8 @@ func (s *Service) processPastLogs(ctx context.Context) error {
Addresses: []common.Address{
s.cfg.depositContractAddr,
},
FromBlock: big.NewInt(int64(start)),
ToBlock: big.NewInt(int64(end)),
FromBlock: big.NewInt(0).SetUint64(start),
ToBlock: big.NewInt(0).SetUint64(end),
}
remainingLogs := logCount - uint64(s.lastReceivedMerkleIndex+1)
// only change the end block if the remaining logs are below the required log limit.
@@ -312,7 +312,7 @@ func (s *Service) processPastLogs(ctx context.Context) error {
withinLimit := remainingLogs < depositlogRequestLimit
aboveFollowHeight := end >= latestFollowHeight
if withinLimit && aboveFollowHeight {
query.ToBlock = big.NewInt(int64(latestFollowHeight))
query.ToBlock = big.NewInt(0).SetUint64(latestFollowHeight)
end = latestFollowHeight
}
logs, err := s.httpLogger.FilterLogs(ctx, query)
@@ -391,7 +391,7 @@ func (s *Service) processPastLogs(ctx context.Context) error {
}
}
if fState != nil && !fState.IsNil() && fState.Eth1DepositIndex() > 0 {
s.cfg.depositCache.PrunePendingDeposits(ctx, int64(fState.Eth1DepositIndex()))
s.cfg.depositCache.PrunePendingDeposits(ctx, int64(fState.Eth1DepositIndex())) // lint:ignore uintcast -- Deposit index should not exceed int64 in your lifetime.
}
return nil
}
@@ -413,11 +413,11 @@ func (s *Service) requestBatchedHeadersAndLogs(ctx context.Context) error {
}
for i := s.latestEth1Data.LastRequestedBlock + 1; i <= requestedBlock; i++ {
// Cache eth1 block header here.
_, err := s.BlockHashByHeight(ctx, big.NewInt(int64(i)))
_, err := s.BlockHashByHeight(ctx, big.NewInt(0).SetUint64(i))
if err != nil {
return err
}
err = s.ProcessETH1Block(ctx, big.NewInt(int64(i)))
err = s.ProcessETH1Block(ctx, big.NewInt(0).SetUint64(i))
if err != nil {
return err
}

@@ -613,8 +613,10 @@ func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositCo
// accumulates. we finalize them here before we are ready to receive a block.
// Otherwise, the first few blocks will be slower to compute as we will
// hold the lock and be busy finalizing the deposits.
s.cfg.depositCache.InsertFinalizedDeposits(ctx, int64(currIndex))
s.cfg.depositCache.InsertFinalizedDeposits(ctx, int64(currIndex)) // lint:ignore uintcast -- deposit index will not exceed int64 in your lifetime.
// Deposit proofs are only used during state transition and can be safely removed to save space.

// lint:ignore uintcast -- deposit index will not exceed int64 in your lifetime.
if err = s.cfg.depositCache.PruneProofs(ctx, int64(currIndex)); err != nil {
return errors.Wrap(err, "could not prune deposit proofs")
}
@@ -662,7 +664,7 @@ func (s *Service) batchRequestHeaders(startBlock, endBlock uint64) ([]*gethTypes
err := error(nil)
elems = append(elems, gethRPC.BatchElem{
Method: "eth_getBlockByNumber",
Args: []interface{}{hexutil.EncodeBig(big.NewInt(int64(i))), false},
Args: []interface{}{hexutil.EncodeBig(big.NewInt(0).SetUint64(i)), false},
Result: header,
Error: err,
})
@@ -1088,7 +1090,7 @@ func dedupEndpoints(endpoints []string) []string {
func eth1HeadIsBehind(timestamp uint64) bool {
timeout := prysmTime.Now().Add(-eth1Threshold)
// check that web3 client is syncing
return time.Unix(int64(timestamp), 0).Before(timeout)
return time.Unix(int64(timestamp), 0).Before(timeout) // lint:ignore uintcast -- timestamp will not exceed int64 in your lifetime.
}

func (s *Service) primaryConnected() bool {

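The lint:ignore uintcast annotations added above document the assumption that these values (deposit indices, timestamps, genesis time) stay far below math.MaxInt64, so the uint64-to-int64 conversion cannot wrap. Where that assumption is less obvious, a checked conversion is an alternative; the helper below is a hypothetical illustration rather than part of Prysm:

package safecast

import (
	"fmt"
	"math"
)

// ToInt64 converts a uint64 to int64, returning an error instead of silently
// wrapping when the value exceeds math.MaxInt64.
func ToInt64(v uint64) (int64, error) {
	if v > math.MaxInt64 {
		return 0, fmt.Errorf("value %d overflows int64", v)
	}
	return int64(v), nil
}
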
@@ -10,7 +10,7 @@ go_library(
"structs_marshalling.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/rpc/apimiddleware",
visibility = ["//beacon-chain:__subpackages__"],
visibility = ["//visibility:public"],
deps = [
"//api/gateway/apimiddleware:go_default_library",
"//api/grpc:go_default_library",

@@ -181,9 +181,14 @@ type altairPublishBlockRequestJson struct {
Signature string `json:"signature" hex:"true"`
}

type bellatrixPublishBlockRequestJson struct {
BellatrixBlock *beaconBlockBellatrixJson `json:"bellatrix_block"`
Signature string `json:"signature" hex:"true"`
}

// setInitialPublishBlockPostRequest is triggered before we deserialize the request JSON into a struct.
// We don't know which version of the block got posted, but we can determine it from the slot.
// We know that both Phase 0 and Altair blocks have a Message field with a Slot field,
// We know that blocks of all versions have a Message field with a Slot field,
// so we deserialize the request into a struct s, which has the right fields, to obtain the slot.
// Once we know the slot, we can determine what the PostRequest field of the endpoint should be, and we set it appropriately.
func setInitialPublishBlockPostRequest(endpoint *apimiddleware.Endpoint,
@@ -207,17 +212,20 @@ func setInitialPublishBlockPostRequest(endpoint *apimiddleware.Endpoint,
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "slot is not an unsigned integer")
}
if slots.ToEpoch(types.Slot(slot)) < params.BeaconConfig().AltairForkEpoch {
currentEpoch := slots.ToEpoch(types.Slot(slot))
if currentEpoch < params.BeaconConfig().AltairForkEpoch {
endpoint.PostRequest = &signedBeaconBlockContainerJson{}
} else {
} else if currentEpoch < params.BeaconConfig().BellatrixForkEpoch {
endpoint.PostRequest = &signedBeaconBlockAltairContainerJson{}
} else {
endpoint.PostRequest = &signedBeaconBlockBellatrixContainerJson{}
}
req.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
return true, nil
}

// In preparePublishedBlock we transform the PostRequest.
// gRPC expects either a phase0_block or an altair_block field in the JSON object, but we have a message field at this point.
// gRPC expects an XXX_block field in the JSON object, but we have a message field at this point.
// We do a simple conversion depending on the type of endpoint.PostRequest
// (which was filled out previously in setInitialPublishBlockPostRequest).
func preparePublishedBlock(endpoint *apimiddleware.Endpoint, _ http.ResponseWriter, _ *http.Request) apimiddleware.ErrorJson {
@@ -239,6 +247,15 @@ func preparePublishedBlock(endpoint *apimiddleware.Endpoint, _ http.ResponseWrit
endpoint.PostRequest = actualPostReq
return nil
}
if block, ok := endpoint.PostRequest.(*signedBeaconBlockBellatrixContainerJson); ok {
// Prepare post request that can be properly decoded on gRPC side.
actualPostReq := &bellatrixPublishBlockRequestJson{
BellatrixBlock: block.Message,
Signature: block.Signature,
}
endpoint.PostRequest = actualPostReq
return nil
}
return apimiddleware.InternalServerError(errors.New("unsupported block type"))
}

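The hunks above extend block-version detection from a two-way to a three-way decision: the slot in the request body is mapped to an epoch, and the epoch is compared against the configured fork epochs to choose the container type. A condensed sketch of that selection, with simplified names and assuming the fork epochs are ordered AltairForkEpoch <= BellatrixForkEpoch (illustrative only, not the middleware's actual API):

// pickBlockContainer mirrors the selection logic above: it returns which
// request container a published block should be decoded into, based on the
// epoch derived from the block's slot.
func pickBlockContainer(epoch, altairForkEpoch, bellatrixForkEpoch uint64) string {
	switch {
	case epoch < altairForkEpoch:
		return "phase0"
	case epoch < bellatrixForkEpoch:
		return "altair"
	default:
		return "bellatrix"
	}
}
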
@@ -363,6 +363,11 @@ func TestWrapSignedContributionAndProofsArray(t *testing.T) {
}

func TestSetInitialPublishBlockPostRequest(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.BellatrixForkEpoch = params.BeaconConfig().AltairForkEpoch + 1
params.OverrideBeaconConfig(cfg)

endpoint := &apimiddleware.Endpoint{}
s := struct {
Message struct {
@@ -397,6 +402,21 @@ func TestSetInitialPublishBlockPostRequest(t *testing.T) {
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
assert.Equal(t, reflect.TypeOf(signedBeaconBlockAltairContainerJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
})
t.Run("Bellatrix", func(t *testing.T) {
slot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
require.NoError(t, err)
s.Message = struct{ Slot string }{Slot: strconv.FormatUint(uint64(slot), 10)}
j, err := json.Marshal(s)
require.NoError(t, err)
var body bytes.Buffer
_, err = body.Write(j)
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := setInitialPublishBlockPostRequest(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
assert.Equal(t, reflect.TypeOf(signedBeaconBlockBellatrixContainerJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
})
}

func TestPreparePublishedBlock(t *testing.T) {
@@ -428,6 +448,20 @@ func TestPreparePublishedBlock(t *testing.T) {
assert.Equal(t, true, ok)
})

t.Run("Bellatrix", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &signedBeaconBlockBellatrixContainerJson{
Message: &beaconBlockBellatrixJson{
Body: &beaconBlockBodyBellatrixJson{},
},
},
}
errJson := preparePublishedBlock(endpoint, nil, nil)
require.Equal(t, true, errJson == nil)
_, ok := endpoint.PostRequest.(*bellatrixPublishBlockRequestJson)
assert.Equal(t, true, ok)
})

t.Run("unsupported block type", func(t *testing.T) {
errJson := preparePublishedBlock(&apimiddleware.Endpoint{}, nil, nil)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "unsupported block type"))
@@ -471,8 +505,7 @@ func TestSerializeV2Block(t *testing.T) {
StateRoot: "root",
Body: &beaconBlockBodyJson{},
},
AltairBlock: nil,
Signature: "sig",
Signature: "sig",
},
}
runDefault, j, errJson := serializeV2Block(response)
@@ -495,7 +528,6 @@ func TestSerializeV2Block(t *testing.T) {
response := &blockV2ResponseJson{
Version: ethpbv2.Version_ALTAIR.String(),
Data: &signedBeaconBlockContainerV2Json{
Phase0Block: nil,
AltairBlock: &beaconBlockAltairJson{
Slot: "1",
ProposerIndex: "1",
@@ -522,6 +554,36 @@ func TestSerializeV2Block(t *testing.T) {
require.NotNil(t, beaconBlock.Body)
})

t.Run("Bellatrix", func(t *testing.T) {
response := &blockV2ResponseJson{
Version: ethpbv2.Version_BELLATRIX.String(),
Data: &signedBeaconBlockContainerV2Json{
BellatrixBlock: &beaconBlockBellatrixJson{
Slot: "1",
ProposerIndex: "1",
ParentRoot: "root",
StateRoot: "root",
Body: &beaconBlockBodyBellatrixJson{},
},
Signature: "sig",
},
}
runDefault, j, errJson := serializeV2Block(response)
require.Equal(t, nil, errJson)
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
require.NotNil(t, j)
resp := &bellatrixBlockResponseJson{}
require.NoError(t, json.Unmarshal(j, resp))
require.NotNil(t, resp.Data)
require.NotNil(t, resp.Data.Message)
beaconBlock := resp.Data.Message
assert.Equal(t, "1", beaconBlock.Slot)
assert.Equal(t, "1", beaconBlock.ProposerIndex)
assert.Equal(t, "root", beaconBlock.ParentRoot)
assert.Equal(t, "root", beaconBlock.StateRoot)
require.NotNil(t, beaconBlock.Body)
})

t.Run("incorrect response type", func(t *testing.T) {
response := &types.Empty{}
runDefault, j, errJson := serializeV2Block(response)

@@ -37,6 +37,7 @@ func (_ *BeaconEndpointFactory) Paths() []string {
"/eth/v1/beacon/pool/proposer_slashings",
"/eth/v1/beacon/pool/voluntary_exits",
"/eth/v1/beacon/pool/sync_committees",
"/eth/v1/beacon/weak_subjectivity",
"/eth/v1/node/identity",
"/eth/v1/node/peers",
"/eth/v1/node/peers/{peer_id}",
@@ -141,6 +142,8 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: wrapSyncCommitteeSignaturesArray,
}
case "/eth/v1/beacon/weak_subjectivity":
endpoint.GetResponse = &WeakSubjectivityResponse{}
case "/eth/v1/node/identity":
endpoint.GetResponse = &identityResponseJson{}
case "/eth/v1/node/peers":

@@ -19,6 +19,15 @@ type genesisResponse_GenesisJson struct {
GenesisForkVersion string `json:"genesis_fork_version" hex:"true"`
}

// WeakSubjectivityResponse is used to marshal/unmarshal the response for the
// /eth/v1/beacon/weak_subjectivity endpoint.
type WeakSubjectivityResponse struct {
Data *struct {
Checkpoint *checkpointJson `json:"ws_checkpoint"`
StateRoot string `json:"state_root" hex:"true"`
} `json:"data"`
}

// stateRootResponseJson is used in /beacon/states/{state_id}/root API endpoint.
type stateRootResponseJson struct {
Data *stateRootResponse_StateRootJson `json:"data"`

@@ -5,6 +5,7 @@ import (
"fmt"
"strconv"

"github.com/golang/protobuf/ptypes/empty"
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
@@ -17,6 +18,7 @@ import (
"github.com/prysmaticlabs/prysm/proto/migration"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/time/slots"
"go.opencensus.io/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -40,6 +42,43 @@ func (e *blockIdParseError) Error() string {
return e.message
}

// GetWeakSubjectivity computes the starting epoch of the current weak subjectivity period, and then also
// determines the best block root and state root to use for a Checkpoint Sync starting from that point.
func (bs *Server) GetWeakSubjectivity(ctx context.Context, _ *empty.Empty) (*ethpbv1.WeakSubjectivityResponse, error) {
hs, err := bs.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, status.Error(codes.Internal, "could not get head state")
}
wsEpoch, err := helpers.LatestWeakSubjectivityEpoch(ctx, hs)
if err != nil {
return nil, status.Errorf(codes.Internal, "could not get weak subjectivity epoch: %v", err)
}
wsSlot, err := slots.EpochStart(wsEpoch)
if err != nil {
return nil, status.Errorf(codes.Internal, "could not get weak subjectivity slot: %v", err)
}
blks, err := bs.BeaconDB.HighestSlotBlocksBelow(ctx, wsSlot+1)
if err != nil {
return nil, status.Errorf(codes.Internal, fmt.Sprintf("could not find highest block below slot %d", wsSlot))
}
block := blks[0]
blockRoot, err := block.Block().HashTreeRoot()
if err != nil {
return nil, status.Errorf(codes.Internal, fmt.Sprintf("failed to compute hash_tree_root for block at slot=%d", block.Block().Slot()))
}
stateRoot := bytesutil.ToBytes32(block.Block().StateRoot())
log.Printf("weak subjectivity checkpoint reported as epoch=%d, block root=%#x, state root=%#x", wsEpoch, blockRoot, stateRoot)
return &ethpbv1.WeakSubjectivityResponse{
Data: &ethpbv1.WeakSubjectivityData{
WsCheckpoint: &ethpbv1.Checkpoint{
Epoch: wsEpoch,
Root: blockRoot[:],
},
StateRoot: stateRoot[:],
},
}, nil
}

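GetWeakSubjectivity above is exposed through the gRPC gateway as GET /eth/v1/beacon/weak_subjectivity (registered in the endpoint factory hunk earlier in this diff). A minimal client sketch, assuming a gateway listening on localhost:3500 and that the checkpoint JSON carries the standard epoch and root fields; the host, port, and struct names here are illustrative, not part of this change:

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// wsResponse mirrors the WeakSubjectivityResponse JSON shape added in this change.
type wsResponse struct {
	Data struct {
		Checkpoint struct {
			Epoch string `json:"epoch"`
			Root  string `json:"root"`
		} `json:"ws_checkpoint"`
		StateRoot string `json:"state_root"`
	} `json:"data"`
}

func main() {
	resp, err := http.Get("http://localhost:3500/eth/v1/beacon/weak_subjectivity")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var ws wsResponse
	if err := json.NewDecoder(resp.Body).Decode(&ws); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("epoch=%s root=%s state_root=%s\n",
		ws.Data.Checkpoint.Epoch, ws.Data.Checkpoint.Root, ws.Data.StateRoot)
}
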
// GetBlockHeader retrieves block header for given block id.
func (bs *Server) GetBlockHeader(ctx context.Context, req *ethpbv1.BlockRequest) (*ethpbv1.BlockHeaderResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon.GetBlockHeader")
@@ -48,7 +87,7 @@ func (bs *Server) GetBlockHeader(ctx context.Context, req *ethpbv1.BlockRequest)
blk, err := bs.blockFromBlockID(ctx, req.BlockId)
err = handleGetBlockError(blk, err)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "GetBlockHeader")
}
v1alpha1Header, err := blk.Header()
if err != nil {
@@ -267,7 +306,7 @@ func (bs *Server) GetBlock(ctx context.Context, req *ethpbv1.BlockRequest) (*eth
blk, err := bs.blockFromBlockID(ctx, req.BlockId)
err = handleGetBlockError(blk, err)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "GetBlock")
}
signedBeaconBlock, err := migration.SignedBeaconBlock(blk)
if err != nil {
@@ -290,7 +329,7 @@ func (bs *Server) GetBlockSSZ(ctx context.Context, req *ethpbv1.BlockRequest) (*
blk, err := bs.blockFromBlockID(ctx, req.BlockId)
err = handleGetBlockError(blk, err)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "GetBlockSSZ")
}
signedBeaconBlock, err := migration.SignedBeaconBlock(blk)
if err != nil {
@@ -312,7 +351,7 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
blk, err := bs.blockFromBlockID(ctx, req.BlockId)
err = handleGetBlockError(blk, err)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "GetBlockV2")
}

_, err = blk.PbPhase0Block()
@@ -389,7 +428,7 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
blk, err := bs.blockFromBlockID(ctx, req.BlockId)
err = handleGetBlockError(blk, err)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "GetBlockSSZV2")
}

_, err = blk.PbPhase0Block()

@@ -100,7 +100,7 @@ func TestGetSpec(t *testing.T) {
config.TerminalBlockHash = common.HexToHash("TerminalBlockHash")
config.TerminalBlockHashActivationEpoch = 72
config.TerminalTotalDifficulty = "73"
config.FeeRecipient = common.HexToAddress("FeeRecipient")
config.DefaultFeeRecipient = common.HexToAddress("DefaultFeeRecipient")

var dbp [4]byte
copy(dbp[:], []byte{'0', '0', '0', '1'})
@@ -329,8 +329,8 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, common.HexToHash("TerminalBlockHash"), common.HexToHash(v))
case "TERMINAL_TOTAL_DIFFICULTY":
assert.Equal(t, "73", v)
case "FeeRecipient":
assert.Equal(t, common.HexToAddress("FeeRecipient"), v)
case "DefaultFeeRecipient":
assert.Equal(t, common.HexToAddress("DefaultFeeRecipient"), v)
case "PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX":
assert.Equal(t, "3", v)
case "MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX":

@@ -117,12 +117,7 @@ func (bs *Server) GetFinalityCheckpoints(ctx context.Context, req *ethpb.StateRe

st, err = bs.StateFetcher.State(ctx, req.StateId)
if err != nil {
if stateNotFoundErr, ok := err.(*statefetcher.StateNotFoundError); ok {
return nil, status.Errorf(codes.NotFound, "State not found: %v", stateNotFoundErr)
} else if parseErr, ok := err.(*statefetcher.StateIdParseError); ok {
return nil, status.Errorf(codes.InvalidArgument, "Invalid state ID: %v", parseErr)
}
return nil, status.Errorf(codes.Internal, "Could not get state: %v", err)
return nil, helpers.PrepareStateFetchGRPCError(err)
}

return &ethpb.StateFinalityCheckpointResponse{

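The last hunk replaces the inline error-type switch with helpers.PrepareStateFetchGRPCError, so every endpoint that fetches a state maps fetcher errors to gRPC codes in one place. A simplified sketch of that kind of helper, re-expressing the removed inline logic (the actual implementation lives in the rpc helpers package and may differ; the statefetcher, codes, and status imports are assumed):

// prepareStateFetchError maps known state-fetcher failures to suitable gRPC
// status codes and falls back to Internal for anything unexpected.
func prepareStateFetchError(err error) error {
	if stateNotFoundErr, ok := err.(*statefetcher.StateNotFoundError); ok {
		return status.Errorf(codes.NotFound, "State not found: %v", stateNotFoundErr)
	}
	if parseErr, ok := err.(*statefetcher.StateIdParseError); ok {
		return status.Errorf(codes.InvalidArgument, "Invalid state ID: %v", parseErr)
	}
	return status.Errorf(codes.Internal, "Could not get state: %v", err)
}
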
Some files were not shown because too many files have changed in this diff.