Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 05:47:59 -05:00)

Compare commits: unrealized ... ci-verbose (97 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 6219d72dbb | |
| | 4de92bafc4 | |
| | 69438583e5 | |
| | e81f3fed01 | |
| | 1b2a5fb4a5 | |
| | 6c878b1665 | |
| | 838963c9f7 | |
| | 7b38f8b8fc | |
| | 23e8e695cc | |
| | ce9eaae22e | |
| | 7010e8dec8 | |
| | 9e4ba75e71 | |
| | 044a4ad5a3 | |
| | 690084cab6 | |
| | 88db7117d2 | |
| | 1faa292615 | |
| | 434018a4b9 | |
| | 54624569bf | |
| | b55ddb5a34 | |
| | a38de90435 | |
| | d454d30f19 | |
| | b04dd9fe5c | |
| | 8140a1a7e0 | |
| | cab9917317 | |
| | 4c4fb9f2c0 | |
| | 80f4f22401 | |
| | dd296cbd8a | |
| | f9e3b0a3c2 | |
| | a58809597e | |
| | 7f443e8387 | |
| | 18fc17c903 | |
| | d7b01b9d81 | |
| | 7ebd9035dd | |
| | 578fea73d7 | |
| | 7fcadbe3ef | |
| | fce9e6883d | |
| | 5ee66a4a68 | |
| | f4c7fb6182 | |
| | 077dcdc643 | |
| | 1fa864cb1a | |
| | 6357860cc2 | |
| | cc1ea81d4a | |
| | dd65622441 | |
| | 6c39301f33 | |
| | 5b12f5a27d | |
| | 105d73d59b | |
| | db687bf56d | |
| | f0403afb25 | |
| | 3f309968d4 | |
| | 52acaceb3f | |
| | 5216402f66 | |
| | 2536195be0 | |
| | a4b9e006af | |
| | 76b941b310 | |
| | d099c2790b | |
| | 2fc3d41ffa | |
| | 2e4174bdd6 | |
| | a170fd4bd6 | |
| | 3e36eacd1e | |
| | 0e9dfe04a1 | |
| | 302a5cf00b | |
| | c72f1af951 | |
| | 2bc3ef29e0 | |
| | bc91d63fcf | |
| | 8f18920ac7 | |
| | 2cc62cdf40 | |
| | 95c140b512 | |
| | 7563bc0444 | |
| | 4353d39daa | |
| | 2de2000eb7 | |
| | 94f6389ebd | |
| | b582ca27e6 | |
| | 2586a9e667 | |
| | 87251d627d | |
| | 3ff285dda5 | |
| | 364ad3fbda | |
| | 4cbb69602f | |
| | 64920d719d | |
| | 3cf385fe91 | |
| | adabd1fa4f | |
| | 9be2111b7a | |
| | 1dec9eb912 | |
| | a2951ec37d | |
| | 216fdb48cf | |
| | 61c5e2a443 | |
| | da9f72360d | |
| | 1be46aa16e | |
| | a1a12243be | |
| | a1dd2e6b8c | |
| | ecb605814e | |
| | 61cbe3709b | |
| | 7039c382bf | |
| | 4f00984ab1 | |
| | edb03328ea | |
| | c922eb9bfc | |
| | 051a83a83d | |
| | 9466a0df11 | |
@@ -44,3 +44,6 @@ build --flaky_test_attempts=5
# Better caching
build:nostamp --nostamp
build:nostamp --workspace_status_command=./hack/workspace_status_ci.sh

# More verbose tests
test --test_arg=-test.v
.github/CODEOWNERS (vendored), 10 changed lines

@@ -9,8 +9,8 @@ deps.bzl @prysmaticlabs/core-team

# Radek and Nishant are responsible for changes that can affect the native state feature.
# See https://www.notion.so/prysmaticlabs/Native-Beacon-State-Redesign-6cc9744b4ec1439bb34fa829b36a35c1
/beacon-chain/state/fieldtrie/ @rkapka @nisdas
/beacon-chain/state/v1/ @rkapka @nisdas
/beacon-chain/state/v2/ @rkapka @nisdas
/beacon-chain/state/v3/ @rkapka @nisdas
/beacon-chain/state/state-native/ @rkapka @nisdas
/beacon-chain/state/fieldtrie/ @rkapka @nisdas @rauljordan
/beacon-chain/state/v1/ @rkapka @nisdas @rauljordan
/beacon-chain/state/v2/ @rkapka @nisdas @rauljordan
/beacon-chain/state/v3/ @rkapka @nisdas @rauljordan
/beacon-chain/state/state-native/ @rkapka @nisdas @rauljordan
.github/workflows/go.yml (vendored), 14 changed lines

@@ -64,7 +64,6 @@ jobs:
- name: Golangci-lint
uses: golangci/golangci-lint-action@v2
with:
args: --print-issued-lines --sort-results --no-config --timeout=10m --disable-all -E deadcode -E errcheck -E gosimple --skip-files=validator/web/site_data.go --skip-dirs=proto --go=1.18
version: v1.45.2
skip-go-installation: true

@@ -88,11 +87,14 @@ jobs:
- name: Build
# Use blst tag to allow go and bazel builds for blst.
run: go build -v ./...

# fuzz and blst_disabled leverage go tag based stubs at compile time.
# Building with these tags should be checked and enforced at pre-submit.
- name: Build for fuzzing
run: go build -tags=fuzz,blst_disabled ./...
env:
CGO_CFLAGS: "-O -D__BLST_PORTABLE__"
# fuzz leverage go tag based stubs at compile time.
# Building and testing with these tags should be checked and enforced at pre-submit.
- name: Test for fuzzing
run: go test -tags=fuzz,develop ./... -test.run=^Fuzz
env:
CGO_CFLAGS: "-O -D__BLST_PORTABLE__"

# Tests run via Bazel for now...
# - name: Test
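The workflow comments above describe how the fuzz and blst_disabled steps rely on Go build tags to select compile-time stubs. A minimal, generic sketch of that pattern follows; the file, package, and function names are illustrative and are not Prysm's actual layout. Two files declare the same function, and mutually exclusive build tags decide which one is compiled.

```go
// crypto_blst.go: compiled by default (no blst_disabled tag).
//go:build !blst_disabled

package crypto

// usesBLST reports that the real blst-backed code paths are compiled in.
func usesBLST() bool { return true }
```

```go
// crypto_noblst.go: selected by `go build -tags=blst_disabled ./...`,
// as the "Build for fuzzing" step above does.
//go:build blst_disabled

package crypto

// usesBLST reports that stub code paths replace the blst-backed ones.
func usesBLST() bool { return false }
```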
.gitignore (vendored), 3 changed lines

@@ -35,3 +35,6 @@ bin

# p2p metaData
metaData

# execution API authentication
jwt.hex
@@ -1,69 +1,26 @@
linters-settings:
govet:
check-shadowing: true
settings:
printf:
funcs:
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf
golint:
min-confidence: 0
gocyclo:
min-complexity: 10
maligned:
suggest-new: true
dupl:
threshold: 100
goconst:
min-len: 2
min-occurrences: 2
depguard:
list-type: blacklist
packages:
# logging is allowed only by logutils.Log, logrus
# is allowed to use only in logutils package
- github.com/sirupsen/logrus
misspell:
locale: US
lll:
line-length: 140
goimports:
local-prefixes: github.com/golangci/golangci-lint
gocritic:
enabled-tags:
- performance
- style
- experimental
disabled-checks:
- wrapperFunc
run:
skip-files:
- validator/web/site_data.go
- .*_test.go
skip-dirs:
- proto
- tools/analyzers
timeout: 10m
go: '1.18'

linters:
disable-all: true
enable:
- deadcode
- goconst
- goimports
- golint
- gosec
- misspell
- structcheck
- typecheck
- unparam
- varcheck
- gofmt
- unused
disable-all: true
- errcheck
- gosimple
- gocognit

run:
skip-dirs:
- proto/
- ^contracts/
deadline: 10m
linters-settings:
gocognit:
# TODO: We should target for < 50
min-complexity: 97

# golangci.com configuration
# https://github.com/golangci/golangci/wiki/Configuration
service:
golangci-lint-version: 1.15.0 # use the fixed version to not introduce new linters unexpectedly
prepare:
- echo "here I can run custom commands, but no preparation needed for this repo"
output:
print-issued-lines: true
sort-results: true
@@ -2,7 +2,8 @@

[](https://buildkite.com/prysmatic-labs/prysm)
[](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
[](https://github.com/ethereum/consensus-specs/tree/v1.1.10)
[](https://github.com/ethereum/consensus-specs/tree/v1.2.0-rc.1)
[](https://github.com/ethereum/execution-apis/tree/v1.0.0-alpha.9/src/engine)
[](https://discord.gg/CTYGPUJ)

This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum Consensus](https://ethereum.org/en/eth2/) specification, developed by [Prysmatic Labs](https://prysmaticlabs.com). See the [Changelog](https://github.com/prysmaticlabs/prysm/releases) for details of the latest releases and upcoming breaking changes.
@@ -231,7 +231,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "57120e8e2d5aaec956fc6a25ddc58fae2477f5b3ac7789174cf5ac1106dcc151",
sha256 = "9c93f87378aaa6d6fe1c67b396eac2aacc9594af2a83f028cb99c95dea5b81df",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)

@@ -247,7 +247,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "aa46676b26c173274ec8ea8756ae3072474b73ef7ccc7414d4026884810d8de2",
sha256 = "52f2c52415228cee8a4de5a09abff785f439a77dfef8f03e834e4e16857673c1",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)

@@ -263,7 +263,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "d7dba93110cf35d9575ce21af6b7c3989f4aba621a9749bc090bca216e0345f7",
sha256 = "022dcc0d6de7dd27b337a0d1b945077eaf5ee47000700395a693fc25e12f96df",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
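The WORKSPACE hunks above update the sha256 pins for the consensus-spec-tests archives. A small standalone sketch (not part of the repo) for checking a locally downloaded tarball against a pinned digest:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"log"
	"os"
)

// Prints the sha256 of a downloaded archive (e.g. general.tar.gz) so it can be
// compared against the value pinned in the WORKSPACE http_archive rule.
func main() {
	if len(os.Args) != 2 {
		log.Fatal("usage: sha256sum <file>")
	}
	f, err := os.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		log.Fatal(err)
	}
	fmt.Println(hex.EncodeToString(h.Sum(nil)))
}
```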
@@ -26,69 +26,126 @@ type OriginData struct {
bb []byte
st state.BeaconState
b interfaces.SignedBeaconBlock
cf *detect.VersionedUnmarshaler
}

// CheckpointString returns the standard string representation of a Checkpoint for the block root and epoch for the
// SignedBeaconBlock value found by DownloadOriginData.
// The format is a a hex-encoded block root, followed by the epoch of the block, separated by a colon. For example:
// "0x1c35540cac127315fabb6bf29181f2ae0de1a3fc909d2e76ba771e61312cc49a:74888"
func (od *OriginData) CheckpointString() string {
return fmt.Sprintf("%#x:%d", od.wsd.BlockRoot, od.wsd.Epoch)
vu *detect.VersionedUnmarshaler
br [32]byte
sr [32]byte
}

// SaveBlock saves the downloaded block to a unique file in the given path.
// For readability and collision avoidance, the file name includes: type, config name, slot and root
func (od *OriginData) SaveBlock(dir string) (string, error) {
blockPath := path.Join(dir, fname("block", od.cf, od.b.Block().Slot(), od.wsd.BlockRoot))
return blockPath, file.WriteFile(blockPath, od.BlockBytes())
func (o *OriginData) SaveBlock(dir string) (string, error) {
blockPath := path.Join(dir, fname("block", o.vu, o.b.Block().Slot(), o.br))
return blockPath, file.WriteFile(blockPath, o.BlockBytes())
}

// SaveState saves the downloaded state to a unique file in the given path.
// For readability and collision avoidance, the file name includes: type, config name, slot and root
func (od *OriginData) SaveState(dir string) (string, error) {
statePath := path.Join(dir, fname("state", od.cf, od.st.Slot(), od.wsd.StateRoot))
return statePath, file.WriteFile(statePath, od.StateBytes())
func (o *OriginData) SaveState(dir string) (string, error) {
statePath := path.Join(dir, fname("state", o.vu, o.st.Slot(), o.sr))
return statePath, file.WriteFile(statePath, o.StateBytes())
}

// StateBytes returns the ssz-encoded bytes of the downloaded BeaconState value.
func (od *OriginData) StateBytes() []byte {
return od.sb
func (o *OriginData) StateBytes() []byte {
return o.sb
}

// BlockBytes returns the ssz-encoded bytes of the downloaded SignedBeaconBlock value.
func (od *OriginData) BlockBytes() []byte {
return od.bb
func (o *OriginData) BlockBytes() []byte {
return o.bb
}

func fname(prefix string, cf *detect.VersionedUnmarshaler, slot types.Slot, root [32]byte) string {
return fmt.Sprintf("%s_%s_%s_%d-%#x.ssz", prefix, cf.Config.ConfigName, version.String(cf.Fork), slot, root)
func fname(prefix string, vu *detect.VersionedUnmarshaler, slot types.Slot, root [32]byte) string {
return fmt.Sprintf("%s_%s_%s_%d-%#x.ssz", prefix, vu.Config.ConfigName, version.String(vu.Fork), slot, root)
}
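fname encodes the download metadata into the file name, so a mainnet phase0 state at slot 2048 would come out roughly as state_mainnet_phase0_2048-0x....ssz. A standalone sketch of the same format string with illustrative values (the config and fork names here are examples, not taken from a real download):

```go
package main

import "fmt"

func main() {
	// Mirrors fname's format string: prefix_config_fork_slot-root.ssz
	prefix, configName, forkName := "state", "mainnet", "phase0"
	slot := 2048
	var root [32]byte
	root[0] = 0xab

	fmt.Printf("%s_%s_%s_%d-%#x.ssz\n", prefix, configName, forkName, slot, root)
	// Output begins with: state_mainnet_phase0_2048-0xab00...
}
```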
// this method downloads the head state, which can be used to find the correct chain config
// and use prysm's helper methods to compute the latest weak subjectivity epoch.
func getWeakSubjectivityEpochFromHead(ctx context.Context, client *Client) (types.Epoch, error) {
headBytes, err := client.GetState(ctx, IdHead)
// DownloadFinalizedData downloads the most recently finalized state, and the block most recently applied to that state.
// This pair can be used to initialize a new beacon node via checkpoint sync.
func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, error) {
sb, err := client.GetState(ctx, IdFinalized)
if err != nil {
return 0, err
return nil, err
}
cf, err := detect.FromState(headBytes)
vu, err := detect.FromState(sb)
if err != nil {
return 0, errors.Wrap(err, "error detecting chain config for beacon state")
return nil, errors.Wrap(err, "error detecting chain config for finalized state")
}
log.Printf("detected supported config in remote head state, name=%s, fork=%s", cf.Config.ConfigName, version.String(cf.Fork))
headState, err := cf.UnmarshalBeaconState(headBytes)
log.Printf("detected supported config in remote finalized state, name=%s, fork=%s", vu.Config.ConfigName, version.String(vu.Fork))
s, err := vu.UnmarshalBeaconState(sb)
if err != nil {
return 0, errors.Wrap(err, "error unmarshaling state to correct version")
return nil, errors.Wrap(err, "error unmarshaling finalized state to correct version")
}

epoch, err := helpers.LatestWeakSubjectivityEpoch(ctx, headState, cf.Config)
sr, err := s.HashTreeRoot(ctx)
if err != nil {
return 0, errors.Wrap(err, "error computing the weak subjectivity epoch from head state")
return nil, errors.Wrapf(err, "failed to compute htr for finalized state at slot=%d", s.Slot())
}
header := s.LatestBlockHeader()
header.StateRoot = sr[:]
br, err := header.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error while computing block root using state data")
}

log.Printf("(computed client-side) weak subjectivity epoch = %d", epoch)
return epoch, nil
bb, err := client.GetBlock(ctx, IdFromRoot(br))
if err != nil {
return nil, errors.Wrapf(err, "error requesting block by root = %#x", br)
}
b, err := vu.UnmarshalBeaconBlock(bb)
if err != nil {
return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
}
realBlockRoot, err := b.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block")
}

log.Printf("BeaconState slot=%d, Block slot=%d", s.Slot(), b.Block().Slot())
log.Printf("BeaconState htr=%#xd, Block state_root=%#x", sr, b.Block().StateRoot())
log.Printf("BeaconState latest_block_header htr=%#xd, block htr=%#x", br, realBlockRoot)
return &OriginData{
st: s,
b: b,
sb: sb,
bb: bb,
vu: vu,
br: br,
sr: sr,
}, nil
}
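Taken together, DownloadFinalizedData and the Save helpers above form a small checkpoint-sync bootstrap flow: fetch the finalized state, derive and fetch its block, then write both to disk. A usage sketch, assuming an already-constructed Client for this package (construction is not shown in this diff):

```go
package beacon

import (
	"context"
	"log"
)

// saveFinalizedOrigin is a usage sketch (not part of the diff): given an
// already-constructed Client, download the finalized state and block and
// write both to dir for use as a checkpoint sync origin.
func saveFinalizedOrigin(ctx context.Context, client *Client, dir string) error {
	od, err := DownloadFinalizedData(ctx, client)
	if err != nil {
		return err
	}
	statePath, err := od.SaveState(dir)
	if err != nil {
		return err
	}
	blockPath, err := od.SaveBlock(dir)
	if err != nil {
		return err
	}
	log.Printf("wrote checkpoint origin files: %s, %s", statePath, blockPath)
	return nil
}
```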
// WeakSubjectivityData represents the state root, block root and epoch of the BeaconState + SignedBeaconBlock
// that falls at the beginning of the current weak subjectivity period. These values can be used to construct
// a weak subjectivity checkpoint beacon node flag to be used for validation.
type WeakSubjectivityData struct {
BlockRoot [32]byte
StateRoot [32]byte
Epoch types.Epoch
}

// CheckpointString returns the standard string representation of a Checkpoint.
// The format is a a hex-encoded block root, followed by the epoch of the block, separated by a colon. For example:
// "0x1c35540cac127315fabb6bf29181f2ae0de1a3fc909d2e76ba771e61312cc49a:74888"
func (wsd *WeakSubjectivityData) CheckpointString() string {
return fmt.Sprintf("%#x:%d", wsd.BlockRoot, wsd.Epoch)
}
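The checkpoint string documented above is the value downstream tooling consumes, for example as a weak subjectivity checkpoint flag. A minimal sketch that prints it:

```go
package beacon

import "fmt"

// printCheckpoint is a sketch (not part of the diff) showing the documented
// "block_root:epoch" format produced by CheckpointString.
func printCheckpoint(wsd *WeakSubjectivityData) {
	fmt.Println(wsd.CheckpointString())
	// e.g. 0x1c35540cac127315fabb6bf29181f2ae0de1a3fc909d2e76ba771e61312cc49a:74888
}
```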
// ComputeWeakSubjectivityCheckpoint attempts to use the prysm weak_subjectivity api
// to obtain the current weak_subjectivity checkpoint.
// For non-prysm nodes, the same computation will be performed with extra steps,
// using the head state downloaded from the beacon node api.
func ComputeWeakSubjectivityCheckpoint(ctx context.Context, client *Client) (*WeakSubjectivityData, error) {
ws, err := client.GetWeakSubjectivity(ctx)
if err != nil {
// a 404/405 is expected if querying an endpoint that doesn't support the weak subjectivity checkpoint api
if !errors.Is(err, ErrNotOK) {
return nil, errors.Wrap(err, "unexpected API response for prysm-only weak subjectivity checkpoint API")
}
// fall back to vanilla Beacon Node API method
return computeBackwardsCompatible(ctx, client)
}
log.Printf("server weak subjectivity checkpoint response - epoch=%d, block_root=%#x, state_root=%#x", ws.Epoch, ws.BlockRoot, ws.StateRoot)
return ws, nil
}

const (
@@ -96,8 +153,8 @@ const (
prysmImplementationName = "Prysm"
)

// ErrUnsupportedPrysmCheckpointVersion indicates remote beacon node can't be used for checkpoint retrieval.
var ErrUnsupportedPrysmCheckpointVersion = errors.New("node does not meet minimum version requirements for checkpoint retrieval")
// errUnsupportedPrysmCheckpointVersion indicates remote beacon node can't be used for checkpoint retrieval.
var errUnsupportedPrysmCheckpointVersion = errors.New("node does not meet minimum version requirements for checkpoint retrieval")

// for older endpoints or clients that do not support the weak_subjectivity api method
// we gather the necessary data for a checkpoint sync by:
@@ -105,14 +162,14 @@ var ErrUnsupportedPrysmCheckpointVersion = errors.New("node does not meet minimu
// - requesting the state at the first slot of the epoch
// - using hash_tree_root(state.latest_block_header) to compute the block the state integrates
// - requesting that block by its root
func downloadBackwardsCompatible(ctx context.Context, client *Client) (*OriginData, error) {
func computeBackwardsCompatible(ctx context.Context, client *Client) (*WeakSubjectivityData, error) {
log.Print("falling back to generic checkpoint derivation, weak_subjectivity API not supported by server")
nv, err := client.GetNodeVersion(ctx)
if err != nil {
return nil, errors.Wrap(err, "unable to proceed with fallback method without confirming node version")
}
if nv.implementation == prysmImplementationName && semver.Compare(nv.semver, prysmMinimumVersion) < 0 {
return nil, errors.Wrapf(ErrUnsupportedPrysmCheckpointVersion, "%s < minimum (%s)", nv.semver, prysmMinimumVersion)
return nil, errors.Wrapf(errUnsupportedPrysmCheckpointVersion, "%s < minimum (%s)", nv.semver, prysmMinimumVersion)
}
epoch, err := getWeakSubjectivityEpochFromHead(ctx, client)
if err != nil {
@@ -127,136 +184,78 @@ func downloadBackwardsCompatible(ctx context.Context, client *Client) (*OriginDa

log.Printf("requesting checkpoint state at slot %d", slot)
// get the state at the first slot of the epoch
stateBytes, err := client.GetState(ctx, IdFromSlot(slot))
sb, err := client.GetState(ctx, IdFromSlot(slot))
if err != nil {
return nil, errors.Wrapf(err, "failed to request state by slot from api, slot=%d", slot)
}

// ConfigFork is used to unmarshal the BeaconState so we can read the block root in latest_block_header
cf, err := detect.FromState(stateBytes)
vu, err := detect.FromState(sb)
if err != nil {
return nil, errors.Wrap(err, "error detecting chain config for beacon state")
}
log.Printf("detected supported config in checkpoint state, name=%s, fork=%s", cf.Config.ConfigName, version.String(cf.Fork))
log.Printf("detected supported config in checkpoint state, name=%s, fork=%s", vu.Config.ConfigName, version.String(vu.Fork))

st, err := cf.UnmarshalBeaconState(stateBytes)
s, err := vu.UnmarshalBeaconState(sb)
if err != nil {
return nil, errors.Wrap(err, "error using detected config fork to unmarshal state bytes")
}

// compute state and block roots
stateRoot, err := st.HashTreeRoot(ctx)
sr, err := s.HashTreeRoot(ctx)
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of state")
}

header := st.LatestBlockHeader()
header.StateRoot = stateRoot[:]
computedBlockRoot, err := header.HashTreeRoot()
h := s.LatestBlockHeader()
h.StateRoot = sr[:]
br, err := h.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error while computing block root using state data")
}

blockBytes, err := client.GetBlock(ctx, IdFromRoot(computedBlockRoot))
bb, err := client.GetBlock(ctx, IdFromRoot(br))
if err != nil {
return nil, errors.Wrapf(err, "error requesting block by root = %d", computedBlockRoot)
return nil, errors.Wrapf(err, "error requesting block by root = %d", br)
}
block, err := cf.UnmarshalBeaconBlock(blockBytes)
b, err := vu.UnmarshalBeaconBlock(bb)
if err != nil {
return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
}
blockRoot, err := block.Block().HashTreeRoot()
br, err = b.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root for block obtained via root")
}

log.Printf("BeaconState slot=%d, Block slot=%d", st.Slot(), block.Block().Slot())
log.Printf("BeaconState htr=%#xd, Block state_root=%#x", stateRoot, block.Block().StateRoot())
log.Printf("BeaconBlock root computed from state=%#x, Block htr=%#x", computedBlockRoot, blockRoot)

return &OriginData{
wsd: &WeakSubjectivityData{
BlockRoot: blockRoot,
StateRoot: stateRoot,
Epoch: epoch,
},
st: st,
sb: stateBytes,
b: block,
bb: blockBytes,
cf: cf,
return &WeakSubjectivityData{
Epoch: epoch,
BlockRoot: br,
StateRoot: sr,
}, nil
}

// DownloadOriginData attempts to use the proposed weak_subjectivity beacon node api
// to obtain the weak_subjectivity metadata (epoch, block_root, state_root) needed to sync
// a beacon node from the canonical weak subjectivity checkpoint. As this is a proposed API
// that will only be supported by prysm at first, in the event of a 404 we fallback to using a
// different technique where we first download the head state which can be used to compute the
// weak subjectivity epoch on the client side.
func DownloadOriginData(ctx context.Context, client *Client) (*OriginData, error) {
ws, err := client.GetWeakSubjectivity(ctx)
// this method downloads the head state, which can be used to find the correct chain config
// and use prysm's helper methods to compute the latest weak subjectivity epoch.
func getWeakSubjectivityEpochFromHead(ctx context.Context, client *Client) (types.Epoch, error) {
headBytes, err := client.GetState(ctx, IdHead)
if err != nil {
// a 404/405 is expected if querying an endpoint that doesn't support the weak subjectivity checkpoint api
if !errors.Is(err, ErrNotOK) {
return nil, errors.Wrap(err, "unexpected API response for prysm-only weak subjectivity checkpoint API")
}
// fall back to vanilla Beacon Node API method
return downloadBackwardsCompatible(ctx, client)
return 0, err
}
log.Printf("server weak subjectivity checkpoint response - epoch=%d, block_root=%#x, state_root=%#x", ws.Epoch, ws.BlockRoot, ws.StateRoot)

// use first slot of the epoch for the block slot
slot, err := slots.EpochStart(ws.Epoch)
vu, err := detect.FromState(headBytes)
if err != nil {
return nil, errors.Wrapf(err, "error computing first slot of epoch=%d", ws.Epoch)
return 0, errors.Wrap(err, "error detecting chain config for beacon state")
}
log.Printf("requesting checkpoint state at slot %d", slot)

stateBytes, err := client.GetState(ctx, IdFromSlot(slot))
log.Printf("detected supported config in remote head state, name=%s, fork=%s", vu.Config.ConfigName, version.String(vu.Fork))
headState, err := vu.UnmarshalBeaconState(headBytes)
if err != nil {
return nil, errors.Wrapf(err, "failed to request state by slot from api, slot=%d", slot)
}
cf, err := detect.FromState(stateBytes)
if err != nil {
return nil, errors.Wrap(err, "error detecting chain config for beacon state")
}
log.Printf("detected supported config in checkpoint state, name=%s, fork=%s", cf.Config.ConfigName, version.String(cf.Fork))

state, err := cf.UnmarshalBeaconState(stateBytes)
if err != nil {
return nil, errors.Wrap(err, "error using detected config fork to unmarshal state bytes")
}
stateRoot, err := state.HashTreeRoot(ctx)
if err != nil {
return nil, errors.Wrapf(err, "failed to compute htr for state at slot=%d", slot)
return 0, errors.Wrap(err, "error unmarshaling state to correct version")
}

blockRoot, err := state.LatestBlockHeader().HashTreeRoot()
epoch, err := helpers.LatestWeakSubjectivityEpoch(ctx, headState, vu.Config)
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of latest_block_header")
return 0, errors.Wrap(err, "error computing the weak subjectivity epoch from head state")
}
blockBytes, err := client.GetBlock(ctx, IdFromRoot(ws.BlockRoot))
if err != nil {
return nil, errors.Wrapf(err, "error requesting block by slot = %d", slot)
}
block, err := cf.UnmarshalBeaconBlock(blockBytes)
if err != nil {
return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
}
realBlockRoot, err := block.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block")
}
log.Printf("BeaconState slot=%d, Block slot=%d", state.Slot(), block.Block().Slot())
log.Printf("BeaconState htr=%#xd, Block state_root=%#x", stateRoot, block.Block().StateRoot())
log.Printf("BeaconState latest_block_header htr=%#xd, block htr=%#x", blockRoot, realBlockRoot)
return &OriginData{
wsd: ws,
st: state,
b: block,
sb: stateBytes,
bb: blockBytes,
cf: cf,
}, nil

log.Printf("(computed client-side) weak subjectivity epoch = %d", epoch)
return epoch, nil
}
@@ -93,8 +93,8 @@ func TestFallbackVersionCheck(t *testing.T) {
}}

ctx := context.Background()
_, err := DownloadOriginData(ctx, c)
require.ErrorIs(t, err, ErrUnsupportedPrysmCheckpointVersion)
_, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
require.ErrorIs(t, err, errUnsupportedPrysmCheckpointVersion)
}

func TestFname(t *testing.T) {
@@ -120,9 +120,9 @@ func TestFname(t *testing.T) {
require.Equal(t, expected, actual)
}

func TestDownloadOriginData(t *testing.T) {
func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) {
ctx := context.Background()
cfg := params.MainnetConfig()
cfg := params.MainnetConfig().Copy()

epoch := cfg.AltairForkEpoch - 1
// set up checkpoint state, using the epoch that will be computed as the ws checkpoint state based on the head state
@@ -204,19 +204,15 @@ func TestDownloadOriginData(t *testing.T) {
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}

od, err := DownloadOriginData(ctx, c)
wsd, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
require.NoError(t, err)
require.Equal(t, expectedWSD.Epoch, od.wsd.Epoch)
require.Equal(t, expectedWSD.StateRoot, od.wsd.StateRoot)
require.Equal(t, expectedWSD.BlockRoot, od.wsd.BlockRoot)
require.DeepEqual(t, wsSerialized, od.sb)
require.DeepEqual(t, serBlock, od.bb)
require.DeepEqual(t, wst.Fork().CurrentVersion, od.cf.Version[:])
require.DeepEqual(t, version.Phase0, od.cf.Fork)
require.Equal(t, expectedWSD.Epoch, wsd.Epoch)
require.Equal(t, expectedWSD.StateRoot, wsd.StateRoot)
require.Equal(t, expectedWSD.BlockRoot, wsd.BlockRoot)
}

// runs downloadBackwardsCompatible directly
// and via DownloadOriginData with a round tripper that triggers the backwards compatible code path
// runs computeBackwardsCompatible directly
// and via ComputeWeakSubjectivityCheckpoint with a round tripper that triggers the backwards compatible code path
func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
ctx := context.Background()
cfg := params.MainnetConfig()
@@ -297,16 +293,12 @@ func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}

odPub, err := DownloadOriginData(ctx, c)
wsPub, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
require.NoError(t, err)

odPriv, err := downloadBackwardsCompatible(ctx, c)
wsPriv, err := computeBackwardsCompatible(ctx, c)
require.NoError(t, err)
require.DeepEqual(t, odPriv.wsd, odPub.wsd)
require.DeepEqual(t, odPriv.sb, odPub.sb)
require.DeepEqual(t, odPriv.bb, odPub.bb)
require.DeepEqual(t, odPriv.cf.Fork, odPub.cf.Fork)
require.DeepEqual(t, odPriv.cf.Version, odPub.cf.Version)
require.DeepEqual(t, wsPriv, wsPub)
}

func TestGetWeakSubjectivityEpochFromHead(t *testing.T) {
@@ -402,3 +394,94 @@ func populateValidators(cfg *params.BeaconChainConfig, st state.BeaconState, val

return nil
}

func TestDownloadFinalizedData(t *testing.T) {
ctx := context.Background()
cfg := params.MainnetConfig().Copy()

// avoid the altair zone because genesis tests are easier to set up
epoch := cfg.AltairForkEpoch - 1
// set up checkpoint state, using the epoch that will be computed as the ws checkpoint state based on the head state
slot, err := slots.EpochStart(epoch)
require.NoError(t, err)
st, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, epoch)
require.NoError(t, st.SetFork(fork))

// set up checkpoint block
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, wrapper.SetBlockParentRoot(b, cfg.ZeroHash))
require.NoError(t, wrapper.SetBlockSlot(b, slot))
require.NoError(t, wrapper.SetProposerIndex(b, 0))

// set up state header pointing at checkpoint block - this is how the block is downloaded by root
header, err := b.Header()
require.NoError(t, err)
require.NoError(t, st.SetLatestBlockHeader(header.Header))

// order of operations can be confusing here:
// - when computing the state root, make sure block header is complete, EXCEPT the state root should be zero-value
// - before computing the block root (to match the request route), the block should include the state root
// *computed from the state with a header that does not have a state root set yet*
sr, err := st.HashTreeRoot(ctx)
require.NoError(t, err)

require.NoError(t, wrapper.SetBlockStateRoot(b, sr))
mb, err := b.MarshalSSZ()
require.NoError(t, err)
br, err := b.Block().HashTreeRoot()
require.NoError(t, err)

ms, err := st.MarshalSSZ()
require.NoError(t, err)

hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case renderGetStatePath(IdFinalized):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(ms))
case renderGetBlockPath(IdFromRoot(br)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(mb))
default:
res.StatusCode = http.StatusInternalServerError
res.Body = io.NopCloser(bytes.NewBufferString(""))
}

return res, nil
}},
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}

// sanity check before we go through checkpoint
// make sure we can download the state and unmarshal it with the VersionedUnmarshaler
sb, err := c.GetState(ctx, IdFinalized)
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(sb, ms))
vu, err := detect.FromState(sb)
require.NoError(t, err)
us, err := vu.UnmarshalBeaconState(sb)
require.NoError(t, err)
ushtr, err := us.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, sr, ushtr)

expected := &OriginData{
sb: ms,
bb: mb,
br: br,
sr: sr,
}
od, err := DownloadFinalizedData(ctx, c)
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(expected.sb, od.sb))
require.Equal(t, true, bytes.Equal(expected.bb, od.bb))
require.Equal(t, expected.br, od.br)
require.Equal(t, expected.sr, od.sr)
}
@@ -46,8 +46,9 @@ const (
type StateOrBlockId string

const (
IdGenesis StateOrBlockId = "genesis"
IdHead StateOrBlockId = "head"
IdGenesis StateOrBlockId = "genesis"
IdHead StateOrBlockId = "head"
IdFinalized StateOrBlockId = "finalized"
)

var ErrMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
@@ -344,16 +345,6 @@ func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData
}, nil
}

// WeakSubjectivityData represents the state root, block root and epoch of the BeaconState + SignedBeaconBlock
// that falls at the beginning of the current weak subjectivity period. These values can be used to construct
// a weak subjectivity checkpoint, or to download a BeaconState+SignedBeaconBlock pair that can be used to bootstrap
// a new Beacon Node using Checkpoint Sync.
type WeakSubjectivityData struct {
BlockRoot [32]byte
StateRoot [32]byte
Epoch types.Epoch
}

func non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(response.Body)
var body string
@@ -29,7 +29,7 @@ go_test(
data = glob(["testdata/**"]),
embed = [":go_default_library"],
deps = [
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
@@ -28,6 +28,7 @@ const (
)

var errMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
var errMalformedRequest = errors.New("required request data are missing")

// ClientOpt is a functional option for the Client type (http.Client wrapper)
type ClientOpt func(*Client)
@@ -199,9 +200,15 @@ func (c *Client) GetHeader(ctx context.Context, slot types.Slot, parentHash [32]

// RegisterValidator encodes the SignedValidatorRegistrationV1 message to json (including hex-encoding the byte
// fields with 0x prefixes) and posts to the builder validator registration endpoint.
func (c *Client) RegisterValidator(ctx context.Context, svr *ethpb.SignedValidatorRegistrationV1) error {
v := &SignedValidatorRegistration{SignedValidatorRegistrationV1: svr}
body, err := json.Marshal(v)
func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error {
if len(svr) == 0 {
return errors.Wrap(errMalformedRequest, "empty validator registration list")
}
vs := make([]*SignedValidatorRegistration, len(svr))
for i := 0; i < len(svr); i++ {
vs[i] = &SignedValidatorRegistration{SignedValidatorRegistrationV1: svr[i]}
}
body, err := json.Marshal(vs)
if err != nil {
return errors.Wrap(err, "error encoding the SignedValidatorRegistration value body in RegisterValidator")
}
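With the new signature, callers hand RegisterValidator a slice and the client posts one JSON array to the builder's validator registration endpoint (the array form is visible in the test's expectedBody further down). A hedged usage sketch, assuming an already-constructed Client and an illustrative function name:

```go
package builder

import (
	"context"
	"log"

	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)

// registerAll is a usage sketch (not part of the diff): post a batch of signed
// registrations in a single call; the client marshals the slice as a JSON array.
func registerAll(ctx context.Context, c *Client, regs []*ethpb.SignedValidatorRegistrationV1) error {
	if err := c.RegisterValidator(ctx, regs); err != nil {
		log.Printf("builder registration failed: %v", err)
		return err
	}
	return nil
}
```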
@@ -11,7 +11,7 @@ import (
"testing"

"github.com/prysmaticlabs/go-bitfield"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
@@ -73,7 +73,7 @@ func TestClient_Status(t *testing.T) {

func TestClient_RegisterValidator(t *testing.T) {
ctx := context.Background()
expectedBody := `{"message":{"fee_recipient":"0x0000000000000000000000000000000000000000","gas_limit":"23","timestamp":"42","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"}}`
expectedBody := `[{"message":{"fee_recipient":"0x0000000000000000000000000000000000000000","gas_limit":"23","timestamp":"42","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"}}]`
expectedPath := "/eth/v1/builder/validators"
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
@@ -98,13 +98,13 @@ func TestClient_RegisterValidator(t *testing.T) {
}
reg := &eth.SignedValidatorRegistrationV1{
Message: &eth.ValidatorRegistrationV1{
FeeRecipient: ezDecode(t, fieldparams.EthBurnAddressHex),
FeeRecipient: ezDecode(t, params.BeaconConfig().EthBurnAddressHex),
GasLimit: 23,
Timestamp: 42,
Pubkey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
},
}
require.NoError(t, c.RegisterValidator(ctx, reg))
require.NoError(t, c.RegisterValidator(ctx, []*eth.SignedValidatorRegistrationV1{reg}))
}

func TestClient_GetHeader(t *testing.T) {
@@ -63,7 +63,7 @@ func sszBytesToUint256(b []byte) Uint256 {

// SSZBytes creates an ssz-style (little-endian byte slice) representation of the Uint256
func (s Uint256) SSZBytes() []byte {
return bytesutil.ReverseByteOrder(s.Int.Bytes())
return bytesutil.PadTo(bytesutil.ReverseByteOrder(s.Int.Bytes()), 32)
}

var errUnmarshalUint256Failed = errors.New("unable to UnmarshalText into a Uint256 value")

@@ -694,9 +694,10 @@ func TestMarshalBlindedBeaconBlockBodyBellatrix(t *testing.T) {
}

func TestRoundTripUint256(t *testing.T) {
vs := "452312848583266388373324160190187140051835877600158453279131187530910662656"
vs := "4523128485832663883733241601901871400518358776001584532791311875309106626"
u := stringToUint256(vs)
sb := u.SSZBytes()
require.Equal(t, 32, len(sb))
uu := sszBytesToUint256(sb)
require.Equal(t, true, bytes.Equal(u.SSZBytes(), uu.SSZBytes()))
require.Equal(t, vs, uu.String())
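The fix pads the little-endian byte slice to a fixed 32 bytes; without padding, a small value (such as the shortened constant in the updated test) would serialize to fewer than 32 bytes and break the uint256 round trip. A standalone illustration of the same idea using only the standard library:

```go
package main

import (
	"fmt"
	"math/big"
)

// littleEndian32 mimics the fixed SSZBytes behavior: reverse big.Int's
// big-endian bytes into little-endian order, then pad to 32 bytes.
func littleEndian32(v *big.Int) []byte {
	be := v.Bytes() // big-endian, minimal length
	le := make([]byte, len(be))
	for i, b := range be {
		le[len(be)-1-i] = b
	}
	out := make([]byte, 32)
	copy(out, le) // zero padding fills the remaining high-order bytes
	return out
}

func main() {
	v := new(big.Int).SetUint64(23)
	b := littleEndian32(v)
	fmt.Println(len(b), b[0]) // 32 23
}
```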
@@ -31,26 +31,26 @@ func processField(s interface{}, processors []fieldProcessor) error {
sliceElem := t.Field(i).Type.Elem()
kind := sliceElem.Kind()
// Recursively process slices to struct pointers.
if kind == reflect.Ptr && sliceElem.Elem().Kind() == reflect.Struct {
switch {
case kind == reflect.Ptr && sliceElem.Elem().Kind() == reflect.Struct:
for j := 0; j < v.Field(i).Len(); j++ {
if err := processField(v.Field(i).Index(j).Interface(), processors); err != nil {
return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
}
}
}
// Process each string in string slices.
if kind == reflect.String {
case kind == reflect.String:
for _, proc := range processors {
_, hasTag := t.Field(i).Tag.Lookup(proc.tag)
if hasTag {
for j := 0; j < v.Field(i).Len(); j++ {
if err := proc.f(v.Field(i).Index(j)); err != nil {
return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
}
if !hasTag {
continue
}
for j := 0; j < v.Field(i).Len(); j++ {
if err := proc.f(v.Field(i).Index(j)); err != nil {
return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
}
}
}

}
// Recursively process struct pointers.
case reflect.Ptr:
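The hunk above restructures reflection-driven field processing into a switch over the slice element kind, dispatching on struct-pointer slices versus string slices selected by a struct tag. A self-contained sketch of that tag-driven pattern (type, tag, and function names here are illustrative, not Prysm's):

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

type doc struct {
	Names []string `upper:"true"`
	Notes []string
}

// processStrings walks the struct's slice-of-string fields and applies f to
// every element of each field carrying the given tag, via reflection.
func processStrings(ptr interface{}, tag string, f func(reflect.Value) error) error {
	v := reflect.ValueOf(ptr).Elem()
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		if field.Type.Kind() != reflect.Slice || field.Type.Elem().Kind() != reflect.String {
			continue
		}
		if _, ok := field.Tag.Lookup(tag); !ok {
			continue
		}
		for j := 0; j < v.Field(i).Len(); j++ {
			if err := f(v.Field(i).Index(j)); err != nil {
				return fmt.Errorf("field %s: %w", field.Name, err)
			}
		}
	}
	return nil
}

func main() {
	d := &doc{Names: []string{"alice", "bob"}, Notes: []string{"keep"}}
	_ = processStrings(d, "upper", func(v reflect.Value) error {
		v.SetString(strings.ToUpper(v.String()))
		return nil
	})
	fmt.Println(d.Names, d.Notes) // [ALICE BOB] [keep]
}
```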
@@ -121,8 +121,9 @@ func (g *Gateway) Start() {
}

g.server = &http.Server{
Addr: g.cfg.gatewayAddr,
Handler: corsMux,
Addr: g.cfg.gatewayAddr,
Handler: corsMux,
ReadHeaderTimeout: time.Second,
}

go func() {
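Setting ReadHeaderTimeout bounds how long the server waits for a client to send request headers, which closes off slow-header (Slowloris-style) connections; linters commonly flag http.Server values that leave it unset. A minimal standalone example:

```go
package main

import (
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{
		Addr:              "127.0.0.1:8080",
		Handler:           http.NewServeMux(),
		ReadHeaderTimeout: time.Second, // same bound the gateway now sets
	}
	// Connections that fail to deliver headers within a second are closed.
	_ = srv.ListenAndServe()
}
```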
@@ -10,6 +10,7 @@ go_library(
"head_sync_committee_info.go",
"init_sync_process_block.go",
"log.go",
"merge_ascii_art.go",
"metrics.go",
"new_slot.go",
"options.go",
@@ -137,6 +138,7 @@ go_test(
"//beacon-chain/powchain/testing:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//beacon-chain/state/v3:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/wrapper:go_default_library",
@@ -9,7 +9,9 @@ import (
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
v3 "github.com/prysmaticlabs/prysm/beacon-chain/state/v3"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
@@ -27,6 +29,48 @@ var _ ChainInfoFetcher = (*Service)(nil)
var _ TimeFetcher = (*Service)(nil)
var _ ForkFetcher = (*Service)(nil)

// prepareForkchoiceState prepares a beacon state with the given data to mock
// insert into forkchoice
func prepareForkchoiceState(
_ context.Context,
slot types.Slot,
blockRoot [32]byte,
parentRoot [32]byte,
payloadHash [32]byte,
justifiedEpoch types.Epoch,
finalizedEpoch types.Epoch,
) (state.BeaconState, [32]byte, error) {
blockHeader := &ethpb.BeaconBlockHeader{
ParentRoot: parentRoot[:],
}

executionHeader := &ethpb.ExecutionPayloadHeader{
BlockHash: payloadHash[:],
}

justifiedCheckpoint := &ethpb.Checkpoint{
Epoch: justifiedEpoch,
}

finalizedCheckpoint := &ethpb.Checkpoint{
Epoch: finalizedEpoch,
}

base := &ethpb.BeaconStateBellatrix{
Slot: slot,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
BlockRoots: make([][]byte, 1),
CurrentJustifiedCheckpoint: justifiedCheckpoint,
FinalizedCheckpoint: finalizedCheckpoint,
LatestExecutionPayloadHeader: executionHeader,
LatestBlockHeader: blockHeader,
}

base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
st, err := v3.InitializeFromProto(base)
return st, blockRoot, err
}

func TestHeadRoot_Nil(t *testing.T) {
beaconDB := testDB.SetupDB(t)
c := setupBeaconChain(t, beaconDB)
@@ -36,9 +80,9 @@ func TestHeadRoot_Nil(t *testing.T) {
}

func TestService_ForkChoiceStore(t *testing.T) {
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}}
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
p := c.ForkChoiceStore()
require.Equal(t, 0, int(p.FinalizedEpoch()))
require.Equal(t, types.Epoch(0), p.FinalizedCheckpoint().Epoch)
}

func TestFinalizedCheckpt_CanRetrieve(t *testing.T) {
@@ -283,26 +327,55 @@ func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
}
func TestService_ChainHeads_ProtoArray(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0)}}
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0))
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New()}}
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))

roots, slots := c.ChainHeads()
require.DeepEqual(t, [][32]byte{{'c'}, {'d'}, {'e'}}, roots)
require.DeepEqual(t, []types.Slot{102, 103, 104}, slots)
}

//
// A <- B <- C
// \ \
// \ ---------- E
// ---------- D

func TestService_ChainHeads_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}}
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0))
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))

roots, slots := c.ChainHeads()
require.Equal(t, 3, len(roots))
@@ -379,9 +452,13 @@ func TestService_IsOptimistic_ProtoArray(t *testing.T) {
params.OverrideBeaconConfig(cfg)

ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0))
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))

opt, err := c.IsOptimistic(ctx)
require.NoError(t, err)
@@ -395,9 +472,13 @@ func TestService_IsOptimistic_DoublyLinkedTree(t *testing.T) {
params.OverrideBeaconConfig(cfg)

ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0))
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))

opt, err := c.IsOptimistic(ctx)
require.NoError(t, err)
@@ -414,9 +495,13 @@ func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {

func TestService_IsOptimisticForRoot_ProtoArray(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0))
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))

opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
require.NoError(t, err)
@@ -425,9 +510,13 @@ func TestService_IsOptimisticForRoot_ProtoArray(t *testing.T) {

func TestService_IsOptimisticForRoot_DoublyLinkedTree(t *testing.T) {
ctx := context.Background()
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0))
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))

opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
require.NoError(t, err)
@@ -437,7 +526,7 @@ func TestService_IsOptimisticForRoot_DoublyLinkedTree(t *testing.T) {
func TestService_IsOptimisticForRoot_DB_ProtoArray(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
@@ -502,7 +591,7 @@ func TestService_IsOptimisticForRoot_DB_ProtoArray(t *testing.T) {
func TestService_IsOptimisticForRoot_DB_DoublyLinkedTree(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
@@ -566,7 +655,7 @@ func TestService_IsOptimisticForRoot_DB_DoublyLinkedTree(t *testing.T) {
func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
c.head = &head{root: params.BeaconConfig().ZeroHash}
b := util.NewBeaconBlock()
b.Block.Slot = 10
@@ -1,5 +1,4 @@
//go:build !develop
// +build !develop

package blockchain


@@ -27,6 +27,8 @@ var (
errWSBlockNotFound = errors.New("weak subjectivity root not found in db")
// errWSBlockNotFoundInEpoch is returned when a block is not found in the WS cache or DB within epoch.
errWSBlockNotFoundInEpoch = errors.New("weak subjectivity root not found in db within epoch")
// errNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
errNotDescendantOfFinalized = invalidBlock{errors.New("not descendant of finalized checkpoint")}
)

// An invalid block is the block that fails state transition based on the core protocol rules.

@@ -12,7 +12,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
@@ -270,10 +269,10 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
recipient, err := s.cfg.BeaconDB.FeeRecipientByValidatorID(ctx, proposerID)
switch {
case errors.Is(err, kv.ErrNotFoundFeeRecipient):
if feeRecipient.String() == fieldparams.EthBurnAddressHex {
if feeRecipient.String() == params.BeaconConfig().EthBurnAddressHex {
logrus.WithFields(logrus.Fields{
"validatorIndex": proposerID,
"burnAddress": fieldparams.EthBurnAddressHex,
"burnAddress": params.BeaconConfig().EthBurnAddressHex,
}).Warn("Fee recipient is currently using the burn address, " +
"you will not be rewarded transaction fees on this setting. " +
"Please set a different eth address as the fee recipient. " +

@@ -11,9 +11,10 @@ import (
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
|
||||
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
bstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
@@ -43,7 +44,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, altairBlk))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, bellatrixBlk))
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -51,13 +52,20 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
|
||||
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
st, _ := util.DeterministicGenesisState(t, 10)
|
||||
service.head = &head{
|
||||
state: st,
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 1, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 1, altairBlkRoot, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 2, bellatrixBlkRoot, altairBlkRoot, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -177,7 +185,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, st, tt.finalizedRoot))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, tt.finalizedRoot))
|
||||
fc := ðpb.Checkpoint{Epoch: 1, Root: tt.finalizedRoot[:]}
|
||||
fc := ðpb.Checkpoint{Epoch: 0, Root: tt.finalizedRoot[:]}
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(fc, [32]byte{'a'})
|
||||
service.store.SetJustifiedCheckptAndPayloadHash(fc, [32]byte{'b'})
|
||||
arg := ¬ifyForkchoiceUpdateArg{
|
||||
@@ -185,7 +193,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
|
||||
headRoot: tt.headRoot,
|
||||
headBlock: tt.blk,
|
||||
}
|
||||
_, err := service.notifyForkchoiceUpdate(ctx, arg)
|
||||
_, err = service.notifyForkchoiceUpdate(ctx, arg)
|
||||
if tt.errString != "" {
|
||||
require.ErrorContains(t, tt.errString, err)
|
||||
} else {
|
||||
@@ -282,7 +290,7 @@ func Test_NotifyForkchoiceUpdateRecursive(t *testing.T) {
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wbg))
|
||||
|
||||
// Insert blocks into forkchoice
|
||||
fcs := doublylinkedtree.New(0, 0)
|
||||
fcs := doublylinkedtree.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -292,20 +300,36 @@ func Test_NotifyForkchoiceUpdateRecursive(t *testing.T) {
|
||||
service, err := NewService(ctx, opts...)
|
||||
service.justifiedBalances.balances = []uint64{50, 100, 200}
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 1, bra, [32]byte{}, [32]byte{'A'}, 0, 0))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 2, brb, bra, [32]byte{'B'}, 0, 0))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 3, brc, brb, [32]byte{'C'}, 0, 0))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 4, brd, brc, [32]byte{'D'}, 0, 0))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 5, bre, brb, [32]byte{'E'}, 0, 0))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 6, brf, bre, [32]byte{'F'}, 0, 0))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 7, brg, bre, [32]byte{'G'}, 0, 0))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 1, bra, [32]byte{}, [32]byte{'A'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 2, brb, bra, [32]byte{'B'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 3, brc, brb, [32]byte{'C'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 4, brd, brc, [32]byte{'D'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 5, bre, brb, [32]byte{'E'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 6, brf, bre, [32]byte{'F'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 7, brg, bre, [32]byte{'G'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
// Insert Attestations to D, F and G so that they have higher weight than D
|
||||
// Ensure G is head
|
||||
fcs.ProcessAttestation(ctx, []uint64{0}, brd, 1)
|
||||
fcs.ProcessAttestation(ctx, []uint64{1}, brf, 1)
|
||||
fcs.ProcessAttestation(ctx, []uint64{2}, brg, 1)
|
||||
headRoot, err := fcs.Head(ctx, bra, []uint64{50, 100, 200})
|
||||
jc := &forkchoicetypes.Checkpoint{Epoch: 0, Root: bra}
|
||||
require.NoError(t, fcs.UpdateJustifiedCheckpoint(jc))
|
||||
headRoot, err := fcs.Head(ctx, []uint64{50, 100, 200})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, brg, headRoot)
|
||||
|
||||
@@ -326,7 +350,7 @@ func Test_NotifyForkchoiceUpdateRecursive(t *testing.T) {
|
||||
_, err = service.notifyForkchoiceUpdate(ctx, a)
|
||||
require.ErrorIs(t, ErrInvalidPayload, err)
|
||||
// Ensure Head is D
|
||||
headRoot, err = fcs.Head(ctx, bra, service.justifiedBalances.balances)
|
||||
headRoot, err = fcs.Head(ctx, service.justifiedBalances.balances)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, brd, headRoot)
|
||||
|
||||
@@ -343,7 +367,7 @@ func Test_NotifyNewPayload(t *testing.T) {
|
||||
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -383,11 +407,15 @@ func Test_NotifyNewPayload(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
r, err := bellatrixBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 1, r, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 1, r, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
tests := []struct {
|
||||
postState state.BeaconState
|
||||
postState bstate.BeaconState
|
||||
invalidBlock bool
|
||||
isValidPayload bool
|
||||
blk interfaces.SignedBeaconBlock
|
||||
@@ -531,7 +559,9 @@ func Test_NotifyNewPayload(t *testing.T) {
|
||||
}
|
||||
service.cfg.ExecutionEngineCaller = e
|
||||
root := [32]byte{'a'}
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, root, root, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, root, root, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
postVersion, postHeader, err := getStateVersionAndPayload(tt.postState)
|
||||
require.NoError(t, err)
|
||||
isValidPayload, err := service.notifyNewPayload(ctx, postVersion, postHeader, tt.blk)
|
||||
@@ -555,7 +585,7 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -598,7 +628,7 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
|
||||
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -779,7 +809,7 @@ func Test_GetPayloadAttribute(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, hasPayload)
|
||||
require.Equal(t, suggestedVid, vId)
|
||||
require.Equal(t, fieldparams.EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient).String())
|
||||
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient).String())
|
||||
require.LogsContain(t, hook, "Fee recipient is currently using the burn address")
|
||||
|
||||
// Cache hit, advance state, has fee recipient
|
||||
@@ -800,7 +830,7 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
stateGen := stategen.New(beaconDB)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stateGen),
|
||||
@@ -816,8 +846,9 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
|
||||
genesisRoot, err := genesisBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, genesisRoot, params.BeaconConfig().ZeroHash,
|
||||
params.BeaconConfig().ZeroHash, 0, 0))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, genesisRoot, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
genesisSummary := ðpb.StateSummary{
|
||||
Root: genesisStateRoot[:],
|
||||
Slot: 0,
|
||||
@@ -848,8 +879,9 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
|
||||
Slot: 320,
|
||||
}
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, opStateSummary))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 320, opRoot, genesisRoot,
|
||||
params.BeaconConfig().ZeroHash, 10, 10))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 320, opRoot, genesisRoot, params.BeaconConfig().ZeroHash, 10, 10)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
assert.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, opRoot))
|
||||
require.NoError(t, service.updateFinalized(ctx, opCheckpoint))
|
||||
cp, err := service.cfg.BeaconDB.LastValidatedCheckpoint(ctx)
|
||||
@@ -876,8 +908,9 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
|
||||
Slot: 640,
|
||||
}
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, validSummary))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 640, validRoot, params.BeaconConfig().ZeroHash,
|
||||
params.BeaconConfig().ZeroHash, 20, 20))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 640, validRoot, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 20, 20)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
require.NoError(t, fcs.SetOptimisticToValid(ctx, validRoot))
|
||||
assert.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, validRoot))
|
||||
require.NoError(t, service.updateFinalized(ctx, validCheckpoint))
|
||||
@@ -897,7 +930,7 @@ func TestService_removeInvalidBlockAndState(t *testing.T) {
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(protoarray.New(0, 0)),
|
||||
WithForkChoiceStore(protoarray.New()),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
@@ -953,7 +986,7 @@ func TestService_getPayloadHash(t *testing.T) {
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(protoarray.New(0, 0)),
|
||||
WithForkChoiceStore(protoarray.New()),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -9,8 +9,10 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
@@ -78,10 +80,7 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) ([32]byte,
}
// To get head before the first justified epoch, the fork choice will start with origin root
// instead of zero hashes.
headStartRoot := bytesutil.ToBytes32(j.Root)
if headStartRoot == params.BeaconConfig().ZeroHash {
headStartRoot = s.originBlockRoot
}
headStartRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(j.Root))

// In order to process head, fork choice store requires justified info.
// If the fork choice store is missing justified block info, a node should
@@ -92,31 +91,42 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) ([32]byte,
if err != nil {
return [32]byte{}, err
}
if features.Get().EnableForkChoiceDoublyLinkedTree {
s.cfg.ForkChoiceStore = doublylinkedtree.New(j.Epoch, f.Epoch)
} else {
s.cfg.ForkChoiceStore = protoarray.New(j.Epoch, f.Epoch)
st, err := s.cfg.StateGen.StateByRoot(ctx, s.ensureRootNotZeros(headStartRoot))
if err != nil {
return [32]byte{}, err
}
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block(), headStartRoot, f, j); err != nil {
if features.Get().EnableForkChoiceDoublyLinkedTree {
s.cfg.ForkChoiceStore = doublylinkedtree.New()
} else {
s.cfg.ForkChoiceStore = protoarray.New()
}
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block(), headStartRoot, st, f, j); err != nil {
return [32]byte{}, err
}
}

return s.cfg.ForkChoiceStore.Head(ctx, headStartRoot, balances)
jc := &forkchoicetypes.Checkpoint{Epoch: j.Epoch, Root: headStartRoot}
fc := &forkchoicetypes.Checkpoint{Epoch: f.Epoch, Root: s.ensureRootNotZeros(bytesutil.ToBytes32(f.Root))}
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(jc); err != nil {
return [32]byte{}, err
}
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fc); err != nil {
return [32]byte{}, err
}
return s.cfg.ForkChoiceStore.Head(ctx, balances)
}

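The hunk above changes how `updateHead` rebuilds a missing fork choice store: instead of seeding the constructor with the justified and finalized epochs and passing the justified root into `Head`, the store is created empty, the justified and finalized checkpoints are pushed in explicitly, and `Head` takes only the balances. A minimal sketch of that call order follows; the `ForkChoicer` interface and `Checkpoint` struct below are simplified assumptions for illustration, not Prysm's actual definitions.

```go
package sketch

import "context"

// Checkpoint mirrors the forkchoicetypes.Checkpoint shape used in the diff:
// an epoch plus a fixed 32-byte root. Simplified stand-in for illustration.
type Checkpoint struct {
	Epoch uint64
	Root  [32]byte
}

// ForkChoicer is an assumed subset of the fork choice store API after this
// change: checkpoints are injected explicitly and Head no longer takes a root.
type ForkChoicer interface {
	UpdateJustifiedCheckpoint(c *Checkpoint) error
	UpdateFinalizedCheckpoint(c *Checkpoint) error
	Head(ctx context.Context, balances []uint64) ([32]byte, error)
}

// updateHeadSketch shows the new call order from the hunk above: set the
// justified checkpoint, set the finalized checkpoint, then ask for head.
func updateHeadSketch(ctx context.Context, f ForkChoicer, justified, finalized Checkpoint, balances []uint64) ([32]byte, error) {
	if err := f.UpdateJustifiedCheckpoint(&justified); err != nil {
		return [32]byte{}, err
	}
	if err := f.UpdateFinalizedCheckpoint(&finalized); err != nil {
		return [32]byte{}, err
	}
	// Head is now driven purely by the stored checkpoints and the balances.
	return f.Head(ctx, balances)
}
```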
// This saves head info to the local service cache, it also saves the
|
||||
// new head root to the DB.
|
||||
func (s *Service) saveHead(ctx context.Context, headRoot [32]byte, headBlock interfaces.SignedBeaconBlock, headState state.BeaconState) error {
|
||||
func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock interfaces.SignedBeaconBlock, headState state.BeaconState) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.saveHead")
|
||||
defer span.End()
|
||||
|
||||
// Do nothing if head hasn't changed.
|
||||
r, err := s.HeadRoot(ctx)
|
||||
oldHeadroot, err := s.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if headRoot == bytesutil.ToBytes32(r) {
|
||||
if newHeadRoot == bytesutil.ToBytes32(oldHeadroot) {
|
||||
return nil
|
||||
}
|
||||
if err := wrapper.BeaconBlockIsNil(headBlock); err != nil {
|
||||
@@ -128,17 +138,19 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte, headBlock int
|
||||
|
||||
// If the head state is not available, just return nil.
|
||||
// There's nothing to cache
|
||||
if !s.cfg.BeaconDB.HasStateSummary(ctx, headRoot) {
|
||||
if !s.cfg.BeaconDB.HasStateSummary(ctx, newHeadRoot) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// A chain re-org occurred, so we fire an event notifying the rest of the services.
|
||||
headSlot := s.HeadSlot()
|
||||
newHeadSlot := headBlock.Block().Slot()
|
||||
s.headLock.RLock()
|
||||
oldHeadRoot := s.headRoot()
|
||||
oldStateRoot := s.headBlock().Block().StateRoot()
|
||||
s.headLock.RUnlock()
|
||||
headSlot := s.HeadSlot()
|
||||
newHeadSlot := headBlock.Block().Slot()
|
||||
newStateRoot := headBlock.Block().StateRoot()
|
||||
if bytesutil.ToBytes32(headBlock.Block().ParentRoot()) != bytesutil.ToBytes32(r) {
|
||||
if bytesutil.ToBytes32(headBlock.Block().ParentRoot()) != bytesutil.ToBytes32(oldHeadroot) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"newSlot": fmt.Sprintf("%d", newHeadSlot),
|
||||
"oldSlot": fmt.Sprintf("%d", headSlot),
|
||||
@@ -154,7 +166,7 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte, headBlock int
|
||||
Slot: newHeadSlot,
|
||||
Depth: absoluteSlotDifference,
|
||||
OldHeadBlock: oldHeadRoot[:],
|
||||
NewHeadBlock: headRoot[:],
|
||||
NewHeadBlock: newHeadRoot[:],
|
||||
OldHeadState: oldStateRoot,
|
||||
NewHeadState: newStateRoot,
|
||||
Epoch: slots.ToEpoch(newHeadSlot),
|
||||
@@ -162,25 +174,24 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte, headBlock int
},
})

if err := s.saveOrphanedAtts(ctx, bytesutil.ToBytes32(r)); err != nil {
if err := s.saveOrphanedAtts(ctx, bytesutil.ToBytes32(oldHeadroot), newHeadRoot); err != nil {
return err
}

reorgCount.Inc()
}

// Cache the new head info.
s.setHead(headRoot, headBlock, headState)
s.setHead(newHeadRoot, headBlock, headState)

// Save the new head root to DB.
if err := s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, headRoot); err != nil {
if err := s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, newHeadRoot); err != nil {
return errors.Wrap(err, "could not save head root in DB")
}

// Forward an event capturing a new chain head over a common event feed
// done in a goroutine to avoid blocking the critical runtime main routine.
go func() {
if err := s.notifyNewHeadEvent(ctx, newHeadSlot, headState, newStateRoot, headRoot[:]); err != nil {
if err := s.notifyNewHeadEvent(ctx, newHeadSlot, headState, newStateRoot, newHeadRoot[:]); err != nil {
log.WithError(err).Error("Could not notify event feed of new chain head")
}
}()
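Taken together, the `saveHead` hunks above amount to: bail out if the head is unchanged, treat a head whose parent is not the old head as a reorg (firing an event and re-pooling attestations from the orphaned branch), then cache and persist the new head. A stripped-down sketch of that ordering is below; the `headSaver` interface and its method names are hypothetical, introduced only to make the sketch compile, and do not reflect Prysm's real API.

```go
package sketch

import (
	"bytes"
	"context"
)

// headSaver is an assumed, trimmed-down view of the blockchain service used
// only to illustrate the save-head flow from the hunks above.
type headSaver interface {
	OldHeadRoot(ctx context.Context) ([32]byte, error)
	SaveOrphanedAtts(ctx context.Context, orphanedRoot, newHeadRoot [32]byte) error
	CacheHead(root [32]byte) error
	PersistHeadRoot(ctx context.Context, root [32]byte) error
}

// saveHeadSketch mirrors the order of operations after this change: a reorg is
// detected when the new head does not build on the old head, in which case
// attestations from the orphaned branch are returned to the pool before the
// new head is cached and persisted.
func saveHeadSketch(ctx context.Context, s headSaver, newHeadRoot, newHeadParent [32]byte) error {
	oldHeadRoot, err := s.OldHeadRoot(ctx)
	if err != nil {
		return err
	}
	if newHeadRoot == oldHeadRoot {
		return nil // head unchanged, nothing to do
	}
	if !bytes.Equal(newHeadParent[:], oldHeadRoot[:]) {
		// Chain reorg: the old head branch is orphaned.
		if err := s.SaveOrphanedAtts(ctx, oldHeadRoot, newHeadRoot); err != nil {
			return err
		}
	}
	if err := s.CacheHead(newHeadRoot); err != nil {
		return err
	}
	return s.PersistHeadRoot(ctx, newHeadRoot)
}
```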
@@ -353,35 +364,48 @@ func (s *Service) notifyNewHeadEvent(
return nil
}

// This saves the attestations inside the beacon block with respect to root `orphanedRoot` back into the
// attestation pool. It also filters out the attestations that is one epoch older as a
// defense so invalid attestations don't flow into the attestation pool.
func (s *Service) saveOrphanedAtts(ctx context.Context, orphanedRoot [32]byte) error {
orphanedBlk, err := s.getBlock(ctx, orphanedRoot)
if err != nil {
// This saves the attestations between `orphanedRoot` and the common ancestor root that is derived using `newHeadRoot`.
// It also filters out attestations that are more than one epoch old as a defense so invalid attestations don't flow into the attestation pool.
func (s *Service) saveOrphanedAtts(ctx context.Context, orphanedRoot [32]byte, newHeadRoot [32]byte) error {
commonAncestorRoot, err := s.ForkChoicer().CommonAncestorRoot(ctx, newHeadRoot, orphanedRoot)
switch {
// Exit early if there's no common ancestor as there would be nothing to save.
case errors.Is(err, forkchoice.ErrUnknownCommonAncestor):
return nil
case err != nil:
return err
}

if orphanedBlk == nil || orphanedBlk.IsNil() {
return errors.New("orphaned block can't be nil")
}

for _, a := range orphanedBlk.Block().Body().Attestations() {
// Is the attestation one epoch older.
if a.Data.Slot+params.BeaconConfig().SlotsPerEpoch < s.CurrentSlot() {
continue
for orphanedRoot != commonAncestorRoot {
if ctx.Err() != nil {
return ctx.Err()
}
if helpers.IsAggregated(a) {
if err := s.cfg.AttPool.SaveAggregatedAttestation(a); err != nil {
return err
}
} else {
if err := s.cfg.AttPool.SaveUnaggregatedAttestation(a); err != nil {
return err
}
}
saveOrphanedAttCount.Inc()
}

orphanedBlk, err := s.getBlock(ctx, orphanedRoot)
if err != nil {
return err
}
// If the block is an epoch older, break out of the loop since we can't include atts anyway.
// This prevents getting stuck in this loop longer than necessary.
if orphanedBlk.Block().Slot()+params.BeaconConfig().SlotsPerEpoch <= s.CurrentSlot() {
break
}
for _, a := range orphanedBlk.Block().Body().Attestations() {
// If the attestation is more than one epoch old, it wouldn't be useful to save it.
if a.Data.Slot+params.BeaconConfig().SlotsPerEpoch < s.CurrentSlot() {
continue
}
if helpers.IsAggregated(a) {
if err := s.cfg.AttPool.SaveAggregatedAttestation(a); err != nil {
return err
}
} else {
if err := s.cfg.AttPool.SaveUnaggregatedAttestation(a); err != nil {
return err
}
}
saveOrphanedAttCount.Inc()
}
orphanedRoot = bytesutil.ToBytes32(orphanedBlk.Block().ParentRoot())
}
return nil
}

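The replacement above turns `saveOrphanedAtts` from a single-block operation into a walk from the orphaned head back to the fork point: the common ancestor of the orphaned root and the new head is looked up in fork choice, and every block on the orphaned branch that is still recent enough has its attestations returned to the pool. A stripped-down sketch of that traversal over a hypothetical in-memory block index follows; the `Block` struct and `blockIndex` map are assumptions for illustration only.

```go
package sketch

// Block is a minimal stand-in for a beacon block: its root, parent root,
// slot, and the attestations carried in its body.
type Block struct {
	Root         [32]byte
	ParentRoot   [32]byte
	Slot         uint64
	Attestations []string // placeholder for attestation objects
}

// collectOrphanedAtts walks from orphanedRoot up the parent links until it
// reaches commonAncestor, gathering attestations from blocks that are still
// within one epoch of currentSlot and therefore worth re-pooling.
func collectOrphanedAtts(blockIndex map[[32]byte]Block, orphanedRoot, commonAncestor [32]byte, currentSlot, slotsPerEpoch uint64) []string {
	var pooled []string
	for orphanedRoot != commonAncestor {
		blk, ok := blockIndex[orphanedRoot]
		if !ok {
			break // unknown block; nothing more to recover
		}
		// Blocks older than an epoch can't contribute includable attestations.
		if blk.Slot+slotsPerEpoch <= currentSlot {
			break
		}
		pooled = append(pooled, blk.Attestations...)
		orphanedRoot = blk.ParentRoot
	}
	return pooled
}
```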
@@ -3,6 +3,7 @@ package blockchain
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -10,9 +11,11 @@ import (
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
@@ -49,6 +52,9 @@ func TestSaveHead_Different(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), oldBlock))
|
||||
oldRoot, err := oldBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, bytesutil.ToBytes32(oldBlock.Block().ParentRoot()), [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
service.head = &head{
|
||||
slot: 0,
|
||||
root: oldRoot,
|
||||
@@ -64,6 +70,9 @@ func TestSaveHead_Different(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
|
||||
newRoot, err := newHeadBlock.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, bytesutil.ToBytes32(wsb.Block().ParentRoot()), [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
headState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, headState.SetSlot(1))
|
||||
@@ -93,6 +102,9 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), oldBlock))
|
||||
oldRoot, err := oldBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, bytesutil.ToBytes32(oldBlock.Block().ParentRoot()), [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
service.head = &head{
|
||||
slot: 0,
|
||||
root: oldRoot,
|
||||
@@ -110,6 +122,9 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
|
||||
newRoot, err := newHeadBlock.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, bytesutil.ToBytes32(wsb.Block().ParentRoot()), [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
headState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, headState.SetSlot(1))
|
||||
@@ -153,14 +168,14 @@ func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state, _ := util.DeterministicGenesisState(t, 1)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), state, r))
|
||||
|
||||
service.store.SetJustifiedCheckptAndPayloadHash(ðpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{}, [32]byte{'b'})
|
||||
service.store.SetBestJustifiedCheckpt(ðpb.Checkpoint{})
|
||||
headRoot, err := service.updateHead(context.Background(), []uint64{})
|
||||
_, err = service.updateHead(context.Background(), []uint64{})
|
||||
require.NoError(t, err)
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
require.NoError(t, service.saveHead(context.Background(), headRoot, wsb, st))
|
||||
}
|
||||
|
||||
func Test_notifyNewHeadEvent(t *testing.T) {
|
||||
@@ -229,57 +244,374 @@ func Test_notifyNewHeadEvent(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestSaveOrphanedAtts(t *testing.T) {
|
||||
genesis, keys := util.DeterministicGenesisState(t, 64)
|
||||
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
|
||||
assert.NoError(t, err)
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
func TestSaveOrphanedAtts_NoCommonAncestor(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
service.genesisTime = time.Now()
|
||||
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
// Chain setup
|
||||
// 0 -- 1 -- 2 -- 3
|
||||
// -4
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
|
||||
assert.NoError(t, err)
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
|
||||
rG, err := blkG.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
require.NoError(t, service.saveOrphanedAtts(ctx, r))
|
||||
|
||||
require.Equal(t, len(b.Block.Body.Attestations), service.cfg.AttPool.AggregatedAttestationCount())
|
||||
savedAtts := service.cfg.AttPool.AggregatedAttestations()
|
||||
atts := b.Block.Body.Attestations
|
||||
require.DeepSSZEqual(t, atts, savedAtts)
|
||||
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
|
||||
assert.NoError(t, err)
|
||||
blk1.Block.ParentRoot = rG[:]
|
||||
r1, err := blk1.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
|
||||
assert.NoError(t, err)
|
||||
blk2.Block.ParentRoot = r1[:]
|
||||
r2, err := blk2.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk3, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 3)
|
||||
assert.NoError(t, err)
|
||||
blk3.Block.ParentRoot = r2[:]
|
||||
r3, err := blk3.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk4 := util.NewBeaconBlock()
|
||||
blk4.Block.Slot = 4
|
||||
r4, err := blk4.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
|
||||
r, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, b))
|
||||
}
|
||||
|
||||
require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
|
||||
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
|
||||
}
|
||||
|
||||
func TestSaveOrphanedAtts(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
|
||||
// Chain setup
|
||||
// 0 -- 1 -- 2 -- 3
|
||||
// \-4
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
|
||||
assert.NoError(t, err)
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
|
||||
rG, err := blkG.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
|
||||
assert.NoError(t, err)
|
||||
blk1.Block.ParentRoot = rG[:]
|
||||
r1, err := blk1.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
|
||||
assert.NoError(t, err)
|
||||
blk2.Block.ParentRoot = r1[:]
|
||||
r2, err := blk2.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk3, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 3)
|
||||
assert.NoError(t, err)
|
||||
blk3.Block.ParentRoot = r2[:]
|
||||
r3, err := blk3.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk4 := util.NewBeaconBlock()
|
||||
blk4.Block.Slot = 4
|
||||
blk4.Block.ParentRoot = rG[:]
|
||||
r4, err := blk4.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
|
||||
r, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, b))
|
||||
}
|
||||
|
||||
require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
|
||||
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
|
||||
wantAtts := []*ethpb.Attestation{
|
||||
blk3.Block.Body.Attestations[0],
|
||||
blk2.Block.Body.Attestations[0],
|
||||
blk1.Block.Body.Attestations[0],
|
||||
}
|
||||
atts := service.cfg.AttPool.AggregatedAttestations()
|
||||
sort.Slice(atts, func(i, j int) bool {
|
||||
return atts[i].Data.Slot > atts[j].Data.Slot
|
||||
})
|
||||
require.DeepEqual(t, wantAtts, atts)
|
||||
}
|
||||
|
||||
func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
|
||||
genesis, keys := util.DeterministicGenesisState(t, 64)
|
||||
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+2)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
|
||||
// Chain setup
|
||||
// 0 -- 1 -- 2
|
||||
// \-4
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
|
||||
assert.NoError(t, err)
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
|
||||
rG, err := blkG.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
|
||||
assert.NoError(t, err)
|
||||
blk1.Block.ParentRoot = rG[:]
|
||||
r1, err := blk1.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
|
||||
assert.NoError(t, err)
|
||||
blk2.Block.ParentRoot = r1[:]
|
||||
r2, err := blk2.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk4 := util.NewBeaconBlock()
|
||||
blk4.Block.Slot = 4
|
||||
blk4.Block.ParentRoot = rG[:]
|
||||
r4, err := blk4.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk4} {
|
||||
r, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, b))
|
||||
}
|
||||
|
||||
require.NoError(t, service.saveOrphanedAtts(ctx, r2, r4))
|
||||
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
|
||||
}
|
||||
|
||||
func TestSaveOrphanedAtts_NoCommonAncestor_DoublyLinkedTrie(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableForkChoiceDoublyLinkedTree: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
// Chain setup
|
||||
// 0 -- 1 -- 2 -- 3
|
||||
// -4
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
|
||||
assert.NoError(t, err)
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
|
||||
rG, err := blkG.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
require.NoError(t, service.saveOrphanedAtts(ctx, r))
|
||||
|
||||
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
|
||||
assert.NoError(t, err)
|
||||
blk1.Block.ParentRoot = rG[:]
|
||||
r1, err := blk1.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
|
||||
assert.NoError(t, err)
|
||||
blk2.Block.ParentRoot = r1[:]
|
||||
r2, err := blk2.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk3, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 3)
|
||||
assert.NoError(t, err)
|
||||
blk3.Block.ParentRoot = r2[:]
|
||||
r3, err := blk3.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk4 := util.NewBeaconBlock()
|
||||
blk4.Block.Slot = 4
|
||||
r4, err := blk4.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
|
||||
r, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, b))
|
||||
}
|
||||
|
||||
require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
|
||||
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
|
||||
}
|
||||
|
||||
func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableForkChoiceDoublyLinkedTree: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
|
||||
// Chain setup
|
||||
// 0 -- 1 -- 2 -- 3
|
||||
// \-4
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
|
||||
assert.NoError(t, err)
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
|
||||
rG, err := blkG.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
|
||||
assert.NoError(t, err)
|
||||
blk1.Block.ParentRoot = rG[:]
|
||||
r1, err := blk1.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
|
||||
assert.NoError(t, err)
|
||||
blk2.Block.ParentRoot = r1[:]
|
||||
r2, err := blk2.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk3, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 3)
|
||||
assert.NoError(t, err)
|
||||
blk3.Block.ParentRoot = r2[:]
|
||||
r3, err := blk3.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk4 := util.NewBeaconBlock()
|
||||
blk4.Block.Slot = 4
|
||||
blk4.Block.ParentRoot = rG[:]
|
||||
r4, err := blk4.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
|
||||
r, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, b))
|
||||
}
|
||||
|
||||
require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
|
||||
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
|
||||
wantAtts := []*ethpb.Attestation{
|
||||
blk3.Block.Body.Attestations[0],
|
||||
blk2.Block.Body.Attestations[0],
|
||||
blk1.Block.Body.Attestations[0],
|
||||
}
|
||||
atts := service.cfg.AttPool.AggregatedAttestations()
|
||||
sort.Slice(atts, func(i, j int) bool {
|
||||
return atts[i].Data.Slot > atts[j].Data.Slot
|
||||
})
|
||||
require.DeepEqual(t, wantAtts, atts)
|
||||
}
|
||||
|
||||
func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableForkChoiceDoublyLinkedTree: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+2)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
|
||||
// Chain setup
|
||||
// 0 -- 1 -- 2
|
||||
// \-4
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
|
||||
assert.NoError(t, err)
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
|
||||
rG, err := blkG.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
|
||||
assert.NoError(t, err)
|
||||
blk1.Block.ParentRoot = rG[:]
|
||||
r1, err := blk1.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
|
||||
assert.NoError(t, err)
|
||||
blk2.Block.ParentRoot = r1[:]
|
||||
r2, err := blk2.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk4 := util.NewBeaconBlock()
|
||||
blk4.Block.Slot = 4
|
||||
blk4.Block.ParentRoot = rG[:]
|
||||
r4, err := blk4.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk4} {
|
||||
r, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, b))
|
||||
}
|
||||
|
||||
require.NoError(t, service.saveOrphanedAtts(ctx, r2, r4))
|
||||
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
|
||||
savedAtts := service.cfg.AttPool.AggregatedAttestations()
|
||||
atts := b.Block.Body.Attestations
|
||||
require.DeepNotSSZEqual(t, atts, savedAtts)
|
||||
}
|
||||
|
||||
func TestUpdateHead_noSavedChanges(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := doublylinkedtree.New(0, 0)
|
||||
fcs := doublylinkedtree.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -296,7 +628,7 @@ func TestUpdateHead_noSavedChanges(t *testing.T) {
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, bellatrixBlk))
|
||||
fcp := ðpb.Checkpoint{
|
||||
Root: bellatrixBlkRoot[:],
|
||||
Epoch: 1,
|
||||
Epoch: 0,
|
||||
}
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(fcp, [32]byte{'a'})
|
||||
service.store.SetJustifiedCheckptAndPayloadHash(fcp, [32]byte{'b'})
|
||||
@@ -309,6 +641,9 @@ func TestUpdateHead_noSavedChanges(t *testing.T) {
|
||||
headRoot := service.headRoot()
|
||||
require.Equal(t, [32]byte{}, headRoot)
|
||||
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 0, bellatrixBlkRoot, [32]byte{}, [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
|
||||
newRoot, err := service.updateHead(ctx, []uint64{1, 2})
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, headRoot, newRoot)
|
||||
|
||||
@@ -5,6 +5,8 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
@@ -60,27 +62,50 @@ func logBlockSyncStatus(block interfaces.BeaconBlock, blockRoot [32]byte, justif
return err
}
level := log.Logger.GetLevel()

log = log.WithField("slot", block.Slot())
if level >= logrus.DebugLevel {
log.WithFields(logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(block.ParentRoot())[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime),
}).Debug("Synced new block")
} else {
log.WithFields(logrus.Fields{
"slot": block.Slot(),
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
}).Info("Synced new block")
log = log.WithField("slotInEpoch", block.Slot()%params.BeaconConfig().SlotsPerEpoch)
log = log.WithField("justifiedEpoch", justified.Epoch)
log = log.WithField("justifiedRoot", fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]))
log = log.WithField("parentRoot", fmt.Sprintf("0x%s...", hex.EncodeToString(block.ParentRoot())[:8]))
log = log.WithField("version", version.String(block.Version()))
log = log.WithField("sinceSlotStartTime", prysmTime.Now().Sub(startTime))
log = log.WithField("chainServiceProcessedTime", prysmTime.Now().Sub(receivedTime))
}

log.WithFields(logrus.Fields{
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
}).Info("Synced new block")
return nil
}

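The refactor above keeps one final "Synced new block" log line but only attaches the extra diagnostic fields when the logger is running at debug verbosity or higher. A minimal sketch of that pattern with logrus is shown below; the field names mirror the diff, while the slot and root values are placeholders.

```go
package main

import "github.com/sirupsen/logrus"

// logSyncedBlock attaches a base field set unconditionally and adds noisier
// diagnostic fields only when the logger is at debug level or more verbose.
func logSyncedBlock(logger *logrus.Logger, slot, epoch uint64, blockRoot string) {
	entry := logger.WithField("slot", slot)
	if logger.GetLevel() >= logrus.DebugLevel {
		// Extra detail only when debugging; slotsPerEpoch of 32 is a placeholder.
		entry = entry.WithField("slotInEpoch", slot%32).
			WithField("parentRoot", "0xabcd1234...")
	}
	entry.WithFields(logrus.Fields{
		"block": blockRoot,
		"epoch": epoch,
	}).Info("Synced new block")
}

func main() {
	logger := logrus.New()
	logger.SetLevel(logrus.InfoLevel) // debug fields are skipped at this level
	logSyncedBlock(logger, 123456, 3858, "0xdeadbeef...")
}
```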
// logs payload related data every slot.
func logPayload(block interfaces.BeaconBlock) error {
isExecutionBlk, err := blocks.IsExecutionBlock(block.Body())
if err != nil {
return errors.Wrap(err, "could not determine if block is execution block")
}
if !isExecutionBlk {
return nil
}
payload, err := block.Body().ExecutionPayload()
if err != nil {
return err
}
if payload.GasLimit == 0 {
return errors.New("gas limit should not be 0")
}
gasUtilized := float64(payload.GasUsed) / float64(payload.GasLimit)

log.WithFields(logrus.Fields{
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.ParentHash)),
"blockNumber": payload.BlockNumber,
"gasUtilized": fmt.Sprintf("%.2f", gasUtilized),
}).Debug("Synced new payload")
return nil
}

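As a quick sanity check on the `gasUtilized` value logged above: it is simply gas used divided by gas limit, with the zero-limit case rejected first, so a payload that used 15,000,000 gas out of a 30,000,000 limit logs 0.50. A tiny standalone version:

```go
package sketch

import "errors"

// gasUtilization mirrors the calculation in logPayload: used divided by limit
// as a float, guarding against a zero gas limit.
func gasUtilization(gasUsed, gasLimit uint64) (float64, error) {
	if gasLimit == 0 {
		return 0, errors.New("gas limit should not be 0")
	}
	return float64(gasUsed) / float64(gasLimit), nil
}

// Example: gasUtilization(15_000_000, 30_000_000) returns 0.50.
```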
beacon-chain/blockchain/merge_ascii_art.go (new file, 81 lines)
@@ -0,0 +1,81 @@
|
||||
package blockchain
|
||||
|
||||
var mergeAsciiArt = `
|
||||
|
||||
+?$$$$$$?*; ;*?$$$$?*; +!$$$$$$?!;
|
||||
!##$???$@##$+ !&#@$??$&#@* +@#&$????$##+
|
||||
!##; +@#&; !##* ;$##* @#$ ;&#+
|
||||
!##; !##+ ;##$ @#@ $#&* ++;
|
||||
!##; ;@#&; *##* ?##; ;$##&$!+;
|
||||
!##?!!!?$##$+ !##+ !##+ ;!$@##&$!;
|
||||
!##@@@@$$!+ *##* ?##; ;*?@#&!
|
||||
!##; ;##$ @#@ ;?$; ?##+
|
||||
!##; ?##! ;?##+ ;##+ ;$#&;
|
||||
!##; !&#&$??$&#@* ;&#&$$??$$&#@+
|
||||
+??; ;*?$$$$?+ ;+!?$$$$$!+
|
||||
;;;;
|
||||
;+!??$$$?!*+; ;*?$@&&&&&@@$!*;
|
||||
*?@############&$?+ ;!@###############&$!;
|
||||
;!@####&@$????$$@#####@! ;?&####$?*++;++*!?@####&?;
|
||||
*@###&$*; ;*$&###@* *&###@!; ;!@###&!
|
||||
!###&!; ;?&###? *####! *&###?
|
||||
!###@+ ;$###$ +###&+ ;$###?
|
||||
;###&; $###? ;;+*!??$$$$$$$$??!$###* ;@###*
|
||||
!###! &###?$@&#####################@$?!+; +###$
|
||||
$###+ ;*?&#################################&$?*; &##&;
|
||||
$###+ ;!$&########################################&$!; &###;
|
||||
*###? ;!$################################################$*; +###@
|
||||
;@###+ +$&####################################################&?; $###!
|
||||
+###&+ *$##########################################################$+ ;$###$;
|
||||
*&###?; +$##############################################################?*@###$;
|
||||
+$###&?+ ;$#####################################################################?;
|
||||
*@####@&#####################################################################*
|
||||
+$&##################@?!*++*!$&###################&$?*++*!?$###############&*
|
||||
$###############&?+ ;!@###############@!; ;!@##############!
|
||||
;$##############&!; *&###########&!; !&#############!
|
||||
$##############@+ +@* ;$#########$; +@* ;$#############!
|
||||
?##############$; *###* $#######$; +&##! ?#############+
|
||||
+##############$ !#####! $#####@; *#####? ?############@;
|
||||
@#############@; !#######! ;&####+ *#######? $############?
|
||||
+##############+ ?#########? $###$ !#########$ +############&;
|
||||
$#############$ ;$###########? !###? !###########$; $############!
|
||||
@#############* !#############! ?###$ *#############? +############@
|
||||
;&############&; +?@#######&$+; ;&####; ;+?@#######&?+; @############;
|
||||
;#############@ +$&#&$*; ;$#####@; +$&#&$*; $############+
|
||||
;#############$ *+ ;+; ;*; *&#######&! ;*; ;+; +*; $############*
|
||||
&############@ ;$@!; +$@! ;?###########$; *@$* ;*$@+ $############*
|
||||
$############&; ;$#&$*+!@##* +@#############@+ +&#@?++$&#@; @############+
|
||||
*#############* $######&+ !#################? +@######$; +#############;
|
||||
@############$ ?####@+ ;$###################$; ;@####$ $############@
|
||||
*#############* !##@; +@#####################&* ;$##? +#############!
|
||||
$#############+ *$; ?#########################?; $! +&############&;
|
||||
;&#############! +$###########################@+ *&#############?
|
||||
+##############$*; ;?###############################$+ *$##############@;
|
||||
*###############&$?!!$###################################$?!?$&################+
|
||||
*###################################&@$$$$@&#################################!
|
||||
+&##############################&?+; ;+?&#############################?
|
||||
;$############################@; ;@###########################!
|
||||
?###########################* *##########################*
|
||||
+@#############&$!+$#######? ?########$+!$&###########@+
|
||||
!&###########&; $#######$+ +$########? +&##########?;
|
||||
;?###########&* *@#######@$!; ;$@########@* *##########$+
|
||||
;?&##########?; ;*$&####&$* ;!$&####&$* ;$#########@*
|
||||
;!@#########@!; ;++*+; ;*; ;+*++; ;!&########$*
|
||||
*$&########&$*; ;*$&#&$*; +*$&#######&?+
|
||||
;*$&#########&@$$@&#########&@$$@&########&$*;
|
||||
;+?$&##############################&$?+;
|
||||
;+!?$@&###################&$$!+;
|
||||
;++*!??$$$$$$$?!!*+;
|
||||
|
||||
;;; ;;+*++; ;;;++;;;++;;; ;+++;;;++++; ;;; ;; ;;; ;;;++;;;+++;; ;;;+++++++; ;+++++;
|
||||
;@#&+ +$&&@$@&#? @#@@@&#&@@@#$ ;$@@@@#&@@@@! !#@ !#$ +&#&; !#&@@@#&@@@&&; !#&@@@@@@@* $#&@@@&&$+
|
||||
$#?#@; ?#&!; *#$ &&;;;$#!;;*#$ ;;;*#@;;;; $#? +#&; ;@#?#$ !#!;;*#@;;;@#; ?#$;;;;;;; @#* ;!&#?
|
||||
*#$ ?#$ *#&; ;+; ++ $#! ;+; *#$ ;&#+ $#* ?#? $#! ;+; +#@ ++ ?#$ $#* ;&#*
|
||||
;&&; @#* $#$ $#! *#$ !#@; !#$ +#@ ;&#; +#@ ?#@??????! $#* $#$
|
||||
$#!;;;*#&; $#? $#! *#$ $#? ;&&; ;@#*;;;!#$ +#@ ?#@??????! $#* $#$
|
||||
!##@@@@@&#$ !#@; $#! *#$ ;&#+ $#* ?#&@@@@@##! +#@ ?#$ $#* @#*
|
||||
;&&+;;;;;;@#* ;$#$+ @$ $#! *#$ *#@!#? *#@;;;;;;+##+ +#@ ?#$ $#* +$#$
|
||||
$#* +#&; ;?&#@$$$$#$ +$$&#@$$; ;$$$$@##$$$$* $##@; ;&#+ !#@; $$@##$$* ?#&$$$$$$$! @#@$$$@&@!
|
||||
;** +*; +*!?!*+; ;******* ;***********+ ;**; ;*+ **; *******+ ;*********+ +*!!!!*;
|
||||
|
||||
`
|
||||
@@ -158,6 +158,10 @@ var (
Name: "forkchoice_updated_optimistic_node_count",
Help: "Count the number of optimistic nodes after forkchoiceUpdated EE call",
})
missedPayloadIDFilledCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "missed_payload_id_filled_count",
Help: "",
})
)

// reportSlotMetrics reports slot related metrics.

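The hunk above registers its new counter through promauto. As a quick illustration of how such a counter behaves on its own, here is a minimal, self-contained Go sketch; the metric name mirrors the diff, but the Help text and the /metrics HTTP wiring are placeholders of mine, not how Prysm exposes its metrics.

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// missedPayloadIDFilledCount mirrors the counter added in the diff above;
// promauto registers it with the default registry at package init time.
var missedPayloadIDFilledCount = promauto.NewCounter(prometheus.CounterOpts{
	Name: "missed_payload_id_filled_count",
	Help: "Illustrative help text; the diff leaves this field empty",
})

func main() {
	// Increment the counter as the fill routine would when it repairs a missing payload ID.
	missedPayloadIDFilledCount.Inc()

	// Serve every registered metric; this endpoint is illustrative only.
	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":8080", nil)
}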
@@ -13,7 +13,7 @@ import (

func testServiceOptsWithDB(t *testing.T) []Option {
beaconDB := testDB.SetupDB(t)
fcs := protoarray.New(0, 0)
fcs := protoarray.New()
return []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB)),

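The test helper above assembles the blockchain service from a slice of Option values (WithDatabase, WithStateGen, and so on). A minimal sketch of that functional-options pattern follows; every type and option name in it is invented for illustration and is not Prysm's actual API.

package main

import "fmt"

// config and Service are stand-ins; the real blockchain.Service carries many more dependencies.
type config struct {
	dbPath     string
	forkchoice string
}

type Service struct{ cfg config }

// Option mutates the configuration, in the spirit of WithDatabase / WithForkChoiceStore above.
type Option func(*config)

func WithDatabase(path string) Option   { return func(c *config) { c.dbPath = path } }
func WithForkChoice(name string) Option { return func(c *config) { c.forkchoice = name } }

func NewService(opts ...Option) *Service {
	c := config{forkchoice: "protoarray"} // default, overridable by options
	for _, o := range opts {
		o(&c)
	}
	return &Service{cfg: c}
}

func main() {
	s := NewService(WithDatabase("/tmp/beacon.db"), WithForkChoice("doubly-linked-tree"))
	fmt.Printf("%+v\n", s.cfg)
}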
@@ -5,7 +5,9 @@ import (
"context"

"github.com/pkg/errors"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/time/slots"
)

@@ -68,7 +70,8 @@ func (s *Service) NewSlot(ctx context.Context, slot types.Slot) error {
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(bj, h)
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(bj); err != nil {
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{
Epoch: bj.Epoch, Root: bytesutil.ToBytes32(bj.Root)}); err != nil {
return err
}
}

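The NewSlot change above hands forkchoice a forkchoicetypes.Checkpoint, whose root is a fixed-size [32]byte, rather than the protobuf checkpoint whose root is a byte slice, hence the bytesutil.ToBytes32 conversion. A standalone sketch of that conversion, using local stand-in types instead of the Prysm packages:

package main

import "fmt"

// pbCheckpoint stands in for the protobuf ethpb.Checkpoint (root is a byte slice).
type pbCheckpoint struct {
	Epoch uint64
	Root  []byte
}

// fcCheckpoint stands in for forkchoicetypes.Checkpoint (root is a comparable array).
type fcCheckpoint struct {
	Epoch uint64
	Root  [32]byte
}

// toBytes32 mirrors bytesutil.ToBytes32: copy at most 32 bytes into a fixed array.
func toBytes32(b []byte) [32]byte {
	var out [32]byte
	copy(out[:], b)
	return out
}

func main() {
	pb := pbCheckpoint{Epoch: 7, Root: make([]byte, 32)}
	fc := fcCheckpoint{Epoch: pb.Epoch, Root: toBytes32(pb.Root)}
	fmt.Println(fc.Epoch, fc.Root[:4])
}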
@@ -20,7 +20,7 @@ import (
|
||||
|
||||
func TestService_newSlot(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -36,11 +36,21 @@ func TestService_newSlot(t *testing.T) {
|
||||
bj, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, 0, 0)) // genesis
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 32, [32]byte{'a'}, [32]byte{}, [32]byte{}, 0, 0)) // finalized
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 64, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 0, 0)) // justified
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 96, bj, [32]byte{'a'}, [32]byte{}, 0, 0)) // best justified
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 97, [32]byte{'d'}, [32]byte{}, [32]byte{}, 0, 0)) // bad
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // genesis
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 32, [32]byte{'a'}, [32]byte{}, [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // finalized
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 64, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // justified
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 96, bj, [32]byte{'a'}, [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // best justified
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 97, [32]byte{'d'}, [32]byte{}, [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // bad
|
||||
|
||||
type args struct {
|
||||
slot types.Slot
|
||||
|
||||
@@ -78,6 +78,8 @@ func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.SignedBea
"mergeBlockParentTotalDifficulty": mergeBlockParentTD,
}).Info("Validated terminal block")

log.Info(mergeAsciiArt)

return nil
}


@@ -109,7 +109,7 @@ func Test_validateMergeBlock(t *testing.T) {
|
||||
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -152,7 +152,7 @@ func Test_validateMergeBlock(t *testing.T) {
|
||||
func Test_getBlkParentHashAndTD(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
@@ -28,7 +29,7 @@ func TestStore_OnAttestation_ErrorConditions_ProtoArray(t *testing.T) {
|
||||
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithForkChoiceStore(protoarray.New(0, 0)),
|
||||
WithForkChoiceStore(protoarray.New()),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
@@ -140,7 +141,7 @@ func TestStore_OnAttestation_ErrorConditions_DoublyLinkedTree(t *testing.T) {
|
||||
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithForkChoiceStore(doublylinkedtree.New(0, 0)),
|
||||
WithForkChoiceStore(doublylinkedtree.New()),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
@@ -250,7 +251,7 @@ func TestStore_OnAttestation_Ok_ProtoArray(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -268,7 +269,9 @@ func TestStore_OnAttestation_Ok_ProtoArray(t *testing.T) {
|
||||
copied, err = transition.ProcessSlots(ctx, copied, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
require.NoError(t, service.OnAttestation(ctx, att[0]))
|
||||
}
|
||||
|
||||
@@ -276,7 +279,7 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
fcs := doublylinkedtree.New(0, 0)
|
||||
fcs := doublylinkedtree.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -294,7 +297,9 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
|
||||
copied, err = transition.ProcessSlots(ctx, copied, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
require.NoError(t, service.OnAttestation(ctx, att[0]))
|
||||
}
|
||||
|
||||
@@ -483,7 +488,7 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_ProtoArray(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -518,7 +523,7 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_DoublyLinkedTree(t *testing
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
fcs := doublylinkedtree.New(0, 0)
|
||||
fcs := doublylinkedtree.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -599,10 +604,16 @@ func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, b32.Block.Slot, r32, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, b33.Block.Slot, r33, r32, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, b32.Block.Slot, r32, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, b33.Block.Slot, r33, r32, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
_, err = service.cfg.ForkChoiceStore.Head(ctx, r32, []uint64{})
|
||||
jc := &forkchoicetypes.Checkpoint{Epoch: 0, Root: r32}
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(jc))
|
||||
_, err = service.cfg.ForkChoiceStore.Head(ctx, []uint64{})
|
||||
require.NoError(t, err)
|
||||
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
"time"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
@@ -118,7 +119,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
}
isValidPayload, err := s.notifyNewPayload(ctx, postStateVersion, postStateHeader, signed)
if err != nil {
return errors.Wrap(err, "could not verify new payload")
return fmt.Errorf("could not verify new payload: %v", err)
}
if isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, preStateVersion, preStateHeader, signed); err != nil {
@@ -221,10 +222,12 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
}
s.store.SetJustifiedCheckptAndPayloadHash(postState.CurrentJustifiedCheckpoint(), h)
// Update Forkchoice checkpoints
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(psj); err != nil {
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{
Epoch: psj.Epoch, Root: bytesutil.ToBytes32(psj.Root)}); err != nil {
return err
}
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(psf); err != nil {
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
Epoch: psf.Epoch, Root: bytesutil.ToBytes32(psf.Root)}); err != nil {
return err
}
}
@@ -281,7 +284,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
}
fRoot := bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root)
if err := s.cfg.ForkChoiceStore.Prune(ctx, fRoot); err != nil {
return errors.Wrap(err, "could not prune proto array fork choice nodes")
return errors.Wrap(err, "could not prune fork choice nodes")
}
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(fRoot)
if err != nil {
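One side note on the first hunk above: it swaps an errors.Wrap call for fmt.Errorf with the %v verb. With %v the underlying error is flattened into the message, so errors.Is and errors.As can no longer match it; %w would preserve the chain. A tiny sketch of the difference (the sentinel error is made up for illustration):

package main

import (
	"errors"
	"fmt"
)

var errSentinel = errors.New("execution engine unavailable")

func main() {
	flattened := fmt.Errorf("could not verify new payload: %v", errSentinel) // message only
	wrapped := fmt.Errorf("could not verify new payload: %w", errSentinel)   // keeps the chain

	fmt.Println(errors.Is(flattened, errSentinel)) // false: %v drops the original error
	fmt.Println(errors.Is(wrapped, errSentinel))   // true: %w lets errors.Is/As match
}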
@@ -335,33 +338,38 @@ func getStateVersionAndPayload(st state.BeaconState) (int, *ethpb.ExecutionPaylo
|
||||
}
|
||||
|
||||
func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeaconBlock,
|
||||
blockRoots [][32]byte) ([]*ethpb.Checkpoint, []*ethpb.Checkpoint, error) {
|
||||
blockRoots [][32]byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
|
||||
defer span.End()
|
||||
|
||||
if len(blks) == 0 || len(blockRoots) == 0 {
|
||||
return nil, nil, errors.New("no blocks provided")
|
||||
return errors.New("no blocks provided")
|
||||
}
|
||||
|
||||
if len(blks) != len(blockRoots) {
|
||||
return nil, nil, errWrongBlockCount
|
||||
return errWrongBlockCount
|
||||
}
|
||||
|
||||
if err := wrapper.BeaconBlockIsNil(blks[0]); err != nil {
|
||||
return nil, nil, invalidBlock{err}
|
||||
return invalidBlock{err}
|
||||
}
|
||||
b := blks[0].Block()
|
||||
|
||||
// Retrieve incoming block's pre state.
|
||||
if err := s.verifyBlkPreState(ctx, b); err != nil {
|
||||
return nil, nil, err
|
||||
return err
|
||||
}
|
||||
preState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, bytesutil.ToBytes32(b.ParentRoot()))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return err
|
||||
}
|
||||
if preState == nil || preState.IsNil() {
|
||||
return nil, nil, fmt.Errorf("nil pre state for slot %d", b.Slot())
|
||||
return fmt.Errorf("nil pre state for slot %d", b.Slot())
|
||||
}
|
||||
|
||||
// Fill in missing blocks
|
||||
if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0].Block(), preState.CurrentJustifiedCheckpoint(), preState.FinalizedCheckpoint()); err != nil {
|
||||
return errors.Wrap(err, "could not fill in missing blocks to forkchoice")
|
||||
}
|
||||
|
||||
jCheckpoints := make([]*ethpb.Checkpoint, len(blks))
|
||||
@@ -382,7 +390,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
|
||||
for i, b := range blks {
|
||||
v, h, err := getStateVersionAndPayload(preState)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return err
|
||||
}
|
||||
preVersionAndHeaders[i] = &versionAndHeader{
|
||||
version: v,
|
||||
@@ -391,7 +399,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
|
||||
|
||||
set, preState, err = transition.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, b)
|
||||
if err != nil {
|
||||
return nil, nil, invalidBlock{err}
|
||||
return invalidBlock{err}
|
||||
}
|
||||
// Save potential boundary states.
|
||||
if slots.IsEpochStart(preState.Slot()) {
|
||||
@@ -402,7 +410,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
|
||||
|
||||
v, h, err = getStateVersionAndPayload(preState)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return err
|
||||
}
|
||||
postVersionAndHeaders[i] = &versionAndHeader{
|
||||
version: v,
|
||||
@@ -412,52 +420,71 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
|
||||
}
|
||||
verify, err := sigSet.Verify()
|
||||
if err != nil {
|
||||
return nil, nil, invalidBlock{err}
|
||||
return invalidBlock{err}
|
||||
}
|
||||
if !verify {
|
||||
return nil, nil, errors.New("batch block signature verification failed")
|
||||
return errors.New("batch block signature verification failed")
|
||||
}
|
||||
|
||||
// blocks have been verified, add them to forkchoice and call the engine
|
||||
// blocks have been verified, save them and call the engine
|
||||
pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, len(blks))
|
||||
var isValidPayload bool
|
||||
for i, b := range blks {
|
||||
isValidPayload, err := s.notifyNewPayload(ctx,
|
||||
isValidPayload, err = s.notifyNewPayload(ctx,
|
||||
postVersionAndHeaders[i].version,
|
||||
postVersionAndHeaders[i].header, b)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return err
|
||||
}
|
||||
if isValidPayload {
|
||||
if err := s.validateMergeTransitionBlock(ctx, preVersionAndHeaders[i].version,
|
||||
preVersionAndHeaders[i].header, b); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.insertBlockToForkChoiceStore(ctx, b.Block(), blockRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if isValidPayload {
|
||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoots[i]); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not set optimistic block to valid")
|
||||
return err
|
||||
}
|
||||
}
|
||||
args := &forkchoicetypes.BlockAndCheckpoints{Block: b.Block(),
|
||||
JustifiedCheckpoint: jCheckpoints[i],
|
||||
FinalizedCheckpoint: fCheckpoints[i]}
|
||||
pendingNodes[len(blks)-i-1] = args
|
||||
s.saveInitSyncBlock(blockRoots[i], b)
|
||||
if err = s.handleBlockAfterBatchVerify(ctx, b, blockRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, nil, err
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Insert all nodes but the last one to forkchoice
|
||||
if err := s.cfg.ForkChoiceStore.InsertOptimisticChain(ctx, pendingNodes); err != nil {
|
||||
return errors.Wrap(err, "could not insert batch to forkchoice")
|
||||
}
|
||||
// Insert the last block to forkchoice
|
||||
lastBR := blockRoots[len(blks)-1]
|
||||
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastBR); err != nil {
|
||||
return errors.Wrap(err, "could not insert last block in batch to forkchoice")
|
||||
}
|
||||
// Prune forkchoice store only if the new finalized checkpoint is higher
|
||||
// than the finalized checkpoint in forkchoice store.
|
||||
if fCheckpoints[len(blks)-1].Epoch > s.cfg.ForkChoiceStore.FinalizedCheckpoint().Epoch {
|
||||
if err := s.cfg.ForkChoiceStore.Prune(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(fCheckpoints[len(blks)-1].Root))); err != nil {
|
||||
return errors.Wrap(err, "could not prune fork choice nodes")
|
||||
}
|
||||
}
|
||||
|
||||
// Set their optimistic status
|
||||
if isValidPayload {
|
||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, lastBR); err != nil {
|
||||
return errors.Wrap(err, "could not set optimistic block to valid")
|
||||
}
|
||||
}
|
||||
|
||||
for r, st := range boundaries {
|
||||
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
|
||||
return nil, nil, err
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Also saves the last post state which to be used as pre state for the next batch.
|
||||
lastB := blks[len(blks)-1]
|
||||
lastBR := blockRoots[len(blockRoots)-1]
|
||||
if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
|
||||
return nil, nil, err
|
||||
return err
|
||||
}
|
||||
arg := &notifyForkchoiceUpdateArg{
|
||||
headState: preState,
|
||||
@@ -465,12 +492,9 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
|
||||
headBlock: lastB.Block(),
|
||||
}
|
||||
if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
|
||||
return nil, nil, err
|
||||
return err
|
||||
}
|
||||
if err := s.saveHeadNoDB(ctx, lastB, lastBR, preState); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return fCheckpoints, jCheckpoints, nil
|
||||
return s.saveHeadNoDB(ctx, lastB, lastBR, preState)
|
||||
}
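In the new onBlockBatch diffed above, each verified block is packed into a forkchoicetypes.BlockAndCheckpoints and stored at index len(blks)-i-1, so the highest slot ends up first and lower slots sit at the end of the slice before the whole chain goes to InsertOptimisticChain in a single call. A toy, self-contained sketch of that reverse-ordering step (the struct below is a stand-in, not the real type):

package main

import "fmt"

// blockAndCheckpoints is a toy stand-in for forkchoicetypes.BlockAndCheckpoints.
type blockAndCheckpoints struct {
	slot uint64
}

func main() {
	// Blocks arrive in ascending slot order, as in a sync batch.
	blks := []blockAndCheckpoints{{slot: 33}, {slot: 34}, {slot: 35}, {slot: 36}}

	// Mirror pendingNodes[len(blks)-i-1] = args: the highest slot ends up first,
	// lower slots at the end, which is the order the chain inserter expects.
	pending := make([]*blockAndCheckpoints, len(blks))
	for i := range blks {
		b := blks[i]
		pending[len(blks)-i-1] = &b
	}

	for _, p := range pending {
		fmt.Println(p.slot) // 36, 35, 34, 33
	}
}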
|
||||
|
||||
// handles a block after the block's batch has been verified, where we can save blocks
|
||||
@@ -521,6 +545,10 @@ func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed interf
|
||||
return err
|
||||
}
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(fCheckpoint, h)
|
||||
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
|
||||
Epoch: fCheckpoint.Epoch, Root: bytesutil.ToBytes32(fCheckpoint.Root)}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -537,16 +565,20 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
|
||||
return err
|
||||
}
|
||||
// Update caches for the next epoch at epoch boundary slot - 1.
|
||||
if err := helpers.UpdateCommitteeCache(copied, coreTime.CurrentEpoch(copied)); err != nil {
|
||||
if err := helpers.UpdateCommitteeCache(ctx, copied, coreTime.CurrentEpoch(copied)); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := helpers.UpdateProposerIndicesInCache(ctx, copied); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if postState.Slot() >= s.nextEpochBoundarySlot {
|
||||
if err := reportEpochMetrics(ctx, postState, s.head.state); err != nil {
|
||||
s.headLock.RLock()
|
||||
st := s.head.state
|
||||
s.headLock.RUnlock()
|
||||
if err := reportEpochMetrics(ctx, postState, st); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var err error
|
||||
s.nextEpochBoundarySlot, err = slots.EpochStart(coreTime.NextEpoch(postState))
|
||||
if err != nil {
|
||||
@@ -555,7 +587,7 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
|
||||
|
||||
// Update caches at epoch boundary slot.
|
||||
// The following updates have short cut to return nil cheaply if fulfilled during boundary slot - 1.
|
||||
if err := helpers.UpdateCommitteeCache(postState, coreTime.CurrentEpoch(postState)); err != nil {
|
||||
if err := helpers.UpdateCommitteeCache(ctx, postState, coreTime.CurrentEpoch(postState)); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := helpers.UpdateProposerIndicesInCache(ctx, postState); err != nil {
|
||||
@@ -568,14 +600,13 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
|
||||
|
||||
// This feeds in the block and block's attestations to fork choice store. It's allows fork choice store
|
||||
// to gain information on the most current chain.
|
||||
func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Context, blk interfaces.BeaconBlock, root [32]byte,
|
||||
st state.BeaconState) error {
|
||||
func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Context, blk interfaces.BeaconBlock, root [32]byte, st state.BeaconState) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.insertBlockAndAttestationsToForkChoiceStore")
|
||||
defer span.End()
|
||||
|
||||
fCheckpoint := st.FinalizedCheckpoint()
|
||||
jCheckpoint := st.CurrentJustifiedCheckpoint()
|
||||
if err := s.insertBlockToForkChoiceStore(ctx, blk, root, fCheckpoint, jCheckpoint); err != nil {
|
||||
if err := s.insertBlockToForkChoiceStore(ctx, blk, root, st, fCheckpoint, jCheckpoint); err != nil {
|
||||
return err
|
||||
}
|
||||
// Feed in block's attestations to fork choice store.
|
||||
@@ -593,21 +624,11 @@ func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Contex
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk interfaces.BeaconBlock,
|
||||
root [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
|
||||
func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk interfaces.BeaconBlock, root [32]byte, st state.BeaconState, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
|
||||
if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
|
||||
return err
|
||||
}
|
||||
// Feed in block to fork choice store.
|
||||
|
||||
payloadHash, err := getBlockPayloadHash(blk)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return s.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx,
|
||||
blk.Slot(), root, bytesutil.ToBytes32(blk.ParentRoot()), payloadHash,
|
||||
jCheckpoint.Epoch,
|
||||
fCheckpoint.Epoch)
|
||||
return s.cfg.ForkChoiceStore.InsertNode(ctx, st, root)
|
||||
}
|
||||
|
||||
// Inserts attester slashing indices to fork choice store.
|
||||
@@ -621,18 +642,6 @@ func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashing
|
||||
}
|
||||
}
|
||||
|
||||
func getBlockPayloadHash(blk interfaces.BeaconBlock) ([32]byte, error) {
|
||||
payloadHash := [32]byte{}
|
||||
if blocks.IsPreBellatrixVersion(blk.Version()) {
|
||||
return payloadHash, nil
|
||||
}
|
||||
payload, err := blk.Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return payloadHash, err
|
||||
}
|
||||
return bytesutil.ToBytes32(payload.BlockHash), nil
|
||||
}
|
||||
|
||||
// This saves post state info to DB or cache. This also saves post state info to fork choice store.
|
||||
// Post state info consists of processed block and state. Do not call this method unless the block and state are verified.
|
||||
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interfaces.SignedBeaconBlock, st state.BeaconState) error {
|
||||
@@ -650,10 +659,6 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interface
|
||||
// This removes the attestations from the mem pool. It will only remove the attestations if input root `r` is canonical,
|
||||
// meaning the block `b` is part of the canonical chain.
|
||||
func (s *Service) pruneCanonicalAttsFromPool(ctx context.Context, r [32]byte, b interfaces.SignedBeaconBlock) error {
|
||||
if !features.Get().CorrectlyPruneCanonicalAtts {
|
||||
return nil
|
||||
}
|
||||
|
||||
canonical, err := s.IsCanonical(ctx, r)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -709,3 +714,53 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
}
return s.validateMergeBlock(ctx, blk)
}

// This routine checks if there is a cached proposer payload ID available for the next slot proposer.
// If there is not, it will call forkchoice updated with the correct payload attribute then cache the payload ID.
func (s *Service) fillMissingPayloadIDRoutine(ctx context.Context, stateFeed *event.Feed) {
// Wait for state to be initialized.
stateChannel := make(chan *feed.Event, 1)
stateSub := stateFeed.Subscribe(stateChannel)
go func() {
select {
case <-s.ctx.Done():
stateSub.Unsubscribe()
return
case <-stateChannel:
stateSub.Unsubscribe()
break
}

ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for {
select {
case ti := <-ticker.C:
if !atHalfSlot(ti) {
continue
}
_, id, has := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(s.CurrentSlot() + 1)
// There exists proposer for next slot, but we haven't called fcu w/ payload attribute yet.
if has && id == [8]byte{} {
if _, err := s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: s.headState(ctx),
headRoot: s.headRoot(),
headBlock: s.headBlock().Block(),
}); err != nil {
log.WithError(err).Error("Could not prepare payload on empty ID")
}
missedPayloadIDFilledCount.Inc()
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
return
}
}
}()
}

// Returns true if time `t` is halfway through the slot in sec.
func atHalfSlot(t time.Time) bool {
s := params.BeaconConfig().SecondsPerSlot
return uint64(t.Second())%s == s/2
}

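atHalfSlot above reduces the wall-clock second within the minute modulo SecondsPerSlot and compares it with half a slot. With 12-second slots that makes the check pass at seconds 6, 18, 30, 42 and 54 of each minute. A runnable check of that arithmetic follows; the constant is hard-coded here for illustration instead of being read from params.BeaconConfig().

package main

import (
	"fmt"
	"time"
)

// secondsPerSlot is hard-coded for illustration; the real code reads
// params.BeaconConfig().SecondsPerSlot.
const secondsPerSlot = 12

// atHalfSlot reproduces the check from the diff above.
func atHalfSlot(t time.Time) bool {
	return uint64(t.Second())%secondsPerSlot == secondsPerSlot/2
}

func main() {
	base := time.Date(2022, 4, 1, 12, 0, 0, 0, time.UTC)
	for sec := 0; sec < 60; sec++ {
		if atHalfSlot(base.Add(time.Duration(sec) * time.Second)) {
			fmt.Println("half-slot tick at second", sec) // 6, 18, 30, 42, 54
		}
	}
}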
@@ -7,6 +7,9 @@ import (

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
@@ -215,7 +218,8 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
}
s.store.SetJustifiedCheckptAndPayloadHash(cpt, h)
// Update forkchoice's justified checkpoint
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(cpt); err != nil {
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{
Epoch: cpt.Epoch, Root: bytesutil.ToBytes32(cpt.Root)}); err != nil {
return err
}
}
@@ -241,8 +245,8 @@ func (s *Service) updateJustifiedInitSync(ctx context.Context, cp *ethpb.Checkpo
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(cp, h)

return nil
return s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{
Epoch: cp.Epoch, Root: bytesutil.ToBytes32(cp.Root)})
}

func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) error {
@@ -262,7 +266,7 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err

fRoot := bytesutil.ToBytes32(cp.Root)
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(fRoot)
if err != nil {
if err != nil && err != protoarray.ErrUnknownNodeRoot && err != doublylinkedtree.ErrNilNode {
return err
}
if !optimistic {
@@ -346,11 +350,8 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot types.Slot)
// This is useful for block tree visualizer and additional vote accounting.
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfaces.BeaconBlock,
fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
pendingNodes := make([]interfaces.BeaconBlock, 0)
pendingRoots := make([][32]byte, 0)
pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, 0)

parentRoot := bytesutil.ToBytes32(blk.ParentRoot())
slot := blk.Slot()
// Fork choice only matters from last finalized slot.
finalized, err := s.store.FinalizedCheckpt()
if err != nil {
@@ -360,39 +361,31 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
if err != nil {
return err
}
higherThanFinalized := slot > fSlot
pendingNodes = append(pendingNodes, &forkchoicetypes.BlockAndCheckpoints{Block: blk,
JustifiedCheckpoint: jCheckpoint, FinalizedCheckpoint: fCheckpoint})
// As long as parent node is not in fork choice store, and parent node is in DB.
for !s.cfg.ForkChoiceStore.HasNode(parentRoot) && s.cfg.BeaconDB.HasBlock(ctx, parentRoot) && higherThanFinalized {
b, err := s.getBlock(ctx, parentRoot)
root := bytesutil.ToBytes32(blk.ParentRoot())
for !s.cfg.ForkChoiceStore.HasNode(root) && s.cfg.BeaconDB.HasBlock(ctx, root) {
b, err := s.getBlock(ctx, root)
if err != nil {
return err
}

pendingNodes = append(pendingNodes, b.Block())
copiedRoot := parentRoot
pendingRoots = append(pendingRoots, copiedRoot)
parentRoot = bytesutil.ToBytes32(b.Block().ParentRoot())
slot = b.Block().Slot()
higherThanFinalized = slot > fSlot
}

// Insert parent nodes to fork choice store in reverse order.
// Lower slots should be at the end of the list.
for i := len(pendingNodes) - 1; i >= 0; i-- {
b := pendingNodes[i]
r := pendingRoots[i]
payloadHash, err := getBlockPayloadHash(blk)
if err != nil {
return err
}
if err := s.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx,
b.Slot(), r, bytesutil.ToBytes32(b.ParentRoot()), payloadHash,
jCheckpoint.Epoch,
fCheckpoint.Epoch); err != nil {
return errors.Wrap(err, "could not process block for proto array fork choice")
if b.Block().Slot() <= fSlot {
break
}
root = bytesutil.ToBytes32(b.Block().ParentRoot())
args := &forkchoicetypes.BlockAndCheckpoints{Block: b.Block(),
JustifiedCheckpoint: jCheckpoint,
FinalizedCheckpoint: fCheckpoint}
pendingNodes = append(pendingNodes, args)
}
return nil
if len(pendingNodes) == 1 {
return nil
}
if root != s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root)) {
return errNotDescendantOfFinalized
}
return s.cfg.ForkChoiceStore.InsertOptimisticChain(ctx, pendingNodes)
}

// inserts finalized deposits into our finalized deposit trie.

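The rewritten fillInForkChoiceMissingBlocks above walks parent roots backwards from the incoming block, collecting database blocks that forkchoice does not know yet, and stops at a known node, a missing block, or the finalized slot; unless only the incoming block was collected, the walk must end at the finalized root or the chain is rejected with errNotDescendantOfFinalized. A simplified, self-contained sketch of that walk, using plain maps as stand-ins for forkchoice and the block DB and omitting ensureRootNotZeros and the real types:

package main

import (
	"errors"
	"fmt"
)

type block struct {
	slot   uint64
	root   string
	parent string
}

var errNotDescendantOfFinalized = errors.New("not descendant of finalized root")

// collectMissing mirrors the walk above: gather DB blocks above the finalized slot
// that forkchoice does not know about yet, child first, ancestors after.
func collectMissing(start block, inForkchoice map[string]bool, db map[string]block,
	finalizedSlot uint64, finalizedRoot string) ([]block, error) {
	pending := []block{start}
	root := start.parent
	for !inForkchoice[root] {
		b, ok := db[root]
		if !ok || b.slot <= finalizedSlot {
			break
		}
		root = b.parent
		pending = append(pending, b)
	}
	if len(pending) == 1 {
		return pending, nil // parent already known, nothing to backfill
	}
	if root != finalizedRoot {
		return nil, errNotDescendantOfFinalized
	}
	return pending, nil
}

func main() {
	db := map[string]block{
		"b": {slot: 33, root: "b", parent: "fin"},
		"c": {slot: 34, root: "c", parent: "b"},
	}
	fc := map[string]bool{"fin": true}
	got, err := collectMissing(block{slot: 35, root: "d", parent: "c"}, fc, db, 32, "fin")
	fmt.Println(len(got), err) // 3 <nil>
}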
@@ -21,11 +21,11 @@ import (
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
|
||||
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
|
||||
@@ -47,7 +47,7 @@ func TestStore_OnBlock_ProtoArray(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -150,7 +150,7 @@ func TestStore_OnBlock_DoublyLinkedTree(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := doublylinkedtree.New(0, 0)
|
||||
fcs := doublylinkedtree.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -253,7 +253,7 @@ func TestStore_OnBlock_ProposerBoostEarly(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := doublylinkedtree.New(0, 0)
|
||||
fcs := doublylinkedtree.New()
|
||||
opts := []Option{
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
@@ -268,7 +268,7 @@ func TestStore_OnBlock_ProposerBoostEarly(t *testing.T) {
|
||||
SecondsIntoSlot: 0,
|
||||
}
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.BoostProposerRoot(ctx, args))
|
||||
_, err = service.cfg.ForkChoiceStore.Head(ctx, params.BeaconConfig().ZeroHash, []uint64{})
|
||||
_, err = service.cfg.ForkChoiceStore.Head(ctx, []uint64{})
|
||||
require.ErrorContains(t, "could not apply proposer boost score: invalid proposer boost root", err)
|
||||
}
|
||||
|
||||
@@ -293,7 +293,7 @@ func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})
|
||||
|
||||
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
|
||||
service.cfg.ForkChoiceStore = protoarray.New()
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
service.saveInitSyncBlock(gRoot, wsb)
|
||||
@@ -305,7 +305,7 @@ func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
|
||||
var blks []interfaces.SignedBeaconBlock
|
||||
var blkRoots [][32]byte
|
||||
var firstState state.BeaconState
|
||||
for i := 1; i < 10; i++ {
|
||||
for i := 1; i < 97; i++ {
|
||||
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), types.Slot(i))
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
@@ -331,9 +331,78 @@ func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
|
||||
rBlock.Block.ParentRoot = gRoot[:]
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[0]))
|
||||
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[0], firstState))
|
||||
_, _, err = service.onBlockBatch(ctx, blks, blkRoots[1:])
|
||||
err = service.onBlockBatch(ctx, blks, blkRoots[1:])
|
||||
require.ErrorIs(t, errWrongBlockCount, err)
|
||||
_, _, err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
|
||||
service.originBlockRoot = blkRoots[1]
|
||||
err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
|
||||
require.NoError(t, err)
|
||||
jcp, err := service.store.JustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
jroot := bytesutil.ToBytes32(jcp.Root)
|
||||
require.Equal(t, blkRoots[63], jroot)
|
||||
require.Equal(t, types.Epoch(2), service.cfg.ForkChoiceStore.JustifiedCheckpoint().Epoch)
|
||||
}
|
||||
|
||||
func TestStore_OnBlockBatch_PruneOK(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
gRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
service.cfg.ForkChoiceStore = protoarray.New()
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
service.saveInitSyncBlock(gRoot, wsb)
|
||||
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
|
||||
bState := st.Copy()
|
||||
|
||||
var blks []interfaces.SignedBeaconBlock
|
||||
var blkRoots [][32]byte
|
||||
var firstState state.BeaconState
|
||||
for i := 1; i < 128; i++ {
|
||||
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), types.Slot(i))
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
bState, err = transition.ExecuteStateTransition(ctx, bState, wsb)
|
||||
if i == 32 {
|
||||
firstState = bState.Copy()
|
||||
}
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
service.saveInitSyncBlock(root, wsb)
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
blks = append(blks, wsb)
|
||||
blkRoots = append(blkRoots, root)
|
||||
}
|
||||
|
||||
for i := 0; i < 32; i++ {
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[i]))
|
||||
}
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: blkRoots[31][:], Epoch: 1}, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: blkRoots[31][:], Epoch: 1}, [32]byte{'b'})
|
||||
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[31], firstState))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, firstState, blkRoots[31]))
|
||||
err = service.onBlockBatch(ctx, blks[32:], blkRoots[32:])
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -358,7 +427,7 @@ func TestStore_OnBlockBatch_DoublyLinkedTree(t *testing.T) {
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})
|
||||
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New()
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
service.saveInitSyncBlock(gRoot, wsb)
|
||||
@@ -370,7 +439,7 @@ func TestStore_OnBlockBatch_DoublyLinkedTree(t *testing.T) {
|
||||
var blks []interfaces.SignedBeaconBlock
|
||||
var blkRoots [][32]byte
|
||||
var firstState state.BeaconState
|
||||
for i := 1; i < 10; i++ {
|
||||
for i := 1; i < 97; i++ {
|
||||
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), types.Slot(i))
|
||||
require.NoError(t, err)
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
|
||||
@@ -396,10 +465,16 @@ func TestStore_OnBlockBatch_DoublyLinkedTree(t *testing.T) {
|
||||
rBlock.Block.ParentRoot = gRoot[:]
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[0]))
|
||||
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[0], firstState))
|
||||
_, _, err = service.onBlockBatch(ctx, blks, blkRoots[1:])
|
||||
err = service.onBlockBatch(ctx, blks, blkRoots[1:])
|
||||
require.ErrorIs(t, errWrongBlockCount, err)
|
||||
_, _, err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
|
||||
service.originBlockRoot = blkRoots[1]
|
||||
err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
|
||||
require.NoError(t, err)
|
||||
jcp, err := service.store.JustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
jroot := bytesutil.ToBytes32(jcp.Root)
|
||||
require.Equal(t, blkRoots[63], jroot)
|
||||
require.Equal(t, types.Epoch(2), service.cfg.ForkChoiceStore.JustifiedCheckpoint().Epoch)
|
||||
}
|
||||
|
||||
func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
|
||||
@@ -421,7 +496,7 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})
|
||||
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New()
|
||||
service.saveInitSyncBlock(gRoot, wsb)
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
bState := st.Copy()
|
||||
@@ -452,10 +527,9 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
|
||||
rBlock.Block.ParentRoot = gRoot[:]
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[0]))
|
||||
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[0], firstState))
|
||||
cp1, cp2, err := service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
|
||||
service.originBlockRoot = blkRoots[1]
|
||||
err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, blkCount-1, len(cp1))
|
||||
require.Equal(t, blkCount-1, len(cp2))
|
||||
}
|
||||
|
||||
func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
|
||||
@@ -503,7 +577,7 @@ func TestShouldUpdateJustified_ReturnFalse_ProtoArray(t *testing.T) {
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
|
||||
service.cfg.ForkChoiceStore = protoarray.New()
|
||||
lastJustifiedBlk := util.NewBeaconBlock()
|
||||
lastJustifiedBlk.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, 32)
|
||||
lastJustifiedRoot, err := lastJustifiedBlk.Block.HashTreeRoot()
|
||||
@@ -536,7 +610,7 @@ func TestShouldUpdateJustified_ReturnFalse_DoublyLinkedTree(t *testing.T) {
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New()
|
||||
lastJustifiedBlk := util.NewBeaconBlock()
|
||||
lastJustifiedBlk.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, 32)
|
||||
lastJustifiedRoot, err := lastJustifiedBlk.Block.HashTreeRoot()
|
||||
@@ -583,7 +657,7 @@ func TestCachedPreState_CanGetFromStateSummary_ProtoArray(t *testing.T) {
|
||||
gRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
|
||||
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
|
||||
service.cfg.ForkChoiceStore = protoarray.New()
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
service.saveInitSyncBlock(gRoot, wsb)
|
||||
@@ -620,7 +694,7 @@ func TestCachedPreState_CanGetFromStateSummary_DoublyLinkedTree(t *testing.T) {
|
||||
gRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New()
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
service.saveInitSyncBlock(gRoot, wsb)
|
||||
@@ -654,7 +728,7 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
|
||||
gRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
|
||||
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
|
||||
service.cfg.ForkChoiceStore = protoarray.New()
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
service.saveInitSyncBlock(gRoot, wsb)
|
||||
@@ -685,7 +759,7 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(protoarray.New(0, 0)),
|
||||
WithForkChoiceStore(protoarray.New()),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
@@ -731,8 +805,7 @@ func TestFillForkChoiceMissingBlocks_CanSave_ProtoArray(t *testing.T) {
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
|
||||
service.cfg.ForkChoiceStore = protoarray.New()
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
@@ -755,12 +828,14 @@ func TestFillForkChoiceMissingBlocks_CanSave_ProtoArray(t *testing.T) {
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{})
|
||||
err = service.fillInForkChoiceMissingBlocks(
|
||||
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
|
||||
assert.Equal(t, 5, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
|
||||
// 4 nodes from the block tree 1. B3 - B4 - B6 - B8
|
||||
assert.Equal(t, 4, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[3])), "Didn't save node")
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[4])), "Didn't save node")
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[6])), "Didn't save node")
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[8])), "Didn't save node")
|
||||
@@ -776,8 +851,7 @@ func TestFillForkChoiceMissingBlocks_CanSave_DoublyLinkedTree(t *testing.T) {
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New()
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
@@ -801,12 +875,14 @@ func TestFillForkChoiceMissingBlocks_CanSave_DoublyLinkedTree(t *testing.T) {
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{})
|
||||
err = service.fillInForkChoiceMissingBlocks(
|
||||
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
|
||||
assert.Equal(t, 5, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
|
||||
assert.Equal(t, 4, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[3])), "Didn't save node")
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[4])), "Didn't save node")
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[6])), "Didn't save node")
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[8])), "Didn't save node")
|
||||
@@ -822,8 +898,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_ProtoArray(t *testing.T) {
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
|
||||
service.cfg.ForkChoiceStore = protoarray.New()
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
@@ -847,14 +922,15 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_ProtoArray(t *testing.T) {
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{})
|
||||
err = service.fillInForkChoiceMissingBlocks(
|
||||
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
|
||||
assert.Equal(t, 5, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
|
||||
// 4 nodes from the block tree 1. B3 - B4 - B6 - B8
|
||||
assert.Equal(t, 4, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
|
||||
// Ensure all roots and their respective blocks exist.
|
||||
wantedRoots := [][]byte{roots[0], roots[3], roots[4], roots[6], roots[8]}
|
||||
wantedRoots := [][]byte{roots[3], roots[4], roots[6], roots[8]}
|
||||
for i, rt := range wantedRoots {
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(rt)), fmt.Sprintf("Didn't save node: %d", i))
|
||||
assert.Equal(t, true, service.cfg.BeaconDB.HasBlock(context.Background(), bytesutil.ToBytes32(rt)))
|
||||
@@ -871,8 +947,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_DoublyLinkedTree(t *testing.T) {
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New()
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
@@ -896,14 +971,15 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_DoublyLinkedTree(t *testing.T) {
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: roots[0]}, [32]byte{})
|
||||
err = service.fillInForkChoiceMissingBlocks(
|
||||
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
|
||||
assert.Equal(t, 5, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
|
||||
// 5 nodes from the block tree 1. B3 - B4 - B6 - B8
|
||||
assert.Equal(t, 4, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
|
||||
// Ensure all roots and their respective blocks exist.
|
||||
wantedRoots := [][]byte{roots[0], roots[3], roots[4], roots[6], roots[8]}
|
||||
wantedRoots := [][]byte{roots[3], roots[4], roots[6], roots[8]}
|
||||
for i, rt := range wantedRoots {
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(rt)), fmt.Sprintf("Didn't save node: %d", i))
|
||||
assert.Equal(t, true, service.cfg.BeaconDB.HasBlock(context.Background(), bytesutil.ToBytes32(rt)))
|
||||
@@ -920,9 +996,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_ProtoArray(t *testing.T) {
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
|
||||
// Set finalized epoch to 1.
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 1}, [32]byte{})
|
||||
service.cfg.ForkChoiceStore = protoarray.New()
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
@@ -936,7 +1010,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_ProtoArray(t *testing.T) {
|
||||
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
|
||||
|
||||
// Define a tree branch, slot 63 <- 64 <- 65
|
||||
// Define a tree branch, slot 63 <- 64 <- 65 <- 66
|
||||
b63 := util.NewBeaconBlock()
|
||||
b63.Block.Slot = 63
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(b63)
|
||||
@@ -955,20 +1029,28 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_ProtoArray(t *testing.T) {
|
||||
b65 := util.NewBeaconBlock()
|
||||
b65.Block.Slot = 65
|
||||
b65.Block.ParentRoot = r64[:]
|
||||
r65, err := b65.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(b65)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
b66 := util.NewBeaconBlock()
|
||||
b66.Block.Slot = 66
|
||||
b66.Block.ParentRoot = r65[:]
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(b66)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
|
||||
beaconState, _ := util.DeterministicGenesisState(t, 32)
|
||||
// Set finalized epoch to 2.
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{Epoch: 2, Root: r64[:]}, [32]byte{})
|
||||
err = service.fillInForkChoiceMissingBlocks(
|
||||
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
// There should be 2 nodes, block 65 and block 64.
|
||||
assert.Equal(t, 2, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
|
||||
|
||||
// Block with slot 63 should be in fork choice because it's less than finalized epoch 1.
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r63), "Didn't save node")
|
||||
// We should have saved 1 node: block 65
|
||||
assert.Equal(t, 1, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r65), "Didn't save node")
|
||||
}
|
||||
|
||||
func TestFillForkChoiceMissingBlocks_FilterFinalized_DoublyLinkedTree(t *testing.T) {
|
||||
@@ -981,9 +1063,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_DoublyLinkedTree(t *testing
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
|
||||
// Set finalized epoch to 1.
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Epoch: 1}, [32]byte{})
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New()
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
@@ -1016,27 +1096,75 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_DoublyLinkedTree(t *testing
|
||||
b65 := util.NewBeaconBlock()
|
||||
b65.Block.Slot = 65
|
||||
b65.Block.ParentRoot = r64[:]
|
||||
r65, err := b65.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(b65)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
b66 := util.NewBeaconBlock()
|
||||
b66.Block.Slot = 66
|
||||
b66.Block.ParentRoot = r65[:]
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(b66)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
|
||||
beaconState, _ := util.DeterministicGenesisState(t, 32)
|
||||
|
||||
// Set finalized epoch to 2.
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Epoch: 2, Root: r64[:]}, [32]byte{})
|
||||
err = service.fillInForkChoiceMissingBlocks(
|
||||
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
// There should be 2 nodes, block 65 and block 64.
|
||||
assert.Equal(t, 2, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
|
||||
// There should be 1 node: block 65
|
||||
assert.Equal(t, 1, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r65), "Didn't save node")
|
||||
}
|
||||
|
||||
// Block with slot 63 should be in fork choice because it's less than finalized epoch 1.
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r63), "Didn't save node")
|
||||
func TestFillForkChoiceMissingBlocks_FinalizedSibling_DoublyLinkedTree(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New()
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
validGenesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
|
||||
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
beaconState, _ := util.DeterministicGenesisState(t, 32)
|
||||
blk := util.NewBeaconBlock()
|
||||
blk.Block.Slot = 9
|
||||
blk.Block.ParentRoot = roots[8]
|
||||
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: roots[1]}, [32]byte{})
|
||||
err = service.fillInForkChoiceMissingBlocks(
|
||||
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
|
||||
require.ErrorIs(t, errNotDescendantOfFinalized, err)
|
||||
}
|
||||
|
||||
// blockTree1 constructs the following tree:
|
||||
// /- B1
|
||||
// B0 /- B5 - B7
|
||||
// \- B3 - B4 - B6 - B8
|
||||
// (B1 and B3 are from the same slot)
|
||||
func blockTree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][]byte, error) {
|
||||
genesisRoot = bytesutil.PadTo(genesisRoot, 32)
|
||||
b0 := util.NewBeaconBlock()
|
||||
@@ -1144,7 +1272,7 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -1219,7 +1347,9 @@ func TestAncestor_CanUseForkchoice(t *testing.T) {
|
||||
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), params.BeaconConfig().ZeroHash, 0, 0)) // Saves blocks to fork choice store.
|
||||
state, blkRoot, err := prepareForkchoiceState(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
}
|
||||
|
||||
r, err := service.ancestor(context.Background(), r200[:], 150)
|
||||
@@ -1233,7 +1363,7 @@ func TestAncestor_CanUseDB(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -1266,7 +1396,9 @@ func TestAncestor_CanUseDB(t *testing.T) {
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb)) // Saves blocks to DB.
|
||||
}
|
||||
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(context.Background(), 200, r200, r200, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
state, blkRoot, err := prepareForkchoiceState(context.Background(), 200, r200, r200, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
r, err := service.ancestor(context.Background(), r200[:], 150)
|
||||
require.NoError(t, err)
|
||||
@@ -1293,7 +1425,7 @@ func TestVerifyBlkDescendant(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -1438,7 +1570,7 @@ func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
|
||||
func TestOnBlock_CanFinalize(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
opts := []Option{
|
||||
@@ -1447,6 +1579,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
|
||||
WithForkChoiceStore(fcs),
|
||||
WithDepositCache(depositCache),
|
||||
WithStateNotifier(&mock.MockStateNotifier{}),
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
@@ -1494,7 +1627,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
|
||||
func TestOnBlock_NilBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
opts := []Option{
|
||||
@@ -1513,7 +1646,7 @@ func TestOnBlock_NilBlock(t *testing.T) {
|
||||
func TestOnBlock_InvalidSignature(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
opts := []Option{
|
||||
@@ -1554,7 +1687,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
|
||||
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
opts := []Option{
|
||||
@@ -1563,6 +1696,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
|
||||
WithForkChoiceStore(fcs),
|
||||
WithDepositCache(depositCache),
|
||||
WithStateNotifier(&mock.MockStateNotifier{}),
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
@@ -1685,11 +1819,6 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRemoveBlockAttestationsInPool_Canonical(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
CorrectlyPruneCanonicalAtts: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
genesis, keys := util.DeterministicGenesisState(t, 64)
|
||||
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
|
||||
assert.NoError(t, err)
|
||||
@@ -1711,11 +1840,6 @@ func TestRemoveBlockAttestationsInPool_Canonical(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRemoveBlockAttestationsInPool_NonCanonical(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
CorrectlyPruneCanonicalAtts: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
genesis, keys := util.DeterministicGenesisState(t, 64)
|
||||
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
|
||||
assert.NoError(t, err)
|
||||
@@ -1792,12 +1916,13 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
|
||||
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
@@ -1920,7 +2045,7 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
|
||||
func TestService_insertSlashingsToForkChoiceStore(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -1971,7 +2096,7 @@ func TestService_insertSlashingsToForkChoiceStore(t *testing.T) {
|
||||
func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
opts := []Option{
|
||||
@@ -1980,6 +2105,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
WithForkChoiceStore(fcs),
|
||||
WithDepositCache(depositCache),
|
||||
WithStateNotifier(&mock.MockStateNotifier{}),
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
@@ -2043,7 +2169,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r2))
|
||||
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r3))
|
||||
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r4))
|
||||
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
|
||||
service.cfg.ForkChoiceStore = protoarray.New()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2051,7 +2177,7 @@ func Test_verifyBlkFinalizedSlot_invalidBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
|
||||
@@ -164,21 +164,26 @@ func (s *Service) UpdateHead(ctx context.Context) error {
|
||||
if err != nil {
|
||||
log.WithError(err).Warn("Resolving fork due to new attestation")
|
||||
}
|
||||
s.headLock.RLock()
|
||||
if s.headRoot() != newHeadRoot {
|
||||
log.WithFields(logrus.Fields{
|
||||
"oldHeadRoot": fmt.Sprintf("%#x", s.headRoot()),
|
||||
"newHeadRoot": fmt.Sprintf("%#x", newHeadRoot),
|
||||
}).Debug("Head changed due to attestations")
|
||||
}
|
||||
s.headLock.RUnlock()
|
||||
s.notifyEngineIfChangedHead(ctx, newHeadRoot)
|
||||
return nil
|
||||
}
|
||||
|
||||
// This calls notify Forkchoice Update in the event that the head has changed
|
||||
func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32]byte) {
|
||||
s.headLock.RLock()
|
||||
if newHeadRoot == [32]byte{} || s.headRoot() == newHeadRoot {
|
||||
s.headLock.RUnlock()
|
||||
return
|
||||
}
|
||||
s.headLock.RUnlock()
|
||||
|
||||
if !s.hasBlockInInitSyncOrDB(ctx, newHeadRoot) {
|
||||
log.Debug("New head does not exist in DB. Do nothing")
|
||||
|
||||
@@ -120,7 +120,9 @@ func TestProcessAttestations_Ok(t *testing.T) {
|
||||
copied, err = transition.ProcessSlots(ctx, copied, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
|
||||
service.processAttestations(ctx)
|
||||
require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations()))
|
||||
@@ -225,7 +227,9 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: tRoot[:]}))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
|
||||
b := util.NewBeaconBlock()
|
||||
wb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
@@ -233,6 +237,9 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wb))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, wb.Block().Slot(), r, bytesutil.ToBytes32(wb.Block().ParentRoot()), [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
service.head.root = r // Old head
|
||||
require.Equal(t, 1, len(service.cfg.AttPool.ForkchoiceAttestations()))
|
||||
require.NoError(t, err, service.UpdateHead(ctx))
|
||||
|
||||
@@ -71,11 +71,15 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.SignedBeaco
|
||||
|
||||
// Log block sync status.
|
||||
if err := logBlockSyncStatus(blockCopy.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix())); err != nil {
|
||||
return err
|
||||
log.WithError(err).Error("Unable to log block sync status")
|
||||
}
|
||||
// Log payload data
|
||||
if err := logPayload(blockCopy.Block()); err != nil {
|
||||
log.WithError(err).Error("Unable to log debug block payload data")
|
||||
}
|
||||
// Log state transition data.
|
||||
if err := logStateTransitionData(blockCopy.Block()); err != nil {
|
||||
return err
|
||||
log.WithError(err).Error("Unable to log state transition data")
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -89,8 +93,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Sig
|
||||
defer span.End()
|
||||
|
||||
// Apply state transition on the incoming newly received block batches, one by one.
|
||||
_, _, err := s.onBlockBatch(ctx, blocks, blkRoots)
|
||||
if err != nil {
|
||||
if err := s.onBlockBatch(ctx, blocks, blkRoots); err != nil {
|
||||
err := errors.Wrap(err, "could not process block in batch")
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
|
||||
@@ -127,7 +127,7 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithForkChoiceStore(protoarray.New(0, 0)),
|
||||
WithForkChoiceStore(protoarray.New()),
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
WithExitPool(voluntaryexits.NewPool()),
|
||||
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
|
||||
@@ -168,7 +168,7 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
|
||||
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithForkChoiceStore(protoarray.New(0, 0)),
|
||||
WithForkChoiceStore(protoarray.New()),
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
WithExitPool(voluntaryexits.NewPool()),
|
||||
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
|
||||
@@ -248,7 +248,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithForkChoiceStore(protoarray.New(0, 0)),
|
||||
WithForkChoiceStore(protoarray.New()),
|
||||
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
f "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
|
||||
@@ -40,7 +41,6 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
prysmTime "github.com/prysmaticlabs/prysm/time"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
@@ -138,19 +138,25 @@ func (s *Service) Start() {
|
||||
}
|
||||
}
|
||||
s.spawnProcessAttestationsRoutine(s.cfg.StateNotifier.StateFeed())
|
||||
s.fillMissingPayloadIDRoutine(s.ctx, s.cfg.StateNotifier.StateFeed())
|
||||
}
|
||||
|
||||
// Stop the blockchain service's main event loop and associated goroutines.
|
||||
func (s *Service) Stop() error {
|
||||
defer s.cancel()
|
||||
|
||||
// lock before accessing s.head, s.head.state, s.head.state.FinalizedCheckpoint().Root
|
||||
s.headLock.RLock()
|
||||
if s.cfg.StateGen != nil && s.head != nil && s.head.state != nil {
|
||||
r := s.head.state.FinalizedCheckpoint().Root
|
||||
s.headLock.RUnlock()
|
||||
// Save the last finalized state so that starting up in the following run will be much faster.
|
||||
if err := s.cfg.StateGen.ForceCheckpoint(s.ctx, s.head.state.FinalizedCheckpoint().Root); err != nil {
|
||||
if err := s.cfg.StateGen.ForceCheckpoint(s.ctx, r); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
s.headLock.RUnlock()
|
||||
}
|
||||
|
||||
// Save initial sync cached blocks to the DB before stop.
|
||||
return s.cfg.BeaconDB.SaveBlocks(s.ctx, s.getInitSyncBlocks())
|
||||
}
|
||||
@@ -201,24 +207,27 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
|
||||
s.store = store.New(justified, finalized)
|
||||
|
||||
var forkChoicer f.ForkChoicer
|
||||
fRoot := bytesutil.ToBytes32(finalized.Root)
|
||||
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
|
||||
if features.Get().EnableForkChoiceDoublyLinkedTree {
|
||||
forkChoicer = doublylinkedtree.New(justified.Epoch, finalized.Epoch)
|
||||
forkChoicer = doublylinkedtree.New()
|
||||
} else {
|
||||
forkChoicer = protoarray.New(justified.Epoch, finalized.Epoch)
|
||||
forkChoicer = protoarray.New()
|
||||
}
|
||||
s.cfg.ForkChoiceStore = forkChoicer
|
||||
fb, err := s.getBlock(s.ctx, s.ensureRootNotZeros(fRoot))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint block")
|
||||
if err := forkChoicer.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
|
||||
Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
|
||||
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
|
||||
}
|
||||
payloadHash, err := getBlockPayloadHash(fb.Block())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get execution payload hash")
|
||||
if err := forkChoicer.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
|
||||
Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
|
||||
return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
|
||||
}
|
||||
fSlot := fb.Block().Slot()
|
||||
if err := forkChoicer.InsertOptimisticBlock(s.ctx, fSlot, fRoot, params.BeaconConfig().ZeroHash,
|
||||
payloadHash, justified.Epoch, finalized.Epoch); err != nil {
|
||||
|
||||
st, err := s.cfg.StateGen.StateByRoot(s.ctx, fRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint state")
|
||||
}
|
||||
if err := forkChoicer.InsertNode(s.ctx, st, fRoot); err != nil {
|
||||
return errors.Wrap(err, "could not insert finalized block to forkchoice")
|
||||
}
|
||||
|
||||
@@ -231,18 +240,6 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
|
||||
return errors.Wrap(err, "could not set finalized block as validated")
|
||||
}
|
||||
}
|
||||
|
||||
h := s.headBlock().Block()
|
||||
if h.Slot() > fSlot {
|
||||
log.WithFields(logrus.Fields{
|
||||
"startSlot": fSlot,
|
||||
"endSlot": h.Slot(),
|
||||
}).Info("Loading blocks to fork choice store, this may take a while.")
|
||||
if err := s.fillInForkChoiceMissingBlocks(s.ctx, h, finalized, justified); err != nil {
|
||||
return errors.Wrap(err, "could not fill in fork choice store missing blocks")
|
||||
}
|
||||
}
|
||||
|
||||
// not attempting to save initial sync blocks here, because there shouldn't be any until
|
||||
// after the statefeed.Initialized event is fired (below)
|
||||
if err := s.wsVerifier.VerifyWeakSubjectivity(s.ctx, finalized.Epoch); err != nil {
|
||||
@@ -445,7 +442,7 @@ func (s *Service) initializeBeaconChain(
|
||||
s.cfg.ChainStartFetcher.ClearPreGenesisData()
|
||||
|
||||
// Update committee shuffled indices for genesis epoch.
|
||||
if err := helpers.UpdateCommitteeCache(genesisState, 0 /* genesis epoch */); err != nil {
|
||||
if err := helpers.UpdateCommitteeCache(ctx, genesisState, 0); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := helpers.UpdateProposerIndicesInCache(ctx, genesisState); err != nil {
|
||||
@@ -478,17 +475,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
|
||||
genesisCheckpoint := genesisState.FinalizedCheckpoint()
|
||||
s.store = store.New(genesisCheckpoint, genesisCheckpoint)
|
||||
|
||||
payloadHash, err := getBlockPayloadHash(genesisBlk.Block())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx,
|
||||
genesisBlk.Block().Slot(),
|
||||
genesisBlkRoot,
|
||||
params.BeaconConfig().ZeroHash,
|
||||
payloadHash,
|
||||
genesisCheckpoint.Epoch,
|
||||
genesisCheckpoint.Epoch); err != nil {
|
||||
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, genesisState, genesisBlkRoot); err != nil {
|
||||
log.Fatalf("Could not process genesis block for fork choice: %v", err)
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, genesisBlkRoot); err != nil {
|
||||
|
||||
@@ -130,7 +130,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
WithP2PBroadcaster(&mockBroadcaster{}),
|
||||
WithStateNotifier(&mockBeaconNode{}),
|
||||
WithForkChoiceStore(protoarray.New(0, 0)),
|
||||
WithForkChoiceStore(protoarray.New()),
|
||||
WithAttestationService(attService),
|
||||
WithStateGen(stateGen),
|
||||
}
|
||||
@@ -505,7 +505,7 @@ func TestHasBlock_ForkChoiceAndDB_ProtoArray(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
cfg: &config{ForkChoiceStore: protoarray.New(0, 0), BeaconDB: beaconDB},
|
||||
cfg: &config{ForkChoiceStore: protoarray.New(), BeaconDB: beaconDB},
|
||||
store: &store.Store{},
|
||||
}
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
|
||||
@@ -526,7 +526,7 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0), BeaconDB: beaconDB},
|
||||
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
|
||||
store: &store.Store{},
|
||||
}
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
|
||||
@@ -599,7 +599,7 @@ func BenchmarkHasBlockForkChoiceStore_ProtoArray(b *testing.B) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(b)
|
||||
s := &Service{
|
||||
cfg: &config{ForkChoiceStore: protoarray.New(0, 0), BeaconDB: beaconDB},
|
||||
cfg: &config{ForkChoiceStore: protoarray.New(), BeaconDB: beaconDB},
|
||||
store: &store.Store{},
|
||||
}
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
|
||||
@@ -622,7 +622,7 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(b)
|
||||
s := &Service{
|
||||
cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0), BeaconDB: beaconDB},
|
||||
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
|
||||
store: &store.Store{},
|
||||
}
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
|
||||
|
||||
@@ -62,6 +62,7 @@ type ChainService struct {
|
||||
Genesis time.Time
|
||||
ForkChoiceStore forkchoice.ForkChoicer
|
||||
ReceiveBlockMockErr error
|
||||
OptimisticCheckRootReceived [32]byte
|
||||
}
|
||||
|
||||
// ForkChoicer mocks the same method in the chain service
|
||||
@@ -447,7 +448,8 @@ func (s *ChainService) IsOptimistic(_ context.Context) (bool, error) {
|
||||
}
|
||||
|
||||
// IsOptimisticForRoot mocks the same method in the chain service.
|
||||
func (s *ChainService) IsOptimisticForRoot(_ context.Context, _ [32]byte) (bool, error) {
|
||||
func (s *ChainService) IsOptimisticForRoot(_ context.Context, root [32]byte) (bool, error) {
|
||||
s.OptimisticCheckRootReceived = root
|
||||
return s.Optimistic, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -30,8 +30,8 @@ type WeakSubjectivityVerifier struct {
|
||||
// NewWeakSubjectivityVerifier validates a checkpoint, and if valid, uses it to initialize a weak subjectivity verifier.
|
||||
func NewWeakSubjectivityVerifier(wsc *ethpb.Checkpoint, db weakSubjectivityDB) (*WeakSubjectivityVerifier, error) {
|
||||
if wsc == nil || len(wsc.Root) == 0 || wsc.Epoch == 0 {
|
||||
log.Info("No checkpoint for syncing provided, node will begin syncing from genesis. Checkpoint Sync is an optional feature that allows your node to sync from a more recent checkpoint, " +
|
||||
"which enhances the security of your local beacon node and the broader network. See https://docs.prylabs.network/docs/next/prysm-usage/checkpoint-sync/ to learn how to configure Checkpoint Sync.")
|
||||
log.Info("--weak-subjectivity-checkpoint not provided. Prysm recommends providing a weak subjectivity checkpoint" +
|
||||
"for nodes synced from genesis, or manual verification of block and state roots for checkpoint sync nodes.")
|
||||
return &WeakSubjectivityVerifier{
|
||||
enabled: false,
|
||||
}, nil
|
||||
|
||||
31 beacon-chain/builder/BUILD.bazel Normal file
@@ -0,0 +1,31 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"error.go",
|
||||
"metric.go",
|
||||
"option.go",
|
||||
"service.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/builder",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api/client/builder:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//network:go_default_library",
|
||||
"//network/authorization:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
)
|
||||
7 beacon-chain/builder/error.go Normal file
@@ -0,0 +1,7 @@
|
||||
package builder
|
||||
|
||||
import "github.com/pkg/errors"
|
||||
|
||||
var (
|
||||
ErrNotRunning = errors.New("builder is not running")
|
||||
)
|
||||
37 beacon-chain/builder/metric.go Normal file
@@ -0,0 +1,37 @@
|
||||
package builder
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
var (
|
||||
submitBlindedBlockLatency = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "submit_blinded_block_latency_milliseconds",
|
||||
Help: "Captures RPC latency for submitting blinded block in milliseconds",
|
||||
Buckets: []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000},
|
||||
},
|
||||
)
|
||||
getHeaderLatency = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "get_header_latency_milliseconds",
|
||||
Help: "Captures RPC latency for get header in milliseconds",
|
||||
Buckets: []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000},
|
||||
},
|
||||
)
|
||||
getStatusLatency = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "get_status_latency_milliseconds",
|
||||
Help: "Captures RPC latency for get status in milliseconds",
|
||||
Buckets: []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000},
|
||||
},
|
||||
)
|
||||
registerValidatorLatency = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "register_validator_latency_milliseconds",
|
||||
Help: "Captures RPC latency for register validator in milliseconds",
|
||||
Buckets: []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000},
|
||||
},
|
||||
)
|
||||
)
|
||||
45 beacon-chain/builder/option.go Normal file
@@ -0,0 +1,45 @@
|
||||
package builder
|
||||
|
||||
import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/network"
|
||||
"github.com/prysmaticlabs/prysm/network/authorization"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
type Option func(s *Service) error
|
||||
|
||||
// FlagOptions for builder service flag configurations.
|
||||
func FlagOptions(c *cli.Context) ([]Option, error) {
|
||||
endpoint := c.String(flags.MevRelayEndpoint.Name)
|
||||
opts := []Option{
|
||||
WithBuilderEndpoints(endpoint),
|
||||
}
|
||||
return opts, nil
|
||||
}
|
||||
|
||||
// WithBuilderEndpoints sets the endpoint for the beacon chain builder service.
|
||||
func WithBuilderEndpoints(endpoint string) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.builderEndpoint = covertEndPoint(endpoint)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithDatabase sets the database for the beacon chain builder service.
|
||||
func WithDatabase(database db.HeadAccessDatabase) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.beaconDB = database
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func covertEndPoint(ep string) network.Endpoint {
|
||||
return network.Endpoint{
|
||||
Url: ep,
|
||||
Auth: network.AuthorizationData{ // Auth is not used for builder.
|
||||
Method: authorization.None,
|
||||
Value: "",
|
||||
}}
|
||||
}
|
||||
134 beacon-chain/builder/service.go Normal file
@@ -0,0 +1,134 @@
|
||||
package builder
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/api/client/builder"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/network"
|
||||
v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// BlockBuilder defines the interface for interacting with the block builder
|
||||
type BlockBuilder interface {
|
||||
SubmitBlindedBlock(ctx context.Context, block *ethpb.SignedBlindedBeaconBlockBellatrix) (*v1.ExecutionPayload, error)
|
||||
GetHeader(ctx context.Context, slot types.Slot, parentHash [32]byte, pubKey [48]byte) (*ethpb.SignedBuilderBid, error)
|
||||
Status(ctx context.Context) error
|
||||
RegisterValidator(ctx context.Context, reg *ethpb.SignedValidatorRegistrationV1) error
|
||||
}
|
||||
|
||||
// config defines a config struct for dependencies into the service.
|
||||
type config struct {
|
||||
builderEndpoint network.Endpoint
|
||||
beaconDB db.HeadAccessDatabase
|
||||
headFetcher blockchain.HeadFetcher
|
||||
}
|
||||
|
||||
// Service defines a service that provides a client for interacting with the beacon chain and MEV relay network.
|
||||
type Service struct {
|
||||
cfg *config
|
||||
c *builder.Client
|
||||
}
|
||||
|
||||
// NewService instantiates a new service.
|
||||
func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
s := &Service{}
|
||||
for _, opt := range opts {
|
||||
if err := opt(s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if s.cfg.builderEndpoint.Url != "" {
|
||||
c, err := builder.NewClient(s.cfg.builderEndpoint.Url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.c = c
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// Start initializes the service.
|
||||
func (*Service) Start() {}
|
||||
|
||||
// Stop halts the service.
|
||||
func (*Service) Stop() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SubmitBlindedBlock submits a blinded block to the builder relay network.
|
||||
func (s *Service) SubmitBlindedBlock(ctx context.Context, b *ethpb.SignedBlindedBeaconBlockBellatrix) (*v1.ExecutionPayload, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "builder.SubmitBlindedBlock")
|
||||
defer span.End()
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
submitBlindedBlockLatency.Observe(float64(time.Since(start).Milliseconds()))
|
||||
}()
|
||||
|
||||
return s.c.SubmitBlindedBlock(ctx, b)
|
||||
}
|
||||
|
||||
// GetHeader retrieves the header for a given slot and parent hash from the builder relay network.
|
||||
func (s *Service) GetHeader(ctx context.Context, slot types.Slot, parentHash [32]byte, pubKey [48]byte) (*ethpb.SignedBuilderBid, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "builder.GetHeader")
|
||||
defer span.End()
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
getHeaderLatency.Observe(float64(time.Since(start).Milliseconds()))
|
||||
}()
|
||||
|
||||
return s.c.GetHeader(ctx, slot, parentHash, pubKey)
|
||||
}
|
||||
|
||||
// Status retrieves the status of the builder relay network.
|
||||
func (s *Service) Status(ctx context.Context) error {
|
||||
ctx, span := trace.StartSpan(ctx, "builder.Status")
|
||||
defer span.End()
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
getStatusLatency.Observe(float64(time.Since(start).Milliseconds()))
|
||||
}()
|
||||
|
||||
return s.c.Status(ctx)
|
||||
}
|
||||
|
||||
// RegisterValidator registers a validator with the builder relay network.
|
||||
// It also saves the registration object to the DB.
|
||||
func (s *Service) RegisterValidator(ctx context.Context, reg []*ethpb.SignedValidatorRegistrationV1) error {
|
||||
ctx, span := trace.StartSpan(ctx, "builder.RegisterValidator")
|
||||
defer span.End()
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
registerValidatorLatency.Observe(float64(time.Since(start).Milliseconds()))
|
||||
}()
|
||||
|
||||
idxs := make([]types.ValidatorIndex, 0)
|
||||
msgs := make([]*ethpb.ValidatorRegistrationV1, 0)
|
||||
valid := make([]*ethpb.SignedValidatorRegistrationV1, 0)
|
||||
for i := 0; i < len(reg); i++ {
|
||||
r := reg[i]
|
||||
nx, exists := s.cfg.headFetcher.HeadPublicKeyToValidatorIndex(bytesutil.ToBytes48(r.Message.Pubkey))
|
||||
if !exists {
|
||||
// we want to allow validators to set up keys that haven't been added to the beaconstate validator list yet,
|
||||
// so we should tolerate keys that do not seem to be valid by skipping past them.
|
||||
log.Warnf("Skipping validator registration for pubkey=%#x - not in current validator set.", r.Message.Pubkey)
|
||||
continue
|
||||
}
|
||||
idxs = append(idxs, nx)
|
||||
msgs = append(msgs, r.Message)
|
||||
valid = append(valid, r)
|
||||
}
|
||||
if err := s.c.RegisterValidator(ctx, valid); err != nil {
|
||||
return errors.Wrap(err, "could not register validator(s)")
|
||||
}
|
||||
|
||||
return s.cfg.beaconDB.SaveRegistrationsByValidatorIDs(ctx, idxs, msgs)
|
||||
}
|
||||
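The new builder package above exposes a small functional-options API. The sketch below is not part of the diff: it assumes only the exported names shown here (NewService, WithBuilderEndpoints, Start, Stop, Status) and uses a placeholder relay URL; a real beacon node would also supply the database and head-fetcher dependencies.

package main

import (
	"context"
	"log"

	"github.com/prysmaticlabs/prysm/beacon-chain/builder"
)

func main() {
	ctx := context.Background()
	// Placeholder relay endpoint; the URL is illustrative only.
	svc, err := builder.NewService(ctx, builder.WithBuilderEndpoints("http://localhost:18550"))
	if err != nil {
		log.Fatal(err)
	}
	svc.Start()
	defer func() {
		if err := svc.Stop(); err != nil {
			log.Println(err)
		}
	}()
	// Status pings the configured relay and returns an error if it is unreachable.
	if err := svc.Status(ctx); err != nil {
		log.Printf("builder relay unavailable: %v", err)
	}
}

FlagOptions in option.go builds the same WithBuilderEndpoints option from the MevRelayEndpoint CLI flag, so node wiring only has to pass the resulting option slice to NewService.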
1 beacon-chain/cache/active_balance.go vendored
@@ -1,5 +1,4 @@
|
||||
//go:build !fuzz
|
||||
// +build !fuzz
|
||||
|
||||
package cache
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
//go:build fuzz
|
||||
// +build fuzz
|
||||
|
||||
package cache
|
||||
|
||||
|
||||
1 beacon-chain/cache/active_balance_test.go vendored
@@ -1,5 +1,4 @@
|
||||
//go:build !fuzz
|
||||
// +build !fuzz
|
||||
|
||||
package cache
|
||||
|
||||
|
||||
6 beacon-chain/cache/committee.go vendored
@@ -1,5 +1,4 @@
|
||||
//go:build !fuzz
|
||||
// +build !fuzz
|
||||
|
||||
package cache
|
||||
|
||||
@@ -103,9 +102,12 @@ func (c *CommitteeCache) Committee(ctx context.Context, slot types.Slot, seed [3
|
||||
|
||||
// AddCommitteeShuffledList adds a Committees shuffled list object to the cache.
// This method also trims the least recently used entry if the cache has reached its max size limit.
|
||||
func (c *CommitteeCache) AddCommitteeShuffledList(committees *Committees) error {
|
||||
func (c *CommitteeCache) AddCommitteeShuffledList(ctx context.Context, committees *Committees) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if err := ctx.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
key, err := committeeKeyFn(committees)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
3 beacon-chain/cache/committee_disabled.go vendored
@@ -1,5 +1,4 @@
|
||||
//go:build fuzz
|
||||
// +build fuzz
|
||||
|
||||
// This file is used in fuzzer builds to bypass global committee caches.
|
||||
package cache
|
||||
@@ -27,7 +26,7 @@ func (c *FakeCommitteeCache) Committee(ctx context.Context, slot types.Slot, see
|
||||
|
||||
// AddCommitteeShuffledList adds a Committees shuffled list object to the cache.
// This method also trims the least recently used entry if the cache has reached its max size limit.
|
||||
func (c *FakeCommitteeCache) AddCommitteeShuffledList(committees *Committees) error {
|
||||
func (c *FakeCommitteeCache) AddCommitteeShuffledList(ctx context.Context, committees *Committees) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
5 beacon-chain/cache/committee_fuzz_test.go vendored
@@ -1,5 +1,4 @@
|
||||
//go:build !fuzz
|
||||
// +build !fuzz
|
||||
|
||||
package cache
|
||||
|
||||
@@ -31,7 +30,7 @@ func TestCommitteeCache_FuzzCommitteesByEpoch(t *testing.T) {
|
||||
|
||||
for i := 0; i < 100000; i++ {
|
||||
fuzzer.Fuzz(c)
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(c))
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), c))
|
||||
_, err := cache.Committee(context.Background(), 0, c.Seed, 0)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -46,7 +45,7 @@ func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
|
||||
|
||||
for i := 0; i < 100000; i++ {
|
||||
fuzzer.Fuzz(c)
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(c))
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), c))
|
||||
|
||||
indices, err := cache.ActiveIndices(context.Background(), c.Seed)
|
||||
require.NoError(t, err)
|
||||
|
||||
26 beacon-chain/cache/committee_test.go vendored
@@ -1,5 +1,4 @@
|
||||
//go:build !fuzz
|
||||
// +build !fuzz
|
||||
|
||||
package cache
|
||||
|
||||
@@ -50,7 +49,7 @@ func TestCommitteeCache_CommitteesByEpoch(t *testing.T) {
|
||||
if indices != nil {
|
||||
t.Error("Expected committee not to exist in empty cache")
|
||||
}
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(item))
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), item))
|
||||
|
||||
wantedIndex := types.CommitteeIndex(0)
|
||||
indices, err = cache.Committee(context.Background(), slot, item.Seed, wantedIndex)
|
||||
@@ -70,7 +69,7 @@ func TestCommitteeCache_ActiveIndices(t *testing.T) {
|
||||
t.Error("Expected committee not to exist in empty cache")
|
||||
}
|
||||
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(item))
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), item))
|
||||
|
||||
indices, err = cache.ActiveIndices(context.Background(), item.Seed)
|
||||
require.NoError(t, err)
|
||||
@@ -85,7 +84,7 @@ func TestCommitteeCache_ActiveCount(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, count, "Expected active count not to exist in empty cache")
|
||||
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(item))
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), item))
|
||||
|
||||
count, err = cache.ActiveIndicesCount(context.Background(), item.Seed)
|
||||
require.NoError(t, err)
|
||||
@@ -101,7 +100,7 @@ func TestCommitteeCache_CanRotate(t *testing.T) {
|
||||
for i := start; i < end; i++ {
|
||||
s := []byte(strconv.Itoa(i))
|
||||
item := &Committees{Seed: bytesutil.ToBytes32(s)}
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(item))
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), item))
|
||||
}
|
||||
|
||||
k := cache.CommitteeCache.Keys()
|
||||
@@ -134,3 +133,20 @@ func TestCommitteeCacheOutOfRange(t *testing.T) {
|
||||
_, err = cache.Committee(context.Background(), 0, seed, math.MaxUint64) // Overflow!
|
||||
require.NotNil(t, err, "Did not fail as expected")
|
||||
}
|
||||
|
||||
func TestCommitteeCache_DoesNothingWhenCancelledContext(t *testing.T) {
|
||||
cache := NewCommitteesCache()
|
||||
|
||||
item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []types.ValidatorIndex{1, 2, 3, 4, 5, 6}}
|
||||
count, err := cache.ActiveIndicesCount(context.Background(), item.Seed)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, count, "Expected active count not to exist in empty cache")
|
||||
|
||||
cancelled, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
require.ErrorIs(t, cache.AddCommitteeShuffledList(cancelled, item), context.Canceled)
|
||||
|
||||
count, err = cache.ActiveIndicesCount(context.Background(), item.Seed)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, count)
|
||||
}
|
||||
|
||||
1 beacon-chain/cache/proposer_indices.go vendored
@@ -1,5 +1,4 @@
|
||||
//go:build !fuzz
|
||||
// +build !fuzz
|
||||
|
||||
package cache
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
//go:build fuzz
|
||||
// +build fuzz
|
||||
|
||||
// This file is used in fuzzer builds to bypass proposer indices caches.
|
||||
package cache
|
||||
|
||||
1 beacon-chain/cache/proposer_indices_test.go vendored
@@ -1,5 +1,4 @@
|
||||
//go:build !fuzz
|
||||
// +build !fuzz
|
||||
|
||||
package cache
|
||||
|
||||
|
||||
1 beacon-chain/cache/sync_committee.go vendored
@@ -1,5 +1,4 @@
|
||||
//go:build !fuzz
|
||||
// +build !fuzz
|
||||
|
||||
package cache
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
//go:build fuzz
|
||||
// +build fuzz
|
||||
|
||||
package cache
|
||||
|
||||
|
||||
@@ -47,7 +47,7 @@ func InitializePrecomputeValidators(ctx context.Context, beaconState state.Beaco
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Set validator's active status for preivous epoch.
|
||||
// Set validator's active status for previous epoch.
|
||||
if helpers.IsActiveValidatorUsingTrie(val, prevEpoch) {
|
||||
v.IsActivePrevEpoch = true
|
||||
bal.ActivePrevEpoch, err = math.Add64(bal.ActivePrevEpoch, val.EffectiveBalance())
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/forks/bellatrix"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
@@ -271,3 +272,16 @@ func ProcessPayloadHeader(st state.BeaconState, header *ethpb.ExecutionPayloadHe
|
||||
}
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// GetBlockPayloadHash returns the hash of the execution payload of the block
|
||||
func GetBlockPayloadHash(blk interfaces.BeaconBlock) ([32]byte, error) {
|
||||
payloadHash := [32]byte{}
|
||||
if IsPreBellatrixVersion(blk.Version()) {
|
||||
return payloadHash, nil
|
||||
}
|
||||
payload, err := blk.Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return payloadHash, err
|
||||
}
|
||||
return bytesutil.ToBytes32(payload.BlockHash), nil
|
||||
}
|
||||
|
||||
@@ -28,6 +28,7 @@ go_library(
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -38,16 +39,19 @@ go_test(
|
||||
"attestation_test.go",
|
||||
"justification_finalization_test.go",
|
||||
"new_test.go",
|
||||
"precompute_test.go",
|
||||
"reward_penalty_test.go",
|
||||
"slashing_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/epoch:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//beacon-chain/state/v2:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
|
||||
@@ -2,6 +2,7 @@ package precompute
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
@@ -9,6 +10,24 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
|
||||
var errNilState = errors.New("nil state")
|
||||
|
||||
// UnrealizedCheckpoints returns the justification and finalization checkpoints of the
|
||||
// given state as if it was progressed with empty slots until the next epoch.
|
||||
func UnrealizedCheckpoints(st state.BeaconState) (*ethpb.Checkpoint, *ethpb.Checkpoint, error) {
|
||||
if st == nil || st.IsNil() {
|
||||
return nil, nil, errNilState
|
||||
}
|
||||
|
||||
activeBalance, prevTarget, currentTarget, err := st.UnrealizedCheckpointBalances()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
justification := processJustificationBits(st, activeBalance, prevTarget, currentTarget)
|
||||
return computeCheckpoints(st, justification)
|
||||
}
|
||||
|
||||
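UnrealizedCheckpoints, added above, answers "what would this state justify and finalize if the current epoch ended now with only empty slots" without mutating the state. The following usage sketch is illustrative only; st is any state.BeaconState obtained elsewhere, and imports for fmt, the precompute package, and beacon-chain/state are assumed.

// logUnrealized is a sketch, not part of the diff.
func logUnrealized(st state.BeaconState) error {
	jc, fc, err := precompute.UnrealizedCheckpoints(st)
	if err != nil {
		return err
	}
	// jc and fc are *ethpb.Checkpoint values describing the justification and
	// finalization the state would reach at the next epoch transition.
	fmt.Printf("unrealized justified epoch=%d, finalized epoch=%d\n", jc.Epoch, fc.Epoch)
	return nil
}

Because it delegates to processJustificationBits and computeCheckpoints below, the helper only reads from the state and never writes the computed checkpoints back.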
// ProcessJustificationAndFinalizationPreCompute processes justification and finalization during
|
||||
// epoch processing. This is where a beacon node can justify and finalize a new epoch.
|
||||
// Note: this is an optimized version by passing in precomputed total and attesting balances.
|
||||
@@ -34,12 +53,55 @@ func ProcessJustificationAndFinalizationPreCompute(state state.BeaconState, pBal
|
||||
return state, nil
|
||||
}
|
||||
|
||||
return weighJustificationAndFinalization(state, pBal.ActiveCurrentEpoch, pBal.PrevEpochTargetAttested, pBal.CurrentEpochTargetAttested)
|
||||
newBits := processJustificationBits(state, pBal.ActiveCurrentEpoch, pBal.PrevEpochTargetAttested, pBal.CurrentEpochTargetAttested)
|
||||
|
||||
return weighJustificationAndFinalization(state, newBits)
|
||||
}
|
||||
|
||||
// weighJustificationAndFinalization processes justification and finalization during
|
||||
// processJustificationBits processes the justification bits during epoch processing.
|
||||
func processJustificationBits(state state.BeaconState, totalActiveBalance, prevEpochTargetBalance, currEpochTargetBalance uint64) bitfield.Bitvector4 {
|
||||
newBits := state.JustificationBits()
|
||||
newBits.Shift(1)
|
||||
// If 2/3 or more of total balance attested in the previous epoch.
|
||||
if 3*prevEpochTargetBalance >= 2*totalActiveBalance {
|
||||
newBits.SetBitAt(1, true)
|
||||
}
|
||||
|
||||
if 3*currEpochTargetBalance >= 2*totalActiveBalance {
|
||||
newBits.SetBitAt(0, true)
|
||||
}
|
||||
|
||||
return newBits
|
||||
}
|
||||
|
||||
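processJustificationBits above is pure bookkeeping on a four-bit history: shift by one, then set bit 1 when previous-epoch target attestations reach two thirds of the active balance and bit 0 when current-epoch attestations do, with the threshold written as 3*target >= 2*total to avoid division. A small self-contained sketch of the same mechanics, using the go-bitfield package imported above and made-up balance numbers:

package main

import (
	"fmt"

	"github.com/prysmaticlabs/go-bitfield"
)

func main() {
	// Start from a hypothetical justification history of 0b0011
	// (the two most recent epochs were justified).
	bits := bitfield.NewBitvector4()
	bits.SetBitAt(0, true)
	bits.SetBitAt(1, true)

	// New epoch: shift the history by one, as processJustificationBits does.
	bits.Shift(1)

	// Made-up balances in gwei: previous-epoch target votes clear the 2/3
	// threshold, current-epoch votes do not.
	total, prevTarget, currTarget := uint64(96), uint64(70), uint64(50)
	if 3*prevTarget >= 2*total {
		bits.SetBitAt(1, true)
	}
	if 3*currTarget >= 2*total {
		bits.SetBitAt(0, true)
	}

	fmt.Printf("justification bits: %04b\n", bits.Bytes()[0])
}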
// weighJustificationAndFinalization processes justification and finalization during
|
||||
// epoch processing. This is where a beacon node can justify and finalize a new epoch.
|
||||
//
|
||||
func weighJustificationAndFinalization(state state.BeaconState, newBits bitfield.Bitvector4) (state.BeaconState, error) {
|
||||
jc, fc, err := computeCheckpoints(state, newBits)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := state.SetPreviousJustifiedCheckpoint(state.CurrentJustifiedCheckpoint()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := state.SetCurrentJustifiedCheckpoint(jc); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := state.SetJustificationBits(newBits); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := state.SetFinalizedCheckpoint(fc); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return state, nil
|
||||
}
|
||||
|
||||
// computeCheckpoints computes the new Justification and Finalization
|
||||
// checkpoints at epoch transition
|
||||
// Spec pseudocode definition:
|
||||
// def weigh_justification_and_finalization(state: BeaconState,
|
||||
// total_active_balance: Gwei,
|
||||
@@ -77,88 +139,57 @@ func ProcessJustificationAndFinalizationPreCompute(state state.BeaconState, pBal
|
||||
// # The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source
|
||||
// if all(bits[0:2]) and old_current_justified_checkpoint.epoch + 1 == current_epoch:
|
||||
// state.finalized_checkpoint = old_current_justified_checkpoint
|
||||
func weighJustificationAndFinalization(state state.BeaconState,
|
||||
totalActiveBalance, prevEpochTargetBalance, currEpochTargetBalance uint64) (state.BeaconState, error) {
|
||||
func computeCheckpoints(state state.BeaconState, newBits bitfield.Bitvector4) (*ethpb.Checkpoint, *ethpb.Checkpoint, error) {
|
||||
prevEpoch := time.PrevEpoch(state)
|
||||
currentEpoch := time.CurrentEpoch(state)
|
||||
oldPrevJustifiedCheckpoint := state.PreviousJustifiedCheckpoint()
|
||||
oldCurrJustifiedCheckpoint := state.CurrentJustifiedCheckpoint()
|
||||
|
||||
// Process justifications
|
||||
if err := state.SetPreviousJustifiedCheckpoint(state.CurrentJustifiedCheckpoint()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
newBits := state.JustificationBits()
|
||||
newBits.Shift(1)
|
||||
if err := state.SetJustificationBits(newBits); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Note: the spec refers to the bit index position starting at 1 instead of starting at zero.
|
||||
// We will use that paradigm here for consistency with the godoc spec definition.
|
||||
|
||||
// If 2/3 or more of total balance attested in the previous epoch.
|
||||
if 3*prevEpochTargetBalance >= 2*totalActiveBalance {
|
||||
blockRoot, err := helpers.BlockRoot(state, prevEpoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root for previous epoch %d", prevEpoch)
|
||||
}
|
||||
if err := state.SetCurrentJustifiedCheckpoint(ðpb.Checkpoint{Epoch: prevEpoch, Root: blockRoot}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
newBits = state.JustificationBits()
|
||||
newBits.SetBitAt(1, true)
|
||||
if err := state.SetJustificationBits(newBits); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
justifiedCheckpoint := state.CurrentJustifiedCheckpoint()
|
||||
finalizedCheckpoint := state.FinalizedCheckpoint()
|
||||
|
||||
// If 2/3 or more of the total balance attested in the current epoch.
|
||||
if 3*currEpochTargetBalance >= 2*totalActiveBalance {
|
||||
if newBits.BitAt(0) && currentEpoch >= justifiedCheckpoint.Epoch {
|
||||
blockRoot, err := helpers.BlockRoot(state, currentEpoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root for current epoch %d", prevEpoch)
|
||||
return nil, nil, errors.Wrapf(err, "could not get block root for current epoch %d", currentEpoch)
|
||||
}
|
||||
if err := state.SetCurrentJustifiedCheckpoint(ðpb.Checkpoint{Epoch: currentEpoch, Root: blockRoot}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
newBits = state.JustificationBits()
|
||||
newBits.SetBitAt(0, true)
|
||||
if err := state.SetJustificationBits(newBits); err != nil {
|
||||
return nil, err
|
||||
justifiedCheckpoint.Epoch = currentEpoch
|
||||
justifiedCheckpoint.Root = blockRoot
|
||||
} else if newBits.BitAt(1) && prevEpoch >= justifiedCheckpoint.Epoch {
|
||||
// If 2/3 or more of total balance attested in the previous epoch.
|
||||
blockRoot, err := helpers.BlockRoot(state, prevEpoch)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not get block root for previous epoch %d", prevEpoch)
|
||||
}
|
||||
justifiedCheckpoint.Epoch = prevEpoch
|
||||
justifiedCheckpoint.Root = blockRoot
|
||||
}
|
||||
|
||||
// Process finalization according to Ethereum Beacon Chain specification.
|
||||
justification := state.JustificationBits().Bytes()[0]
|
||||
if len(newBits) == 0 {
|
||||
return nil, nil, errors.New("empty justification bits")
|
||||
}
|
||||
justification := newBits.Bytes()[0]
|
||||
|
||||
// 2nd/3rd/4th (0b1110) most recent epochs are justified, the 2nd using the 4th as source.
|
||||
if justification&0x0E == 0x0E && (oldPrevJustifiedCheckpoint.Epoch+3) == currentEpoch {
|
||||
if err := state.SetFinalizedCheckpoint(oldPrevJustifiedCheckpoint); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
finalizedCheckpoint = oldPrevJustifiedCheckpoint
|
||||
}
|
||||
|
||||
// 2nd/3rd (0b0110) most recent epochs are justified, the 2nd using the 3rd as source.
|
||||
if justification&0x06 == 0x06 && (oldPrevJustifiedCheckpoint.Epoch+2) == currentEpoch {
|
||||
if err := state.SetFinalizedCheckpoint(oldPrevJustifiedCheckpoint); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
finalizedCheckpoint = oldPrevJustifiedCheckpoint
|
||||
}
|
||||
|
||||
// 1st/2nd/3rd (0b0111) most recent epochs are justified, the 1st using the 3rd as source.
|
||||
if justification&0x07 == 0x07 && (oldCurrJustifiedCheckpoint.Epoch+2) == currentEpoch {
|
||||
if err := state.SetFinalizedCheckpoint(oldCurrJustifiedCheckpoint); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
finalizedCheckpoint = oldCurrJustifiedCheckpoint
|
||||
}
|
||||
|
||||
// The 1st/2nd (0b0011) most recent epochs are justified, the 1st using the 2nd as source
|
||||
if justification&0x03 == 0x03 && (oldCurrJustifiedCheckpoint.Epoch+1) == currentEpoch {
|
||||
if err := state.SetFinalizedCheckpoint(oldCurrJustifiedCheckpoint); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
finalizedCheckpoint = oldCurrJustifiedCheckpoint
|
||||
}
|
||||
|
||||
return state, nil
|
||||
return justifiedCheckpoint, finalizedCheckpoint, nil
|
||||
}
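// Illustrative sketch (not part of the diff): how the four justification bits map to the
// finalization rules checked above. The masks 0x0E, 0x06, 0x07 and 0x03 and the epoch
// offsets mirror computeCheckpoints; plain uint64 epochs keep the example self-contained,
// and later rules overwrite earlier ones just as the sequential if statements do.
package main

import "fmt"

// finalizeFrom returns the epoch of the checkpoint that would become finalized, or ok=false
// when none of the four rules fire. bits is the first byte of the justification bitvector.
func finalizeFrom(bits byte, currentEpoch, oldPrevJustified, oldCurrJustified uint64) (epoch uint64, ok bool) {
	if bits&0x0E == 0x0E && oldPrevJustified+3 == currentEpoch { // 2nd/3rd/4th most recent justified
		epoch, ok = oldPrevJustified, true
	}
	if bits&0x06 == 0x06 && oldPrevJustified+2 == currentEpoch { // 2nd/3rd most recent justified
		epoch, ok = oldPrevJustified, true
	}
	if bits&0x07 == 0x07 && oldCurrJustified+2 == currentEpoch { // 1st/2nd/3rd most recent justified
		epoch, ok = oldCurrJustified, true
	}
	if bits&0x03 == 0x03 && oldCurrJustified+1 == currentEpoch { // 1st/2nd most recent justified
		epoch, ok = oldCurrJustified, true
	}
	return epoch, ok
}

func main() {
	fmt.Println(finalizeFrom(0x03, 10, 8, 9)) // 9 true: last two epochs justified, source one epoch back
}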
|
||||
|
||||
@@ -1,11 +1,14 @@
|
||||
package precompute_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
@@ -123,3 +126,142 @@ func TestProcessJustificationAndFinalizationPreCompute_JustifyPrevEpoch(t *testi
|
||||
assert.DeepEqual(t, params.BeaconConfig().ZeroHash[:], newState.FinalizedCheckpoint().Root)
|
||||
assert.Equal(t, types.Epoch(0), newState.FinalizedCheckpointEpoch(), "Unexpected finalized epoch")
|
||||
}
|
||||
|
||||
func TestUnrealizedCheckpoints(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
balances := make([]uint64, len(validators))
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
}
|
||||
balances[i] = params.BeaconConfig().MaxEffectiveBalance
|
||||
}
|
||||
pjr := [32]byte{'p'}
|
||||
cjr := [32]byte{'c'}
|
||||
je := types.Epoch(3)
|
||||
fe := types.Epoch(2)
|
||||
pjcp := ðpb.Checkpoint{Root: pjr[:], Epoch: fe}
|
||||
cjcp := ðpb.Checkpoint{Root: cjr[:], Epoch: je}
|
||||
fcp := ðpb.Checkpoint{Root: pjr[:], Epoch: fe}
|
||||
tests := []struct {
|
||||
name string
|
||||
slot types.Slot
|
||||
prevVals, currVals int
|
||||
expectedJustified, expectedFinalized types.Epoch // The expected unrealized checkpoint epochs
|
||||
}{
|
||||
{
|
||||
"Not enough votes, keep previous justification",
|
||||
129,
|
||||
len(validators) / 3,
|
||||
len(validators) / 3,
|
||||
je,
|
||||
fe,
|
||||
},
|
||||
{
|
||||
"Not enough votes, keep previous justification, N+2",
|
||||
161,
|
||||
len(validators) / 3,
|
||||
len(validators) / 3,
|
||||
je,
|
||||
fe,
|
||||
},
|
||||
{
|
||||
"Enough to justify previous epoch but not current",
|
||||
129,
|
||||
2*len(validators)/3 + 3,
|
||||
len(validators) / 3,
|
||||
je,
|
||||
fe,
|
||||
},
|
||||
{
|
||||
"Enough to justify previous epoch but not current, N+2",
|
||||
161,
|
||||
2*len(validators)/3 + 3,
|
||||
len(validators) / 3,
|
||||
je + 1,
|
||||
fe,
|
||||
},
|
||||
{
|
||||
"Enough to justify current epoch",
|
||||
129,
|
||||
len(validators) / 3,
|
||||
2*len(validators)/3 + 3,
|
||||
je + 1,
|
||||
fe,
|
||||
},
|
||||
{
|
||||
"Enough to justify current epoch, but not previous",
|
||||
161,
|
||||
len(validators) / 3,
|
||||
2*len(validators)/3 + 3,
|
||||
je + 2,
|
||||
fe,
|
||||
},
|
||||
{
|
||||
"Enough to justify current and previous",
|
||||
161,
|
||||
2*len(validators)/3 + 3,
|
||||
2*len(validators)/3 + 3,
|
||||
je + 2,
|
||||
fe,
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
base := ðpb.BeaconStateAltair{
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
|
||||
Validators: validators,
|
||||
Slot: test.slot,
|
||||
CurrentEpochParticipation: make([]byte, params.BeaconConfig().MinGenesisActiveValidatorCount),
|
||||
PreviousEpochParticipation: make([]byte, params.BeaconConfig().MinGenesisActiveValidatorCount),
|
||||
Balances: balances,
|
||||
PreviousJustifiedCheckpoint: pjcp,
|
||||
CurrentJustifiedCheckpoint: cjcp,
|
||||
FinalizedCheckpoint: fcp,
|
||||
InactivityScores: make([]uint64, len(validators)),
|
||||
JustificationBits: make(bitfield.Bitvector4, 1),
|
||||
}
|
||||
for i := 0; i < test.prevVals; i++ {
|
||||
base.PreviousEpochParticipation[i] = 0xFF
|
||||
}
|
||||
for i := 0; i < test.currVals; i++ {
|
||||
base.CurrentEpochParticipation[i] = 0xFF
|
||||
}
|
||||
if test.slot > 130 {
|
||||
base.JustificationBits.SetBitAt(2, true)
|
||||
base.JustificationBits.SetBitAt(3, true)
|
||||
} else {
|
||||
base.JustificationBits.SetBitAt(1, true)
|
||||
base.JustificationBits.SetBitAt(2, true)
|
||||
}
|
||||
|
||||
state, err := v2.InitializeFromProto(base)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, err = altair.InitializePrecomputeValidators(context.Background(), state)
|
||||
require.NoError(t, err)
|
||||
|
||||
jc, fc, err := precompute.UnrealizedCheckpoints(state)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, test.expectedJustified, jc.Epoch)
|
||||
require.DeepEqual(t, test.expectedFinalized, fc.Epoch)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_ComputeCheckpoints_CantUpdateToLower(t *testing.T) {
|
||||
st, err := v2.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch * 2,
|
||||
CurrentJustifiedCheckpoint: ðpb.Checkpoint{
|
||||
Epoch: 2,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
jb := make(bitfield.Bitvector4, 1)
|
||||
jb.SetBitAt(1, true)
|
||||
cp, _, err := precompute.ComputeCheckpoints(st, jb)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, types.Epoch(2), cp.Epoch)
|
||||
}
|
||||
|
||||
3  beacon-chain/core/epoch/precompute/precompute_test.go  Normal file
@@ -0,0 +1,3 @@
package precompute

var ComputeCheckpoints = computeCheckpoints
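// The three-line file above is the usual "export for tests" pattern: a _test.go file inside
// package precompute aliases the unexported computeCheckpoints so that the external
// precompute_test package (used by the tests earlier in this diff) can call it, e.g.:
//
//	jb := make(bitfield.Bitvector4, 1)
//	jb.SetBitAt(1, true)
//	jc, _, err := precompute.ComputeCheckpoints(st, jb)
//
// Because the alias lives in a _test.go file, it is compiled only for test targets and does
// not widen the package's public API.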
@@ -285,7 +285,7 @@ func ShuffledIndices(s state.ReadOnlyBeaconState, epoch types.Epoch) ([]types.Va
|
||||
|
||||
// UpdateCommitteeCache gets called at the beginning of every epoch to cache the committee shuffled indices
// list with committee index and epoch number. It caches the shuffled indices for the current and next epochs.
|
||||
func UpdateCommitteeCache(state state.ReadOnlyBeaconState, epoch types.Epoch) error {
|
||||
func UpdateCommitteeCache(ctx context.Context, state state.ReadOnlyBeaconState, epoch types.Epoch) error {
|
||||
for _, e := range []types.Epoch{epoch, epoch + 1} {
|
||||
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
|
||||
if err != nil {
|
||||
@@ -311,7 +311,7 @@ func UpdateCommitteeCache(state state.ReadOnlyBeaconState, epoch types.Epoch) er
|
||||
return sortedIndices[i] < sortedIndices[j]
|
||||
})
|
||||
|
||||
if err := committeeCache.AddCommitteeShuffledList(&cache.Committees{
|
||||
if err := committeeCache.AddCommitteeShuffledList(ctx, &cache.Committees{
|
||||
ShuffledIndices: shuffledIndices,
|
||||
CommitteeCount: uint64(params.BeaconConfig().SlotsPerEpoch.Mul(count)),
|
||||
Seed: seed,
|
||||
|
||||
@@ -386,7 +386,7 @@ func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, UpdateCommitteeCache(state, time.CurrentEpoch(state)))
|
||||
require.NoError(t, UpdateCommitteeCache(context.Background(), state, time.CurrentEpoch(state)))
|
||||
|
||||
epoch := types.Epoch(1)
|
||||
idx := types.CommitteeIndex(1)
|
||||
|
||||
@@ -76,6 +76,8 @@ func TotalActiveBalance(s state.ReadOnlyBeaconState) (uint64, error) {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// The spec uses `EffectiveBalanceIncrement` as the minimum total to avoid divisions by zero.
|
||||
total = mathutil.Max(params.BeaconConfig().EffectiveBalanceIncrement, total)
|
||||
if err := balanceCache.AddTotalEffectiveBalance(s, total); err != nil {
|
||||
return 0, err
|
||||
}
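// Minimal standalone sketch (not part of the diff) of why the clamp above matters: with no
// active validators the cached total would otherwise be zero, and any later
// balance-proportional division would divide by zero. The constant is the mainnet
// EffectiveBalanceIncrement and is assumed here only for illustration.
package main

import "fmt"

const effectiveBalanceIncrement = 1_000_000_000 // 1 ETH in Gwei (assumed value)

// clampTotal applies the same floor the code above applies before caching the total.
func clampTotal(total uint64) uint64 {
	if total < effectiveBalanceIncrement {
		return effectiveBalanceIncrement
	}
	return total
}

func main() {
	fmt.Println(clampTotal(0))              // 1000000000 rather than 0
	fmt.Println(clampTotal(64_000_000_000)) // unchanged
}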
|
||||
|
||||
@@ -74,6 +74,27 @@ func TestTotalActiveBalance(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestTotalActiveBal_ReturnMin(t *testing.T) {
|
||||
tests := []struct {
|
||||
vCount int
|
||||
}{
|
||||
{1},
|
||||
{10},
|
||||
{10000},
|
||||
}
|
||||
for _, test := range tests {
|
||||
validators := make([]*ethpb.Validator, 0)
|
||||
for i := 0; i < test.vCount; i++ {
|
||||
validators = append(validators, ðpb.Validator{EffectiveBalance: 1, ExitEpoch: 1})
|
||||
}
|
||||
state, err := v1.InitializeFromProto(ðpb.BeaconState{Validators: validators})
|
||||
require.NoError(t, err)
|
||||
bal, err := TotalActiveBalance(state)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().EffectiveBalanceIncrement, bal)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTotalActiveBalance_WithCache(t *testing.T) {
|
||||
tests := []struct {
|
||||
vCount int
|
||||
|
||||
@@ -126,7 +126,7 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := UpdateCommitteeCache(s, epoch); err != nil {
|
||||
if err := UpdateCommitteeCache(ctx, s, epoch); err != nil {
|
||||
return nil, errors.Wrap(err, "could not update committee cache")
|
||||
}
|
||||
|
||||
@@ -175,7 +175,7 @@ func ActiveValidatorCount(ctx context.Context, s state.ReadOnlyBeaconState, epoc
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if err := UpdateCommitteeCache(s, epoch); err != nil {
|
||||
if err := UpdateCommitteeCache(ctx, s, epoch); err != nil {
|
||||
return 0, errors.Wrap(err, "could not update committee cache")
|
||||
}
|
||||
|
||||
|
||||
@@ -301,7 +301,7 @@ func TestActiveValidatorCount_Genesis(t *testing.T) {
|
||||
// Preset cache to a bad count.
|
||||
seed, err := Seed(beaconState, 0, params.BeaconConfig().DomainBeaconAttester)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, committeeCache.AddCommitteeShuffledList(&cache.Committees{Seed: seed, ShuffledIndices: []types.ValidatorIndex{1, 2, 3}}))
|
||||
require.NoError(t, committeeCache.AddCommitteeShuffledList(context.Background(), &cache.Committees{Seed: seed, ShuffledIndices: []types.ValidatorIndex{1, 2, 3}}))
|
||||
validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, uint64(c), validatorCount, "Did not get the correct validator count")
|
||||
|
||||
@@ -4,6 +4,7 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"domain.go",
|
||||
"signature.go",
|
||||
"signing_root.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/core/signing",
|
||||
@@ -24,6 +25,7 @@ go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"domain_test.go",
|
||||
"signature_test.go",
|
||||
"signing_root_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
@@ -40,6 +42,7 @@ go_test(
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_google_gofuzz//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
33  beacon-chain/core/signing/signature.go  Normal file
@@ -0,0 +1,33 @@
package signing

import (
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/config/params"
	types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)

var ErrNilRegistration = errors.New("nil signed registration")

// VerifyRegistrationSignature verifies the signature of a validator's registration.
func VerifyRegistrationSignature(
	e types.Epoch,
	f *ethpb.Fork,
	sr *ethpb.SignedValidatorRegistrationV1,
	genesisRoot []byte,
) error {
	if sr == nil || sr.Message == nil {
		return ErrNilRegistration
	}

	d := params.BeaconConfig().DomainApplicationBuilder
	sd, err := Domain(f, e, d, genesisRoot)
	if err != nil {
		return err
	}

	if err := VerifySigningRoot(sr.Message, sr.Message.Pubkey, sr.Signature, sd); err != nil {
		return ErrSigFailedToVerify
	}
	return nil
}
44  beacon-chain/core/signing/signature_test.go  Normal file
@@ -0,0 +1,44 @@
|
||||
package signing_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
|
||||
func TestVerifyRegistrationSignature(t *testing.T) {
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
reg := ðpb.ValidatorRegistrationV1{
|
||||
FeeRecipient: bytesutil.PadTo([]byte("fee"), 20),
|
||||
GasLimit: 123456,
|
||||
Timestamp: uint64(time.Now().Unix()),
|
||||
Pubkey: sk.PublicKey().Marshal(),
|
||||
}
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
d := params.BeaconConfig().DomainApplicationBuilder
|
||||
e := slots.ToEpoch(st.Slot())
|
||||
sig, err := signing.ComputeDomainAndSign(st, e, reg, d, sk)
|
||||
require.NoError(t, err)
|
||||
sReg := ðpb.SignedValidatorRegistrationV1{
|
||||
Message: reg,
|
||||
Signature: sig,
|
||||
}
|
||||
f := st.Fork()
|
||||
g := st.GenesisValidatorsRoot()
|
||||
require.NoError(t, signing.VerifyRegistrationSignature(e, f, sReg, g))
|
||||
|
||||
sReg.Signature = []byte("bad")
|
||||
require.ErrorIs(t, signing.VerifyRegistrationSignature(e, f, sReg, g), signing.ErrSigFailedToVerify)
|
||||
|
||||
sReg.Message = nil
|
||||
require.ErrorIs(t, signing.VerifyRegistrationSignature(e, f, sReg, g), signing.ErrNilRegistration)
|
||||
}
|
||||
@@ -53,7 +53,7 @@ func BenchmarkExecuteStateTransition_WithCache(b *testing.B) {
|
||||
// some attestations in block are from previous epoch
|
||||
currentSlot := beaconState.Slot()
|
||||
require.NoError(b, beaconState.SetSlot(beaconState.Slot()-params.BeaconConfig().SlotsPerEpoch))
|
||||
require.NoError(b, helpers.UpdateCommitteeCache(beaconState, time.CurrentEpoch(beaconState)))
|
||||
require.NoError(b, helpers.UpdateCommitteeCache(context.Background(), beaconState, time.CurrentEpoch(beaconState)))
|
||||
require.NoError(b, beaconState.SetSlot(currentSlot))
|
||||
// Run the state transition once to populate the cache.
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
|
||||
@@ -81,7 +81,7 @@ func BenchmarkProcessEpoch_2FullEpochs(b *testing.B) {
|
||||
// some attestations in block are from previous epoch
|
||||
currentSlot := beaconState.Slot()
|
||||
require.NoError(b, beaconState.SetSlot(beaconState.Slot()-params.BeaconConfig().SlotsPerEpoch))
|
||||
require.NoError(b, helpers.UpdateCommitteeCache(beaconState, time.CurrentEpoch(beaconState)))
|
||||
require.NoError(b, helpers.UpdateCommitteeCache(context.Background(), beaconState, time.CurrentEpoch(beaconState)))
|
||||
require.NoError(b, beaconState.SetSlot(currentSlot))
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
@@ -23,14 +23,14 @@ type ReadOnlyDatabase interface {
|
||||
Block(ctx context.Context, blockRoot [32]byte) (interfaces.SignedBeaconBlock, error)
|
||||
Blocks(ctx context.Context, f *filters.QueryFilter) ([]interfaces.SignedBeaconBlock, [][32]byte, error)
|
||||
BlockRoots(ctx context.Context, f *filters.QueryFilter) ([][32]byte, error)
|
||||
BlocksBySlot(ctx context.Context, slot types.Slot) (bool, []interfaces.SignedBeaconBlock, error)
|
||||
BlocksBySlot(ctx context.Context, slot types.Slot) ([]interfaces.SignedBeaconBlock, error)
|
||||
BlockRootsBySlot(ctx context.Context, slot types.Slot) (bool, [][32]byte, error)
|
||||
HasBlock(ctx context.Context, blockRoot [32]byte) bool
|
||||
GenesisBlock(ctx context.Context) (interfaces.SignedBeaconBlock, error)
|
||||
GenesisBlockRoot(ctx context.Context) ([32]byte, error)
|
||||
IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool
|
||||
FinalizedChildBlock(ctx context.Context, blockRoot [32]byte) (interfaces.SignedBeaconBlock, error)
|
||||
HighestSlotBlocksBelow(ctx context.Context, slot types.Slot) ([]interfaces.SignedBeaconBlock, error)
|
||||
HighestRootsBelowSlot(ctx context.Context, slot types.Slot) (types.Slot, [][32]byte, error)
|
||||
// State related methods.
|
||||
State(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
|
||||
StateOrError(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
|
||||
@@ -86,6 +86,7 @@ type NoHeadAccessDatabase interface {
|
||||
RunMigrations(ctx context.Context) error
|
||||
// Fee recipient operations.
|
||||
SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []types.ValidatorIndex, addrs []common.Address) error
|
||||
SaveRegistrationsByValidatorIDs(ctx context.Context, ids []types.ValidatorIndex, regs []*ethpb.ValidatorRegistrationV1) error
|
||||
|
||||
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint types.Slot) error
|
||||
}
|
||||
|
||||
@@ -185,17 +185,19 @@ func (s *Store) HasBlock(ctx context.Context, blockRoot [32]byte) bool {
|
||||
}
|
||||
|
||||
// BlocksBySlot retrieves a list of beacon blocks by slot.
|
||||
func (s *Store) BlocksBySlot(ctx context.Context, slot types.Slot) (bool, []interfaces.SignedBeaconBlock, error) {
|
||||
func (s *Store) BlocksBySlot(ctx context.Context, slot types.Slot) ([]interfaces.SignedBeaconBlock, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlocksBySlot")
|
||||
defer span.End()
|
||||
blocks := make([]interfaces.SignedBeaconBlock, 0)
|
||||
|
||||
blocks := make([]interfaces.SignedBeaconBlock, 0)
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(blocksBucket)
|
||||
|
||||
keys := blockRootsBySlot(ctx, tx, slot)
|
||||
for i := 0; i < len(keys); i++ {
|
||||
encoded := bkt.Get(keys[i])
|
||||
roots, err := blockRootsBySlot(ctx, tx, slot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not retrieve blocks by slot")
|
||||
}
|
||||
for _, r := range roots {
|
||||
encoded := bkt.Get(r[:])
|
||||
blk, err := unmarshalBlock(ctx, encoded)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -204,7 +206,7 @@ func (s *Store) BlocksBySlot(ctx context.Context, slot types.Slot) (bool, []inte
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return len(blocks) > 0, blocks, err
|
||||
return blocks, err
|
||||
}
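// Caller-side sketch (hypothetical helper, not part of the diff): the boolean that
// BlocksBySlot used to return is now recovered from the slice length, so call sites that
// only need an existence check can wrap the new signature like this. It assumes the
// context and types imports already present in this package.
func hasBlocksAtSlot(ctx context.Context, s *Store, slot types.Slot) (bool, error) {
	blks, err := s.BlocksBySlot(ctx, slot)
	if err != nil {
		return false, err
	}
	return len(blks) > 0, nil
}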
|
||||
|
||||
// BlockRootsBySlot retrieves a list of beacon block roots by slot
|
||||
@@ -213,11 +215,9 @@ func (s *Store) BlockRootsBySlot(ctx context.Context, slot types.Slot) (bool, []
|
||||
defer span.End()
|
||||
blockRoots := make([][32]byte, 0)
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
keys := blockRootsBySlot(ctx, tx, slot)
|
||||
for i := 0; i < len(keys); i++ {
|
||||
blockRoots = append(blockRoots, bytesutil.ToBytes32(keys[i]))
|
||||
}
|
||||
return nil
|
||||
var err error
|
||||
blockRoots, err = blockRootsBySlot(ctx, tx, slot)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return false, nil, errors.Wrap(err, "could not retrieve block roots by slot")
|
||||
@@ -398,50 +398,63 @@ func (s *Store) SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) e
|
||||
})
|
||||
}
|
||||
|
||||
// HighestSlotBlocksBelow returns the block with the highest slot below the input slot from the db.
|
||||
func (s *Store) HighestSlotBlocksBelow(ctx context.Context, slot types.Slot) ([]interfaces.SignedBeaconBlock, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.HighestSlotBlocksBelow")
|
||||
// HighestRootsBelowSlot returns block roots from the database slot index at the highest slot strictly below the input slot.
// The first return value is the slot at which those roots were found. This is helpful so that
// calling code can make decisions based on the slot without resolving the blocks to discover their slot (for instance
// checking which root is canonical in fork choice, which operates purely on roots,
// then, if no canonical block is found, continuing to search through lower slots).
func (s *Store) HighestRootsBelowSlot(ctx context.Context, slot types.Slot) (fs types.Slot, roots [][32]byte, err error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.HighestRootsBelowSlot")
|
||||
defer span.End()
|
||||
|
||||
var best []byte
|
||||
if err := s.db.View(func(tx *bolt.Tx) error {
|
||||
sk := bytesutil.Uint64ToBytesBigEndian(uint64(slot))
|
||||
err = s.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(blockSlotIndicesBucket)
|
||||
// Iterate through the index, which is in byte sorted order.
|
||||
c := bkt.Cursor()
|
||||
for s, root := c.First(); s != nil; s, root = c.Next() {
|
||||
// The documentation for Seek says:
|
||||
// "If the key does not exist then the next key is used. If no keys follow, a nil key is returned."
|
||||
seekPast := func(ic *bolt.Cursor, k []byte) ([]byte, []byte) {
|
||||
ik, iv := ic.Seek(k)
|
||||
// So if there are slots in the index higher than the requested slot, sl will be equal to the key that is
|
||||
// one higher than the value we want. If the slot argument is higher than the highest value in the index,
|
||||
// we'll get a nil value for `sl`. In that case we'll go backwards from Cursor.Last().
|
||||
if ik == nil {
|
||||
return ic.Last()
|
||||
}
|
||||
return ik, iv
|
||||
}
|
||||
// Regarding the loop condition: when .Prev() rewinds past the beginning of the collection, the loop will terminate,
|
||||
// because `sl` will be nil. If we don't find a value for `root` before iteration ends,
|
||||
// `root` will be the zero value, in which case this function will return the genesis block.
|
||||
for sl, r := seekPast(c, sk); sl != nil; sl, r = c.Prev() {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
key := bytesutil.BytesToSlotBigEndian(s)
|
||||
if root == nil {
|
||||
if r == nil {
|
||||
continue
|
||||
}
|
||||
if key >= slot {
|
||||
break
|
||||
fs = bytesutil.BytesToSlotBigEndian(sl)
|
||||
// Iterating through the index using .Prev will move from higher to lower, so the first key we find behind
|
||||
// the requested slot must be the highest block below that slot.
|
||||
if slot > fs {
|
||||
roots, err = splitRoots(r)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error parsing packed roots %#x", r)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
best = root
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
})
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
if len(roots) == 0 || (len(roots) == 1 && roots[0] == params.BeaconConfig().ZeroHash) {
|
||||
gr, err := s.GenesisBlockRoot(ctx)
|
||||
return 0, [][32]byte{gr}, err
|
||||
}
|
||||
|
||||
var blk interfaces.SignedBeaconBlock
|
||||
var err error
|
||||
if best != nil {
|
||||
blk, err = s.Block(ctx, bytesutil.ToBytes32(best))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if blk == nil || blk.IsNil() {
|
||||
blk, err = s.GenesisBlock(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return []interfaces.SignedBeaconBlock{blk}, nil
|
||||
return fs, roots, nil
|
||||
}
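// Generic sketch (not part of the diff) of the cursor walk used above: Seek lands on the
// first key >= target, or nil when the target is past the end of the index (in which case
// we fall back to Last), and Prev then visits keys from higher slots to lower ones until
// the callback stops the walk. It assumes go.etcd.io/bbolt, already imported here as bolt.
func walkBackwardsFrom(c *bolt.Cursor, target []byte, visit func(k, v []byte) (stop bool)) {
	k, v := c.Seek(target)
	if k == nil {
		k, v = c.Last()
	}
	for ; k != nil; k, v = c.Prev() {
		if visit(k, v) {
			return
		}
	}
}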
|
||||
|
||||
// FeeRecipientByValidatorID returns the fee recipient for a validator id.
|
||||
@@ -453,8 +466,20 @@ func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id types.Validato
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(feeRecipientBucket)
|
||||
addr = bkt.Get(bytesutil.Uint64ToBytesBigEndian(uint64(id)))
|
||||
// If the fee recipient is not found in the standard fee recipient bucket, then
// check the registration bucket. The fee recipient may be there.
// This is to resolve incompatibility until we fully migrate to the registration bucket.
|
||||
if addr == nil {
|
||||
return errors.Wrapf(ErrNotFoundFeeRecipient, "validator id %d", id)
|
||||
bkt = tx.Bucket(registrationBucket)
|
||||
enc := bkt.Get(bytesutil.Uint64ToBytesBigEndian(uint64(id)))
|
||||
if enc == nil {
|
||||
return errors.Wrapf(ErrNotFoundFeeRecipient, "validator id %d", id)
|
||||
}
|
||||
reg := ðpb.ValidatorRegistrationV1{}
|
||||
if err := decode(ctx, enc, reg); err != nil {
|
||||
return err
|
||||
}
|
||||
addr = reg.FeeRecipient
|
||||
}
|
||||
return nil
|
||||
})
|
||||
@@ -482,6 +507,48 @@ func (s *Store) SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []types
|
||||
})
|
||||
}
|
||||
|
||||
// RegistrationByValidatorID returns the validator registration object for a validator id.
|
||||
// `ErrNotFoundFeeRecipient` is returned if the validator id is not found.
|
||||
func (s *Store) RegistrationByValidatorID(ctx context.Context, id types.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.RegistrationByValidatorID")
|
||||
defer span.End()
|
||||
reg := ðpb.ValidatorRegistrationV1{}
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(registrationBucket)
|
||||
enc := bkt.Get(bytesutil.Uint64ToBytesBigEndian(uint64(id)))
|
||||
if enc == nil {
|
||||
return errors.Wrapf(ErrNotFoundFeeRecipient, "validator id %d", id)
|
||||
}
|
||||
return decode(ctx, enc, reg)
|
||||
})
|
||||
return reg, err
|
||||
}
|
||||
|
||||
// SaveRegistrationsByValidatorIDs saves the validator registrations for validator ids.
|
||||
// Error is returned if `ids` and `registrations` are not the same length.
|
||||
func (s *Store) SaveRegistrationsByValidatorIDs(ctx context.Context, ids []types.ValidatorIndex, regs []*ethpb.ValidatorRegistrationV1) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.SaveRegistrationsByValidatorIDs")
|
||||
defer span.End()
|
||||
|
||||
if len(ids) != len(regs) {
|
||||
return errors.New("ids and registrations must be the same length")
|
||||
}
|
||||
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(registrationBucket)
|
||||
for i, id := range ids {
|
||||
enc, err := encode(ctx, regs[i])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := bkt.Put(bytesutil.Uint64ToBytesBigEndian(uint64(id)), enc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
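// Round-trip sketch (illustrative only; assumes a *Store opened the same way the tests
// below open one, plus the ethpb, types and bytesutil imports already used in this file):
// save a single registration and read it back by validator index.
func saveAndLoadRegistration(ctx context.Context, s *Store) (*ethpb.ValidatorRegistrationV1, error) {
	reg := &ethpb.ValidatorRegistrationV1{
		FeeRecipient: bytesutil.PadTo([]byte("fee"), 20),
		GasLimit:     30_000_000,
		Pubkey:       bytesutil.PadTo([]byte("pk"), 48),
	}
	if err := s.SaveRegistrationsByValidatorIDs(ctx, []types.ValidatorIndex{7}, []*ethpb.ValidatorRegistrationV1{reg}); err != nil {
		return nil, err
	}
	return s.RegistrationByValidatorID(ctx, 7)
}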
|
||||
|
||||
// blockRootsByFilter retrieves the block roots given the filter criteria.
|
||||
func blockRootsByFilter(ctx context.Context, tx *bolt.Tx, f *filters.QueryFilter) ([][]byte, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.blockRootsByFilter")
|
||||
@@ -611,21 +678,22 @@ func blockRootsBySlotRange(
|
||||
}
|
||||
|
||||
// blockRootsBySlot retrieves the block roots by slot
|
||||
func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot types.Slot) [][]byte {
|
||||
func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot types.Slot) ([][32]byte, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlot")
|
||||
defer span.End()
|
||||
|
||||
roots := make([][]byte, 0)
|
||||
bkt := tx.Bucket(blockSlotIndicesBucket)
|
||||
key := bytesutil.SlotToBytesBigEndian(slot)
|
||||
c := bkt.Cursor()
|
||||
k, v := c.Seek(key)
|
||||
if k != nil && bytes.Equal(k, key) {
|
||||
for i := 0; i < len(v); i += 32 {
|
||||
roots = append(roots, v[i:i+32])
|
||||
r, err := splitRoots(v)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "corrupt value in block slot index for slot=%d", slot)
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
return roots
|
||||
return [][32]byte{}, nil
|
||||
}
|
||||
|
||||
// createBlockIndicesFromBlock takes in a beacon block and returns
|
||||
|
||||
@@ -517,18 +517,32 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
|
||||
require.NoError(t, db.SaveBlock(ctx, block2))
|
||||
require.NoError(t, db.SaveBlock(ctx, block3))
|
||||
|
||||
highestAt, err := db.HighestSlotBlocksBelow(ctx, 2)
|
||||
_, roots, err := db.HighestRootsBelowSlot(ctx, 2)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, len(highestAt) <= 0, "Got empty highest at slice")
|
||||
assert.Equal(t, true, proto.Equal(block1.Proto(), highestAt[0].Proto()), "Wanted: %v, received: %v", block1, highestAt[0])
|
||||
highestAt, err = db.HighestSlotBlocksBelow(ctx, 11)
|
||||
assert.Equal(t, false, len(roots) <= 0, "Got empty highest at slice")
|
||||
require.Equal(t, 1, len(roots))
|
||||
root := roots[0]
|
||||
b, err := db.Block(ctx, root)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, len(highestAt) <= 0, "Got empty highest at slice")
|
||||
assert.Equal(t, true, proto.Equal(block2.Proto(), highestAt[0].Proto()), "Wanted: %v, received: %v", block2, highestAt[0])
|
||||
highestAt, err = db.HighestSlotBlocksBelow(ctx, 101)
|
||||
assert.Equal(t, true, proto.Equal(block1.Proto(), b.Proto()), "Wanted: %v, received: %v", block1, b)
|
||||
|
||||
_, roots, err = db.HighestRootsBelowSlot(ctx, 11)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, len(highestAt) <= 0, "Got empty highest at slice")
|
||||
assert.Equal(t, true, proto.Equal(block3.Proto(), highestAt[0].Proto()), "Wanted: %v, received: %v", block3, highestAt[0])
|
||||
assert.Equal(t, false, len(roots) <= 0, "Got empty highest at slice")
|
||||
require.Equal(t, 1, len(roots))
|
||||
root = roots[0]
|
||||
b, err = db.Block(ctx, root)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(block2.Proto(), b.Proto()), "Wanted: %v, received: %v", block2, b)
|
||||
|
||||
_, roots, err = db.HighestRootsBelowSlot(ctx, 101)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, len(roots) <= 0, "Got empty highest at slice")
|
||||
require.Equal(t, 1, len(roots))
|
||||
root = roots[0]
|
||||
b, err = db.Block(ctx, root)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(block3.Proto(), b.Proto()), "Wanted: %v, received: %v", block3, b)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -549,15 +563,29 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, block1))
|
||||
|
||||
highestAt, err := db.HighestSlotBlocksBelow(ctx, 2)
|
||||
_, roots, err := db.HighestRootsBelowSlot(ctx, 2)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(block1.Proto(), highestAt[0].Proto()), "Wanted: %v, received: %v", block1, highestAt[0])
|
||||
highestAt, err = db.HighestSlotBlocksBelow(ctx, 1)
|
||||
require.Equal(t, 1, len(roots))
|
||||
root := roots[0]
|
||||
b, err := db.Block(ctx, root)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(genesisBlock.Proto(), highestAt[0].Proto()), "Wanted: %v, received: %v", genesisBlock, highestAt[0])
|
||||
highestAt, err = db.HighestSlotBlocksBelow(ctx, 0)
|
||||
assert.Equal(t, true, proto.Equal(block1.Proto(), b.Proto()), "Wanted: %v, received: %v", block1, b)
|
||||
|
||||
_, roots, err = db.HighestRootsBelowSlot(ctx, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(genesisBlock.Proto(), highestAt[0].Proto()), "Wanted: %v, received: %v", genesisBlock, highestAt[0])
|
||||
require.Equal(t, 1, len(roots))
|
||||
root = roots[0]
|
||||
b, err = db.Block(ctx, root)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(genesisBlock.Proto(), b.Proto()), "Wanted: %v, received: %v", genesisBlock, b)
|
||||
|
||||
_, roots, err = db.HighestRootsBelowSlot(ctx, 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(roots))
|
||||
root = roots[0]
|
||||
b, err = db.Block(ctx, root)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(genesisBlock.Proto(), b.Proto()), "Wanted: %v, received: %v", genesisBlock, b)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -638,22 +666,21 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
|
||||
r3, err := b3.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
hasBlocks, retrievedBlocks, err := db.BlocksBySlot(ctx, 1)
|
||||
retrievedBlocks, err := db.BlocksBySlot(ctx, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(retrievedBlocks), "Unexpected number of blocks received, expected none")
|
||||
assert.Equal(t, false, hasBlocks, "Expected no blocks")
|
||||
hasBlocks, retrievedBlocks, err = db.BlocksBySlot(ctx, 20)
|
||||
retrievedBlocks, err = db.BlocksBySlot(ctx, 20)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(b1.Proto(), retrievedBlocks[0].Proto()), "Wanted: %v, received: %v", b1, retrievedBlocks[0])
|
||||
assert.Equal(t, true, hasBlocks, "Expected to have blocks")
|
||||
hasBlocks, retrievedBlocks, err = db.BlocksBySlot(ctx, 100)
|
||||
assert.Equal(t, true, len(retrievedBlocks) > 0, "Expected to have blocks")
|
||||
retrievedBlocks, err = db.BlocksBySlot(ctx, 100)
|
||||
require.NoError(t, err)
|
||||
if len(retrievedBlocks) != 2 {
|
||||
t.Fatalf("Expected 2 blocks, received %d blocks", len(retrievedBlocks))
|
||||
}
|
||||
assert.Equal(t, true, proto.Equal(b2.Proto(), retrievedBlocks[0].Proto()), "Wanted: %v, received: %v", b2, retrievedBlocks[0])
|
||||
assert.Equal(t, true, proto.Equal(b3.Proto(), retrievedBlocks[1].Proto()), "Wanted: %v, received: %v", b3, retrievedBlocks[1])
|
||||
assert.Equal(t, true, hasBlocks, "Expected to have blocks")
|
||||
assert.Equal(t, true, len(retrievedBlocks) > 0, "Expected to have blocks")
|
||||
|
||||
hasBlockRoots, retrievedBlockRoots, err := db.BlockRootsBySlot(ctx, 1)
|
||||
require.NoError(t, err)
|
||||
@@ -693,4 +720,78 @@ func TestStore_FeeRecipientByValidatorID(t *testing.T) {
|
||||
_, err = db.FeeRecipientByValidatorID(ctx, 3)
|
||||
want := errors.Wrap(ErrNotFoundFeeRecipient, "validator id 3")
|
||||
require.Equal(t, want.Error(), err.Error())
|
||||
|
||||
regs := []*ethpb.ValidatorRegistrationV1{
|
||||
{
|
||||
FeeRecipient: bytesutil.PadTo([]byte("a"), 20),
|
||||
GasLimit: 1,
|
||||
Timestamp: 2,
|
||||
Pubkey: bytesutil.PadTo([]byte("b"), 48),
|
||||
}}
|
||||
require.NoError(t, db.SaveRegistrationsByValidatorIDs(ctx, []types.ValidatorIndex{3}, regs))
|
||||
f, err = db.FeeRecipientByValidatorID(ctx, 3)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, common.Address{'a'}, f)
|
||||
|
||||
_, err = db.FeeRecipientByValidatorID(ctx, 4)
|
||||
want = errors.Wrap(ErrNotFoundFeeRecipient, "validator id 4")
|
||||
require.Equal(t, want.Error(), err.Error())
|
||||
}
|
||||
|
||||
func TestStore_RegistrationsByValidatorID(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
ctx := context.Background()
|
||||
ids := []types.ValidatorIndex{0, 0, 0}
|
||||
regs := []*ethpb.ValidatorRegistrationV1{{}, {}, {}, {}}
|
||||
require.ErrorContains(t, "ids and registrations must be the same length", db.SaveRegistrationsByValidatorIDs(ctx, ids, regs))
|
||||
|
||||
ids = []types.ValidatorIndex{0, 1, 2}
|
||||
regs = []*ethpb.ValidatorRegistrationV1{
|
||||
{
|
||||
FeeRecipient: bytesutil.PadTo([]byte("a"), 20),
|
||||
GasLimit: 1,
|
||||
Timestamp: 2,
|
||||
Pubkey: bytesutil.PadTo([]byte("b"), 48),
|
||||
},
|
||||
{
|
||||
FeeRecipient: bytesutil.PadTo([]byte("c"), 20),
|
||||
GasLimit: 3,
|
||||
Timestamp: 4,
|
||||
Pubkey: bytesutil.PadTo([]byte("d"), 48),
|
||||
},
|
||||
{
|
||||
FeeRecipient: bytesutil.PadTo([]byte("e"), 20),
|
||||
GasLimit: 5,
|
||||
Timestamp: 6,
|
||||
Pubkey: bytesutil.PadTo([]byte("f"), 48),
|
||||
},
|
||||
}
|
||||
require.NoError(t, db.SaveRegistrationsByValidatorIDs(ctx, ids, regs))
|
||||
f, err := db.RegistrationByValidatorID(ctx, 0)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, ðpb.ValidatorRegistrationV1{
|
||||
FeeRecipient: bytesutil.PadTo([]byte("a"), 20),
|
||||
GasLimit: 1,
|
||||
Timestamp: 2,
|
||||
Pubkey: bytesutil.PadTo([]byte("b"), 48),
|
||||
}, f)
|
||||
f, err = db.RegistrationByValidatorID(ctx, 1)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, ðpb.ValidatorRegistrationV1{
|
||||
FeeRecipient: bytesutil.PadTo([]byte("c"), 20),
|
||||
GasLimit: 3,
|
||||
Timestamp: 4,
|
||||
Pubkey: bytesutil.PadTo([]byte("d"), 48),
|
||||
}, f)
|
||||
f, err = db.RegistrationByValidatorID(ctx, 2)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, ðpb.ValidatorRegistrationV1{
|
||||
FeeRecipient: bytesutil.PadTo([]byte("e"), 20),
|
||||
GasLimit: 5,
|
||||
Timestamp: 6,
|
||||
Pubkey: bytesutil.PadTo([]byte("f"), 48),
|
||||
}, f)
|
||||
_, err = db.RegistrationByValidatorID(ctx, 3)
|
||||
want := errors.Wrap(ErrNotFoundFeeRecipient, "validator id 3")
|
||||
require.Equal(t, want.Error(), err.Error())
|
||||
}
|
||||
|
||||
@@ -70,6 +70,8 @@ func isSSZStorageFormat(obj interface{}) bool {
|
||||
return true
|
||||
case *ethpb.VoluntaryExit:
|
||||
return true
|
||||
case *ethpb.ValidatorRegistrationV1:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -105,7 +105,6 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
|
||||
}
|
||||
}
|
||||
datafile := KVStoreDatafilePath(dirPath)
|
||||
start := time.Now()
|
||||
log.Infof("Opening Bolt DB at %s", datafile)
|
||||
boltDB, err := bolt.Open(
|
||||
datafile,
|
||||
@@ -116,40 +115,29 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
log.WithField("elapsed", time.Since(start)).Error("Failed to open Bolt DB")
|
||||
if errors.Is(err, bolt.ErrTimeout) {
|
||||
return nil, errors.New("cannot obtain database lock, database may be in use by another process")
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
log.WithField("elapsed", time.Since(start)).Info("Opened Bolt DB")
|
||||
|
||||
boltDB.AllocSize = boltAllocSize
|
||||
start = time.Now()
|
||||
log.Infof("Creating block cache...")
|
||||
blockCache, err := ristretto.NewCache(&ristretto.Config{
|
||||
NumCounters: 1000, // number of keys to track frequency of (1000).
|
||||
MaxCost: BlockCacheSize, // maximum cost of cache (1000 Blocks).
|
||||
BufferItems: 64, // number of keys per Get buffer.
|
||||
})
|
||||
if err != nil {
|
||||
log.WithField("elapsed", time.Since(start)).Error("Failed to create block cache")
|
||||
return nil, err
|
||||
}
|
||||
log.WithField("elapsed", time.Since(start)).Info("Created block cache")
|
||||
|
||||
start = time.Now()
|
||||
log.Infof("Creating validator cache...")
|
||||
validatorCache, err := ristretto.NewCache(&ristretto.Config{
|
||||
NumCounters: NumOfValidatorEntries, // number of entries in cache (2 Million).
|
||||
MaxCost: ValidatorEntryMaxCost, // maximum size of the cache (64Mb)
|
||||
BufferItems: 64, // number of keys per Get buffer.
|
||||
})
|
||||
if err != nil {
|
||||
log.WithField("elapsed", time.Since(start)).Error("Failed to to create validator cache")
|
||||
return nil, err
|
||||
}
|
||||
log.WithField("elapsed", time.Since(start)).Info("Created validator cache")
|
||||
|
||||
kv := &Store{
|
||||
db: boltDB,
|
||||
@@ -159,8 +147,6 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
|
||||
stateSummaryCache: newStateSummaryCache(),
|
||||
ctx: ctx,
|
||||
}
|
||||
start = time.Now()
|
||||
log.Infof("Updating DB and creating buckets...")
|
||||
if err := kv.db.Update(func(tx *bolt.Tx) error {
|
||||
return createBuckets(
|
||||
tx,
|
||||
@@ -192,15 +178,12 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
|
||||
migrationsBucket,
|
||||
|
||||
feeRecipientBucket,
|
||||
registrationBucket,
|
||||
)
|
||||
}); err != nil {
|
||||
log.WithField("elapsed", time.Since(start)).Error("Failed to update db and create buckets")
|
||||
return nil, err
|
||||
}
|
||||
log.WithField("elapsed", time.Since(start)).Info("Updated db and created buckets")
|
||||
|
||||
err = prometheus.Register(createBoltCollector(kv.db))
|
||||
|
||||
return kv, err
|
||||
}
|
||||
|
||||
|
||||
@@ -19,6 +19,7 @@ var (
|
||||
powchainBucket = []byte("powchain")
|
||||
stateValidatorsBucket = []byte("state-validators")
|
||||
feeRecipientBucket = []byte("fee-recipient")
|
||||
registrationBucket = []byte("registration")
|
||||
|
||||
// Deprecated: This bucket was migrated in PR 6461. Do not use, except for migrations.
|
||||
slotsHasObjectBucket = []byte("slots-has-objects")
|
||||
|
||||
@@ -162,12 +162,27 @@ func (s *Store) SaveStatesEfficient(ctx context.Context, states []state.ReadOnly
|
||||
if states == nil {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
validatorKeys, validatorsEntries, err := getValidators(states)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.db.Update(func(tx *bolt.Tx) error {
|
||||
return s.saveStatesEfficientInternal(ctx, tx, blockRoots, states, validatorKeys, validatorsEntries)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
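// Generic sketch of the shape of the refactor above (helper names are illustrative): the
// expensive, read-only preparation, such as hashing validators, happens before the bolt
// write transaction is opened, and the single Update call performs only writes, keeping
// the write lock held as briefly as possible.
func saveWithShortTx(db *bolt.DB, prepare func() ([]byte, error), write func(tx *bolt.Tx, payload []byte) error) error {
	payload, err := prepare() // heavy work done outside the transaction
	if err != nil {
		return err
	}
	return db.Update(func(tx *bolt.Tx) error { // lock held only for the writes
		return write(tx, payload)
	})
}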
|
||||
|
||||
func getValidators(states []state.ReadOnlyBeaconState) ([][]byte, map[string]*ethpb.Validator, error) {
|
||||
validatorsEntries := make(map[string]*ethpb.Validator) // It's a map to make sure that you store only new validator entries.
|
||||
validatorKeys := make([][]byte, len(states)) // For every state, this stores a compressed list of validator keys.
|
||||
for i, st := range states {
|
||||
pb, ok := st.InnerStateUnsafe().(withValidators)
|
||||
if !ok {
|
||||
return errors.New("could not cast state to interface with GetValidators()")
|
||||
return nil, nil, errors.New("could not cast state to interface with GetValidators()")
|
||||
}
|
||||
validators := pb.GetValidators()
|
||||
|
||||
@@ -177,7 +192,7 @@ func (s *Store) SaveStatesEfficient(ctx context.Context, states []state.ReadOnly
|
||||
// create the unique hash for that validator entry.
|
||||
hash, hashErr := val.HashTreeRoot()
|
||||
if hashErr != nil {
|
||||
return hashErr
|
||||
return nil, nil, hashErr
|
||||
}
|
||||
hashes = append(hashes, hash[:]...)
|
||||
|
||||
@@ -187,117 +202,113 @@ func (s *Store) SaveStatesEfficient(ctx context.Context, states []state.ReadOnly
|
||||
}
|
||||
validatorKeys[i] = snappy.Encode(nil, hashes)
|
||||
}
|
||||
return validatorKeys, validatorsEntries, nil
|
||||
}
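// Minimal sketch of the dedup idea behind validatorsEntries above: many states share the
// same validator records, so keying the map by each entry's hash-tree-root stores every
// distinct validator only once, no matter how many states reference it. The helper name
// is illustrative; it assumes the ethpb import already used in this file.
func dedupValidators(validators []*ethpb.Validator) (map[string]*ethpb.Validator, error) {
	out := make(map[string]*ethpb.Validator, len(validators))
	for _, v := range validators {
		h, err := v.HashTreeRoot() // same key the code above stores under
		if err != nil {
			return nil, err
		}
		out[string(h[:])] = v
	}
	return out, nil
}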
|
||||
|
||||
if err := s.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(stateBucket)
|
||||
valIdxBkt := tx.Bucket(blockRootValidatorHashesBucket)
|
||||
for i, rt := range blockRoots {
|
||||
indicesByBucket := createStateIndicesFromStateSlot(ctx, states[i].Slot())
|
||||
if err := updateValueForIndices(ctx, indicesByBucket, rt[:], tx); err != nil {
|
||||
return errors.Wrap(err, "could not update DB indices")
|
||||
}
|
||||
|
||||
// There is a gap when the states that are passed are used outside this
// thread. But while storing the state object, we should not store the
// validator entries. To narrow the gap, we empty the validators
// just before Put() and repopulate that state with the original validators.
// See issue https://github.com/prysmaticlabs/prysm/issues/9262.
|
||||
switch rawType := states[i].InnerStateUnsafe().(type) {
|
||||
case *ethpb.BeaconState:
|
||||
var pbState *ethpb.BeaconState
|
||||
var err error
|
||||
if features.Get().EnableNativeState {
|
||||
pbState, err = state_native.ProtobufBeaconStatePhase0(rawType)
|
||||
} else {
|
||||
pbState, err = v1.ProtobufBeaconState(rawType)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pbState == nil {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
encodedState, err := encode(ctx, pbState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := bucket.Put(rt[:], encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
case *ethpb.BeaconStateAltair:
|
||||
var pbState *ethpb.BeaconStateAltair
|
||||
var err error
|
||||
if features.Get().EnableNativeState {
|
||||
pbState, err = state_native.ProtobufBeaconStateAltair(rawType)
|
||||
} else {
|
||||
pbState, err = v2.ProtobufBeaconState(rawType)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pbState == nil {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
rawObj, err := pbState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
encodedState := snappy.Encode(nil, append(altairKey, rawObj...))
|
||||
if err := bucket.Put(rt[:], encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
case *ethpb.BeaconStateBellatrix:
|
||||
var pbState *ethpb.BeaconStateBellatrix
|
||||
var err error
|
||||
if features.Get().EnableNativeState {
|
||||
pbState, err = state_native.ProtobufBeaconStateBellatrix(rawType)
|
||||
} else {
|
||||
pbState, err = v3.ProtobufBeaconState(rawType)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pbState == nil {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
rawObj, err := pbState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
encodedState := snappy.Encode(nil, append(bellatrixKey, rawObj...))
|
||||
if err := bucket.Put(rt[:], encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return errors.New("invalid state type")
|
||||
}
|
||||
func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, blockRoots [][32]byte, states []state.ReadOnlyBeaconState, validatorKeys [][]byte, validatorsEntries map[string]*ethpb.Validator) error {
|
||||
bucket := tx.Bucket(stateBucket)
|
||||
valIdxBkt := tx.Bucket(blockRootValidatorHashesBucket)
|
||||
for i, rt := range blockRoots {
|
||||
indicesByBucket := createStateIndicesFromStateSlot(ctx, states[i].Slot())
|
||||
if err := updateValueForIndices(ctx, indicesByBucket, rt[:], tx); err != nil {
|
||||
return errors.Wrap(err, "could not update DB indices")
|
||||
}
|
||||
|
||||
// store the validator entries separately to save space.
|
||||
return s.storeValidatorEntriesSeparately(ctx, tx, validatorsEntries)
|
||||
}); err != nil {
|
||||
return err
|
||||
// There is a gap when the states that are passed are used outside this
// thread. But while storing the state object, we should not store the
// validator entries. To narrow the gap, we empty the validators
// just before Put() and repopulate that state with the original validators.
// See issue https://github.com/prysmaticlabs/prysm/issues/9262.
|
||||
switch rawType := states[i].InnerStateUnsafe().(type) {
|
||||
case *ethpb.BeaconState:
|
||||
var pbState *ethpb.BeaconState
|
||||
var err error
|
||||
if features.Get().EnableNativeState {
|
||||
pbState, err = state_native.ProtobufBeaconStatePhase0(rawType)
|
||||
} else {
|
||||
pbState, err = v1.ProtobufBeaconState(rawType)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pbState == nil {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
encodedState, err := encode(ctx, pbState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := bucket.Put(rt[:], encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
case *ethpb.BeaconStateAltair:
|
||||
var pbState *ethpb.BeaconStateAltair
|
||||
var err error
|
||||
if features.Get().EnableNativeState {
|
||||
pbState, err = state_native.ProtobufBeaconStateAltair(rawType)
|
||||
} else {
|
||||
pbState, err = v2.ProtobufBeaconState(rawType)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pbState == nil {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
rawObj, err := pbState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
encodedState := snappy.Encode(nil, append(altairKey, rawObj...))
|
||||
if err := bucket.Put(rt[:], encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
case *ethpb.BeaconStateBellatrix:
|
||||
var pbState *ethpb.BeaconStateBellatrix
|
||||
var err error
|
||||
if features.Get().EnableNativeState {
|
||||
pbState, err = state_native.ProtobufBeaconStateBellatrix(rawType)
|
||||
} else {
|
||||
pbState, err = v3.ProtobufBeaconState(rawType)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pbState == nil {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
rawObj, err := pbState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
encodedState := snappy.Encode(nil, append(bellatrixKey, rawObj...))
|
||||
if err := bucket.Put(rt[:], encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return errors.New("invalid state type")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
// store the validator entries separately to save space.
|
||||
return s.storeValidatorEntriesSeparately(ctx, tx, validatorsEntries)
|
||||
}
|
||||
|
||||
func (s *Store) storeValidatorEntriesSeparately(ctx context.Context, tx *bolt.Tx, validatorsEntries map[string]*ethpb.Validator) error {
|
||||
|
||||
@@ -4,6 +4,8 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
@@ -99,3 +101,16 @@ func deleteValueForIndices(ctx context.Context, indicesByBucket map[string][]byt
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var errMisalignedRootList = errors.New("incorrectly packed root list, length is not a multiple of 32")
|
||||
|
||||
func splitRoots(b []byte) ([][32]byte, error) {
|
||||
rl := make([][32]byte, 0)
|
||||
if len(b)%32 != 0 {
|
||||
return nil, errors.Wrapf(errMisalignedRootList, "root list len=%d", len(b))
|
||||
}
|
||||
for s, f := 0, 32; f <= len(b); s, f = f, f+32 {
|
||||
rl = append(rl, bytesutil.ToBytes32(b[s:f]))
|
||||
}
|
||||
return rl, nil
|
||||
}
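// Usage sketch (illustrative, same package): the value stored in the block slot index is
// simply the 32-byte roots concatenated back to back, so packing and splitting are inverse
// operations; malformed input surfaces as errMisalignedRootList.
func packAndSplit(roots [][32]byte) ([][32]byte, error) {
	packed := make([]byte, 0, len(roots)*32)
	for _, r := range roots {
		packed = append(packed, r[:]...)
	}
	return splitRoots(packed) // returns the original roots
}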
|
||||
|
||||
@@ -138,3 +138,60 @@ func Test_deleteValueForIndices(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testPack(bs [][32]byte) []byte {
|
||||
r := make([]byte, 0)
|
||||
for _, b := range bs {
|
||||
r = append(r, b[:]...)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func TestSplitRoots(t *testing.T) {
|
||||
bt := make([][32]byte, 0)
|
||||
for _, x := range []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} {
|
||||
var b [32]byte
|
||||
for i := 0; i < 32; i++ {
|
||||
b[i] = x
|
||||
}
|
||||
bt = append(bt, b)
|
||||
}
|
||||
cases := []struct {
|
||||
name string
|
||||
b []byte
|
||||
expect [][32]byte
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "misaligned",
|
||||
b: make([]byte, 61),
|
||||
err: errMisalignedRootList,
|
||||
},
|
||||
{
|
||||
name: "happy",
|
||||
b: testPack(bt[0:5]),
|
||||
expect: bt[0:5],
|
||||
},
|
||||
{
|
||||
name: "single",
|
||||
b: testPack([][32]byte{bt[0]}),
|
||||
expect: [][32]byte{bt[0]},
|
||||
},
|
||||
{
|
||||
name: "empty",
|
||||
b: []byte{},
|
||||
expect: [][32]byte{},
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
r, err := splitRoots(c.b)
|
||||
if c.err != nil {
|
||||
require.ErrorIs(t, err, c.err)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, c.expect, r)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"doc.go",
|
||||
"error.go",
|
||||
"interfaces.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice",
|
||||
@@ -13,8 +14,10 @@ go_library(
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -20,12 +20,16 @@ go_library(
|
||||
"//testing/spectest:__subpackages__",
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/forkchoice:go_default_library",
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
@@ -49,13 +53,18 @@ go_test(
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/forkchoice:go_default_library",
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/v3:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -12,3 +12,4 @@ var errUnknownPayloadHash = errors.New("unknown payload hash")
var errInvalidNilCheckpoint = errors.New("invalid nil checkpoint")
var errInvalidUnrealizedJustifiedEpoch = errors.New("invalid unrealized justified epoch")
var errInvalidUnrealizedFinalizedEpoch = errors.New("invalid unrealized finalized epoch")
var errNilBlockHeader = errors.New("invalid nil block header")

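Later hunks in this compare wrap these sentinels (for example errors.Wrap(ErrNilNode, ...)) while the tests keep asserting with require.ErrorIs. A small illustration of why that still matches, assuming pkg/errors >= v0.9, which implements Unwrap; this snippet is not part of the PR:

```go
package main

import (
	stderrors "errors"
	"fmt"

	"github.com/pkg/errors"
)

var errNilBlockHeader = errors.New("invalid nil block header")

func main() {
	wrapped := errors.Wrap(errNilBlockHeader, "could not insert node")
	// The wrap only adds context; the sentinel stays reachable via Unwrap,
	// so errors.Is (and require.ErrorIs in the tests) keeps matching.
	fmt.Println(stderrors.Is(wrapped, errNilBlockHeader)) // true
}
```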
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
@@ -13,9 +14,10 @@ import (
|
||||
func TestFFGUpdates_OneBranch(t *testing.T) {
|
||||
balances := []uint64{1, 1}
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
|
||||
// The head should always start at the finalized block.
|
||||
r, err := f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
|
||||
r, err := f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, params.BeaconConfig().ZeroHash, r, "Incorrect head with genesis")
|
||||
|
||||
@@ -27,9 +29,15 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
|
||||
// 2 <- justified: 1, finalized: 0
|
||||
// |
|
||||
// 3 <- justified: 2, finalized: 1
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 2, 1))
|
||||
state, blkRoot, err := prepareForkchoiceState(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 2, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
// With starting justified epoch at 0, the head should be 3:
|
||||
// 0 <- start
|
||||
@@ -39,7 +47,7 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
|
||||
// 2
|
||||
// |
|
||||
// 3 <- head
|
||||
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(3), r, "Incorrect head for with justified epoch at 0")
|
||||
|
||||
@@ -51,8 +59,8 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
|
||||
// 2 <- head
|
||||
// |
|
||||
// 3
|
||||
f.store.justifiedEpoch = 1
|
||||
r, err = f.Head(context.Background(), indexToHash(2), balances)
|
||||
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Root: indexToHash(2), Epoch: 1}
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(2), r, "Incorrect head with justified epoch at 1")
|
||||
|
||||
@@ -64,8 +72,8 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
|
||||
// 2 <- start
|
||||
// |
|
||||
// 3 <- head
|
||||
f.store.justifiedEpoch = 2
|
||||
r, err = f.Head(context.Background(), indexToHash(3), balances)
|
||||
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Root: indexToHash(3), Epoch: 2}
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(3), r, "Incorrect head with justified epoch at 2")
|
||||
}
|
||||
@@ -73,8 +81,9 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
|
||||
func TestFFGUpdates_TwoBranches(t *testing.T) {
|
||||
balances := []uint64{1, 1}
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
|
||||
r, err := f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
|
||||
r, err := f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, params.BeaconConfig().ZeroHash, r, "Incorrect head with genesis")
|
||||
|
||||
@@ -91,17 +100,37 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
|
||||
// | |
|
||||
// justified: 2, finalized: 0 -> 9 10 <- justified: 2, finalized: 0
|
||||
// Left branch.
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(5), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(7), indexToHash(5), params.BeaconConfig().ZeroHash, 1, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(9), indexToHash(7), params.BeaconConfig().ZeroHash, 2, 0))
|
||||
state, blkRoot, err := prepareForkchoiceState(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 2, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 3, indexToHash(5), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 4, indexToHash(7), indexToHash(5), params.BeaconConfig().ZeroHash, 1, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 4, indexToHash(9), indexToHash(7), params.BeaconConfig().ZeroHash, 2, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
// Right branch.
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 1, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 2, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 3, indexToHash(6), indexToHash(4), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(8), indexToHash(6), params.BeaconConfig().ZeroHash, 1, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 4, indexToHash(10), indexToHash(8), params.BeaconConfig().ZeroHash, 2, 0))
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 1, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 2, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 3, indexToHash(6), indexToHash(4), params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 4, indexToHash(8), indexToHash(6), params.BeaconConfig().ZeroHash, 1, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 4, indexToHash(10), indexToHash(8), params.BeaconConfig().ZeroHash, 2, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
// With start at 0, the head should be 10:
|
||||
// 0 <-- start
|
||||
@@ -115,7 +144,7 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
|
||||
// 7 8
|
||||
// | |
|
||||
// 9 10 <-- head
|
||||
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(10), r, "Incorrect head with justified epoch at 0")
|
||||
|
||||
@@ -145,7 +174,7 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
|
||||
// 7 8
|
||||
// | |
|
||||
// head -> 9 10
|
||||
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(9), r, "Incorrect head with justified epoch at 0")
|
||||
|
||||
@@ -175,20 +204,26 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
|
||||
// 7 8
|
||||
// | |
|
||||
// 9 10 <-- head
|
||||
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(10), r, "Incorrect head with justified epoch at 0")
|
||||
|
||||
f.store.justifiedEpoch = 1
|
||||
r, err = f.Head(context.Background(), indexToHash(1), balances)
|
||||
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 1, Root: indexToHash(1)}
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(7), r, "Incorrect head with justified epoch at 0")
|
||||
}
|
||||
|
||||
func setup(justifiedEpoch, finalizedEpoch types.Epoch) *ForkChoice {
|
||||
ctx := context.Background()
|
||||
f := New(justifiedEpoch, finalizedEpoch)
|
||||
err := f.InsertOptimisticBlock(ctx, 0, params.BeaconConfig().ZeroHash, [32]byte{}, params.BeaconConfig().ZeroHash, justifiedEpoch, finalizedEpoch)
|
||||
f := New()
|
||||
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: justifiedEpoch, Root: params.BeaconConfig().ZeroHash}
|
||||
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: finalizedEpoch, Root: params.BeaconConfig().ZeroHash}
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, params.BeaconConfig().ZeroHash, [32]byte{}, params.BeaconConfig().ZeroHash, justifiedEpoch, finalizedEpoch)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
err = f.InsertNode(ctx, state, blkRoot)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -5,25 +5,30 @@ import (
"fmt"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/runtime/version"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)

// New initializes a new fork choice store.
func New(justifiedEpoch, finalizedEpoch types.Epoch) *ForkChoice {
func New() *ForkChoice {
s := &Store{
justifiedEpoch: justifiedEpoch,
finalizedEpoch: finalizedEpoch,
proposerBoostRoot: [32]byte{},
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
nodeByPayload: make(map[[fieldparams.RootLength]byte]*Node),
slashedIndices: make(map[types.ValidatorIndex]bool),
pruneThreshold: defaultPruneThreshold,
justifiedCheckpoint: &forkchoicetypes.Checkpoint{},
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
proposerBoostRoot: [32]byte{},
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
nodeByPayload: make(map[[fieldparams.RootLength]byte]*Node),
slashedIndices: make(map[types.ValidatorIndex]bool),
pruneThreshold: defaultPruneThreshold,
}

b := make([]uint64, 0)
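With the epochs gone from the constructor, callers seed the store's checkpoints after New(). A hedged sketch of the new wiring, written as if inside the package (the helper name and the epoch values are illustrative; the diff itself only defines New, UpdateJustifiedCheckpoint and UpdateFinalizedCheckpoint):

```go
// Sketch only, not the PR's production wiring.
func newForkchoice(jr, fr [32]byte) (*ForkChoice, error) {
	f := New() // starts with empty checkpoints per the hunk above
	if err := f.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1, Root: jr}); err != nil {
		return nil, err
	}
	if err := f.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 0, Root: fr}); err != nil {
		return nil, err
	}
	return f, nil
}
```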
@@ -42,7 +47,6 @@ func (f *ForkChoice) NodeCount() int {
// It firsts computes validator's balance changes then recalculates block tree from leaves to root.
func (f *ForkChoice) Head(
ctx context.Context,
justifiedRoot [32]byte,
justifiedStateBalances []uint64,
) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.Head")
@@ -68,10 +72,12 @@ func (f *ForkChoice) Head(
return [32]byte{}, errors.Wrap(err, "could not apply weight changes")
}

if err := f.store.treeRootNode.updateBestDescendant(ctx, f.store.justifiedEpoch, f.store.finalizedEpoch); err != nil {
jc := f.JustifiedCheckpoint()
fc := f.FinalizedCheckpoint()
if err := f.store.treeRootNode.updateBestDescendant(ctx, jc.Epoch, fc.Epoch); err != nil {
return [32]byte{}, errors.Wrap(err, "could not update best descendant")
}
return f.store.head(ctx, justifiedRoot)
return f.store.head(ctx)
}

// ProcessAttestation processes attestation for vote accounting, it iterates around validator indices
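Head therefore drops the justifiedRoot parameter; the justified root now comes from the store's own checkpoint. The call-site shape used throughout the updated tests, shown here as an in-package sketch:

```go
// Sketch: balances are the justified-state validator balances; the justified
// root is read internally from the store's justified checkpoint.
func currentHead(ctx context.Context, f *ForkChoice, balances []uint64) ([32]byte, error) {
	return f.Head(ctx, balances)
}
```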
@@ -102,17 +108,38 @@ func (f *ForkChoice) ProcessAttestation(ctx context.Context, validatorIndices []
processedAttestationCount.Inc()
}

// InsertOptimisticBlock processes a new block by inserting it to the fork choice store.
func (f *ForkChoice) InsertOptimisticBlock(
ctx context.Context,
slot types.Slot,
blockRoot, parentRoot, payloadHash [fieldparams.RootLength]byte,
justifiedEpoch, finalizedEpoch types.Epoch,
) error {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.InsertOptimisticBlock")
// InsertNode processes a new block by inserting it to the fork choice store.
func (f *ForkChoice) InsertNode(ctx context.Context, state state.ReadOnlyBeaconState, root [32]byte) error {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.InsertNode")
defer span.End()

return f.store.insert(ctx, slot, blockRoot, parentRoot, payloadHash, justifiedEpoch, finalizedEpoch)
slot := state.Slot()
bh := state.LatestBlockHeader()
if bh == nil {
return errNilBlockHeader
}
parentRoot := bytesutil.ToBytes32(bh.ParentRoot)
payloadHash := [32]byte{}
if state.Version() >= version.Bellatrix {
ph, err := state.LatestExecutionPayloadHeader()
if err != nil {
return err
}
if ph != nil {
copy(payloadHash[:], ph.BlockHash)
}
}
jc := state.CurrentJustifiedCheckpoint()
if jc == nil {
return errInvalidNilCheckpoint
}
justifiedEpoch := jc.Epoch
fc := state.FinalizedCheckpoint()
if fc == nil {
return errInvalidNilCheckpoint
}
finalizedEpoch := fc.Epoch
return f.store.insert(ctx, slot, root, parentRoot, payloadHash, justifiedEpoch, finalizedEpoch)
}

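InsertNode replaces InsertOptimisticBlock: instead of threading slot, parent root, payload hash and epochs through every caller, the post-state carries all of them. A hedged usage sketch, in-package, with an illustrative function name:

```go
// Sketch: after a block has been processed, its post-state plus the block
// root are enough; InsertNode derives the rest as shown above.
func insertProcessedBlock(ctx context.Context, f *ForkChoice, postState state.ReadOnlyBeaconState, root [32]byte) error {
	return f.InsertNode(ctx, postState, root)
}
```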
// Prune prunes the fork choice store with the new finalized root. The store is only pruned if the input
@@ -174,7 +201,7 @@ func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {

node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return false, ErrNilNode
return true, ErrNilNode
}

return node.optimistic, nil
@@ -190,7 +217,7 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types

node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return nil, ErrNilNode
return nil, errors.Wrap(ErrNilNode, "could not determine ancestor root")
}

n := node
@@ -202,7 +229,7 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types
}

if n == nil {
return nil, ErrNilNode
return nil, errors.Wrap(ErrNilNode, "could not determine ancestor root")
}

return n.root[:], nil
@@ -241,7 +268,7 @@ func (f *ForkChoice) updateBalances(newBalances []uint64) error {
if ok && vote.nextRoot != params.BeaconConfig().ZeroHash {
// Protection against nil node
if nextNode == nil {
return ErrNilNode
return errors.Wrap(ErrNilNode, "could not update balances")
}
nextNode.balance += newBalance
}
@@ -250,7 +277,7 @@ func (f *ForkChoice) updateBalances(newBalances []uint64) error {
if ok && vote.currentRoot != params.BeaconConfig().ZeroHash {
// Protection against nil node
if currentNode == nil {
return ErrNilNode
return errors.Wrap(ErrNilNode, "could not update balances")
}
if currentNode.balance < oldBalance {
f.store.proposerBoostLock.RLock()
@@ -295,25 +322,29 @@ func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [fieldparams
defer f.store.nodesLock.Unlock()
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return ErrNilNode
return errors.Wrap(ErrNilNode, "could not set node to valid")
}
return node.setNodeAndParentValidated(ctx)
}

// JustifiedEpoch of fork choice store.
func (f *ForkChoice) JustifiedEpoch() types.Epoch {
return f.store.justifiedEpoch
// JustifiedCheckpoint of fork choice store.
func (f *ForkChoice) JustifiedCheckpoint() *forkchoicetypes.Checkpoint {
f.store.checkpointsLock.RLock()
defer f.store.checkpointsLock.RUnlock()
return f.store.justifiedCheckpoint
}

// FinalizedEpoch of fork choice store.
func (f *ForkChoice) FinalizedEpoch() types.Epoch {
return f.store.finalizedEpoch
// FinalizedCheckpoint of fork choice store.
func (f *ForkChoice) FinalizedCheckpoint() *forkchoicetypes.Checkpoint {
f.store.checkpointsLock.RLock()
defer f.store.checkpointsLock.RUnlock()
return f.store.finalizedCheckpoint
}

func (f *ForkChoice) ForkChoiceNodes() []*pbrpc.ForkChoiceNode {
func (f *ForkChoice) ForkChoiceNodes() []*ethpb.ForkChoiceNode {
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
ret := make([]*pbrpc.ForkChoiceNode, len(f.store.nodeByRoot))
ret := make([]*ethpb.ForkChoiceNode, len(f.store.nodeByRoot))
return f.store.treeRootNode.rpcNodes(ret)
}

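JustifiedCheckpoint and FinalizedCheckpoint supersede the epoch-only getters and take checkpointsLock for readers. A small in-package illustration of the intended read path (the helper name is an assumption, not part of the PR):

```go
// Sketch: both getters lock internally, so this is safe to run concurrently
// with UpdateJustifiedCheckpoint / UpdateFinalizedCheckpoint. The returned
// checkpoints should be treated as read-only.
func checkpointEpochs(f *ForkChoice) (justified, finalized types.Epoch) {
	return f.JustifiedCheckpoint().Epoch, f.FinalizedCheckpoint().Epoch
}
```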
@@ -358,24 +389,98 @@ func (f *ForkChoice) InsertSlashedIndex(_ context.Context, index types.Validator
}
}

// UpdateJustifiedCheckpoint sets the justified epoch to the given one
func (f *ForkChoice) UpdateJustifiedCheckpoint(jc *pbrpc.Checkpoint) error {
// UpdateJustifiedCheckpoint sets the justified checkpoint to the given one
func (f *ForkChoice) UpdateJustifiedCheckpoint(jc *forkchoicetypes.Checkpoint) error {
if jc == nil {
return errInvalidNilCheckpoint
}
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
f.store.justifiedEpoch = jc.Epoch
f.store.checkpointsLock.Lock()
defer f.store.checkpointsLock.Unlock()
f.store.justifiedCheckpoint = jc
return nil
}

// UpdateFinalizedCheckpoint sets the finalized epoch to the given one
func (f *ForkChoice) UpdateFinalizedCheckpoint(fc *pbrpc.Checkpoint) error {
// UpdateFinalizedCheckpoint sets the finalized checkpoint to the given one
func (f *ForkChoice) UpdateFinalizedCheckpoint(fc *forkchoicetypes.Checkpoint) error {
if fc == nil {
return errInvalidNilCheckpoint
}
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
f.store.finalizedEpoch = fc.Epoch
f.store.checkpointsLock.Lock()
defer f.store.checkpointsLock.Unlock()
f.store.finalizedCheckpoint = fc
return nil
}

// CommonAncestorRoot returns the common ancestor root between the two block roots r1 and r2.
func (f *ForkChoice) CommonAncestorRoot(ctx context.Context, r1 [32]byte, r2 [32]byte) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "doublelinkedtree.CommonAncestorRoot")
defer span.End()

// Do nothing if the input roots are the same.
if r1 == r2 {
return r1, nil
}

f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()

n1, ok := f.store.nodeByRoot[r1]
if !ok || n1 == nil {
return [32]byte{}, errors.Wrap(ErrNilNode, "could not determine common ancestor root")
}
n2, ok := f.store.nodeByRoot[r2]
if !ok || n2 == nil {
return [32]byte{}, errors.Wrap(ErrNilNode, "could not determine common ancestor root")
}

for {
if ctx.Err() != nil {
return [32]byte{}, ctx.Err()
}
if n1.slot > n2.slot {
n1 = n1.parent
// Reaches the end of the tree and unable to find common ancestor.
// This should not happen at runtime as the finalized
// node has to be a common ancestor
if n1 == nil {
return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
}
} else {
n2 = n2.parent
// Reaches the end of the tree and unable to find common ancestor.
if n2 == nil {
return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
}
}
if n1 == n2 {
return n1.root, nil
}
}
}

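The loop above is a two-pointer walk: whichever node is deeper (higher slot) steps to its parent until the pointers meet, and running off the tree means the roots are not connected. A standalone sketch of the same idea on an illustrative node type (not the PR's Node):

```go
type ancNode struct {
	slot   uint64
	parent *ancNode
}

// commonAncestor mirrors CommonAncestorRoot above; ok is false when the two
// nodes do not share a tree (the ErrUnknownCommonAncestor case).
func commonAncestor(a, b *ancNode) (anc *ancNode, ok bool) {
	for a != nil && b != nil {
		if a == b {
			return a, true
		}
		if a.slot > b.slot {
			a = a.parent
		} else {
			b = b.parent
		}
	}
	return nil, false
}
```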
// InsertOptimisticChain inserts all nodes corresponding to blocks in the slice
// `blocks`. This slice must be ordered from child to parent. It includes all
// blocks **except** the first one (that is the one with the highest slot
// number). All blocks are assumed to be a strict chain
// where blocks[i].Parent = blocks[i+1]. Also we assume that the parent of the
// last block in this list is already included in forkchoice store.
func (f *ForkChoice) InsertOptimisticChain(ctx context.Context, chain []*forkchoicetypes.BlockAndCheckpoints) error {
if len(chain) == 0 {
return nil
}
for i := len(chain) - 1; i > 0; i-- {
b := chain[i].Block
r := bytesutil.ToBytes32(chain[i-1].Block.ParentRoot())
parentRoot := bytesutil.ToBytes32(b.ParentRoot())
payloadHash, err := blocks.GetBlockPayloadHash(b)
if err != nil {
return err
}
if err := f.store.insert(ctx,
b.Slot(), r, parentRoot, payloadHash,
chain[i].JustifiedCheckpoint.Epoch, chain[i].FinalizedCheckpoint.Epoch); err != nil {
return err
}
}
return nil
}

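Callers have to hand InsertOptimisticChain a child-to-parent slice whose last entry's parent is already in the store, which is why TestStore_InsertOptimisticChain below builds blocks parent-first and then reverses them. A hedged in-package sketch of that reversal (variable and function names are illustrative):

```go
// Sketch: turn a parent->child batch into the child->parent ordering that
// InsertOptimisticChain expects before handing it to the store.
func insertBatch(ctx context.Context, f *ForkChoice, parentFirst []*forkchoicetypes.BlockAndCheckpoints) error {
	ordered := make([]*forkchoicetypes.BlockAndCheckpoints, len(parentFirst))
	for i, b := range parentFirst {
		ordered[len(parentFirst)-1-i] = b
	}
	return f.InsertOptimisticChain(ctx, ordered)
}
```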
@@ -5,21 +5,73 @@ import (
"encoding/binary"
"testing"

"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
v3 "github.com/prysmaticlabs/prysm/beacon-chain/state/v3"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/testing/util"
)

// prepareForkchoiceState prepares a beacon State with the given data to mock
// insert into forkchoice
func prepareForkchoiceState(
_ context.Context,
slot types.Slot,
blockRoot [32]byte,
parentRoot [32]byte,
payloadHash [32]byte,
justifiedEpoch types.Epoch,
finalizedEpoch types.Epoch,
) (state.BeaconState, [32]byte, error) {
blockHeader := &ethpb.BeaconBlockHeader{
ParentRoot: parentRoot[:],
}

executionHeader := &ethpb.ExecutionPayloadHeader{
BlockHash: payloadHash[:],
}

justifiedCheckpoint := &ethpb.Checkpoint{
Epoch: justifiedEpoch,
}

finalizedCheckpoint := &ethpb.Checkpoint{
Epoch: finalizedEpoch,
}

base := &ethpb.BeaconStateBellatrix{
Slot: slot,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
CurrentJustifiedCheckpoint: justifiedCheckpoint,
FinalizedCheckpoint: finalizedCheckpoint,
LatestExecutionPayloadHeader: executionHeader,
LatestBlockHeader: blockHeader,
}

st, err := v3.InitializeFromProto(base)
return st, blockRoot, err
}

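Every test in this compare now goes through the same three-line pattern: build a minimal Bellatrix state with prepareForkchoiceState, then feed it to InsertNode. The repeated pattern, factored into a helper for clarity (the helper itself is not part of the PR, only the calls it wraps are):

```go
// Sketch: the insertion idiom used throughout the rewritten tests.
func insertTestBlock(t *testing.T, f *ForkChoice, slot types.Slot, root, parent [32]byte, je, fe types.Epoch) {
	ctx := context.Background()
	st, blkRoot, err := prepareForkchoiceState(ctx, slot, root, parent, params.BeaconConfig().ZeroHash, je, fe)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
}
```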
func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
f.votes = []Vote{
|
||||
{indexToHash(1), indexToHash(1), 0},
|
||||
@@ -39,9 +91,15 @@ func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) {
|
||||
func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
s := f.store
|
||||
s.nodeByRoot[indexToHash(1)].balance = 100
|
||||
s.nodeByRoot[indexToHash(2)].balance = 100
|
||||
@@ -63,9 +121,15 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
|
||||
func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
s := f.store
|
||||
s.nodeByRoot[indexToHash(1)].balance = 100
|
||||
s.nodeByRoot[indexToHash(2)].balance = 100
|
||||
@@ -87,12 +151,24 @@ func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
|
||||
func TestForkChoice_IsCanonical(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 4, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, indexToHash(5), indexToHash(4), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 6, indexToHash(6), indexToHash(5), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(4), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 6, indexToHash(6), indexToHash(5), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
require.Equal(t, true, f.IsCanonical(params.BeaconConfig().ZeroHash))
|
||||
require.Equal(t, false, f.IsCanonical(indexToHash(1)))
|
||||
@@ -106,12 +182,24 @@ func TestForkChoice_IsCanonical(t *testing.T) {
|
||||
func TestForkChoice_IsCanonicalReorg(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, [32]byte{'1'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, [32]byte{'2'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, [32]byte{'3'}, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 4, [32]byte{'4'}, [32]byte{'2'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, [32]byte{'5'}, [32]byte{'4'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 6, [32]byte{'6'}, [32]byte{'5'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 1, [32]byte{'1'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 2, [32]byte{'2'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 3, [32]byte{'3'}, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 4, [32]byte{'4'}, [32]byte{'2'}, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 5, [32]byte{'5'}, [32]byte{'4'}, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 6, [32]byte{'6'}, [32]byte{'5'}, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
f.store.nodesLock.Lock()
|
||||
f.store.nodeByRoot[[32]byte{'3'}].balance = 10
|
||||
@@ -123,7 +211,9 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
|
||||
require.DeepEqual(t, [32]byte{'3'}, f.store.treeRootNode.bestDescendant.root)
|
||||
f.store.nodesLock.Unlock()
|
||||
|
||||
h, err := f.store.head(ctx, [32]byte{'1'})
|
||||
r1 := [32]byte{'1'}
|
||||
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 1, Root: r1}
|
||||
h, err := f.store.head(ctx)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, [32]byte{'3'}, h)
|
||||
require.DeepEqual(t, h, f.store.headNode.root)
|
||||
@@ -140,9 +230,15 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
|
||||
func TestForkChoice_AncestorRoot(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 5, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
f.store.treeRootNode = f.store.nodeByRoot[indexToHash(1)]
|
||||
f.store.treeRootNode.parent = nil
|
||||
|
||||
@@ -166,8 +262,12 @@ func TestForkChoice_AncestorRoot(t *testing.T) {
|
||||
func TestForkChoice_AncestorEqualSlot(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'3'}, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'3'}, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
r, err := f.AncestorRoot(ctx, [32]byte{'3'}, 100)
|
||||
require.NoError(t, err)
|
||||
@@ -178,8 +278,12 @@ func TestForkChoice_AncestorEqualSlot(t *testing.T) {
|
||||
func TestForkChoice_AncestorLowerSlot(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 200, [32]byte{'3'}, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 200, [32]byte{'3'}, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
r, err := f.AncestorRoot(ctx, [32]byte{'3'}, 150)
|
||||
require.NoError(t, err)
|
||||
@@ -191,29 +295,35 @@ func TestForkChoice_RemoveEquivocating(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
// Insert a block it will be head
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
|
||||
head, err := f.Head(ctx, params.BeaconConfig().ZeroHash, []uint64{})
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
head, err := f.Head(ctx, []uint64{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'a'}, head)
|
||||
|
||||
// Insert two extra blocks
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1))
|
||||
head, err = f.Head(ctx, params.BeaconConfig().ZeroHash, []uint64{})
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 2, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 3, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
head, err = f.Head(ctx, []uint64{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'c'}, head)
|
||||
|
||||
// Insert two attestations for block b, one for c it becomes head
|
||||
f.ProcessAttestation(ctx, []uint64{1, 2}, [32]byte{'b'}, 1)
|
||||
f.ProcessAttestation(ctx, []uint64{3}, [32]byte{'c'}, 1)
|
||||
head, err = f.Head(ctx, params.BeaconConfig().ZeroHash, []uint64{100, 200, 200, 300})
|
||||
head, err = f.Head(ctx, []uint64{100, 200, 200, 300})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'b'}, head)
|
||||
|
||||
// Process b's slashing, c is now head
|
||||
f.InsertSlashedIndex(ctx, 1)
|
||||
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].balance)
|
||||
head, err = f.Head(ctx, params.BeaconConfig().ZeroHash, []uint64{100, 200, 200, 300})
|
||||
head, err = f.Head(ctx, []uint64{100, 200, 200, 300})
|
||||
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].weight)
|
||||
require.Equal(t, uint64(300), f.store.nodeByRoot[[32]byte{'c'}].weight)
|
||||
require.NoError(t, err)
|
||||
@@ -222,7 +332,7 @@ func TestForkChoice_RemoveEquivocating(t *testing.T) {
|
||||
// Process b's slashing again, should be a noop
|
||||
f.InsertSlashedIndex(ctx, 1)
|
||||
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].balance)
|
||||
head, err = f.Head(ctx, params.BeaconConfig().ZeroHash, []uint64{100, 200, 200, 300})
|
||||
head, err = f.Head(ctx, []uint64{100, 200, 200, 300})
|
||||
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].weight)
|
||||
require.Equal(t, uint64(300), f.store.nodeByRoot[[32]byte{'c'}].weight)
|
||||
require.NoError(t, err)
|
||||
@@ -244,10 +354,242 @@ func TestStore_UpdateCheckpoints(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
jr := [32]byte{'j'}
|
||||
fr := [32]byte{'f'}
|
||||
jc := &ethpb.Checkpoint{Root: jr[:], Epoch: 3}
|
||||
fc := &ethpb.Checkpoint{Root: fr[:], Epoch: 2}
|
||||
jc := &forkchoicetypes.Checkpoint{Root: jr, Epoch: 3}
|
||||
fc := &forkchoicetypes.Checkpoint{Root: fr, Epoch: 2}
|
||||
require.NoError(t, f.UpdateJustifiedCheckpoint(jc))
|
||||
require.NoError(t, f.UpdateFinalizedCheckpoint(fc))
|
||||
require.Equal(t, f.store.justifiedEpoch, jc.Epoch)
|
||||
require.Equal(t, f.store.finalizedEpoch, fc.Epoch)
|
||||
require.Equal(t, f.store.justifiedCheckpoint.Epoch, jc.Epoch)
|
||||
require.Equal(t, f.store.justifiedCheckpoint.Root, jc.Root)
|
||||
require.Equal(t, f.store.finalizedCheckpoint.Epoch, fc.Epoch)
|
||||
require.Equal(t, f.store.finalizedCheckpoint.Root, fc.Root)
|
||||
}
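The test above now builds forkchoicetypes.Checkpoint values ([32]byte roots) instead of ethpb.Checkpoint ([]byte roots). Where a caller still holds the protobuf form, a conversion along these lines would be needed; this helper is an illustration using the package's existing dependencies (bytesutil, errInvalidNilCheckpoint), not code from the PR:

```go
// Sketch only: bridge the protobuf checkpoint to the forkchoice checkpoint type.
func toForkchoiceCheckpoint(cp *ethpb.Checkpoint) (*forkchoicetypes.Checkpoint, error) {
	if cp == nil {
		return nil, errInvalidNilCheckpoint
	}
	return &forkchoicetypes.Checkpoint{
		Epoch: cp.Epoch,
		Root:  bytesutil.ToBytes32(cp.Root),
	}, nil
}
```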
|
||||
|
||||
func TestStore_CommonAncestor(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(0, 0)
|
||||
|
||||
// /-- b -- d -- e
|
||||
// a
|
||||
// \-- c -- f
|
||||
// \-- g
|
||||
// \ -- h -- i -- j
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 2, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 3, [32]byte{'d'}, [32]byte{'b'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 4, [32]byte{'e'}, [32]byte{'d'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 5, [32]byte{'f'}, [32]byte{'c'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 6, [32]byte{'g'}, [32]byte{'c'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 7, [32]byte{'h'}, [32]byte{'c'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 8, [32]byte{'i'}, [32]byte{'h'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 9, [32]byte{'j'}, [32]byte{'i'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
r1 [32]byte
|
||||
r2 [32]byte
|
||||
wantRoot [32]byte
|
||||
}{
|
||||
{
|
||||
name: "Common ancestor between c and b is a",
|
||||
r1: [32]byte{'c'},
|
||||
r2: [32]byte{'b'},
|
||||
wantRoot: [32]byte{'a'},
|
||||
},
|
||||
{
|
||||
name: "Common ancestor between c and d is a",
|
||||
r1: [32]byte{'c'},
|
||||
r2: [32]byte{'d'},
|
||||
wantRoot: [32]byte{'a'},
|
||||
},
|
||||
{
|
||||
name: "Common ancestor between c and e is a",
|
||||
r1: [32]byte{'c'},
|
||||
r2: [32]byte{'e'},
|
||||
wantRoot: [32]byte{'a'},
|
||||
},
|
||||
{
|
||||
name: "Common ancestor between g and f is c",
|
||||
r1: [32]byte{'g'},
|
||||
r2: [32]byte{'f'},
|
||||
wantRoot: [32]byte{'c'},
|
||||
},
|
||||
{
|
||||
name: "Common ancestor between f and h is c",
|
||||
r1: [32]byte{'f'},
|
||||
r2: [32]byte{'h'},
|
||||
wantRoot: [32]byte{'c'},
|
||||
},
|
||||
{
|
||||
name: "Common ancestor between g and h is c",
|
||||
r1: [32]byte{'g'},
|
||||
r2: [32]byte{'h'},
|
||||
wantRoot: [32]byte{'c'},
|
||||
},
|
||||
{
|
||||
name: "Common ancestor between b and h is a",
|
||||
r1: [32]byte{'b'},
|
||||
r2: [32]byte{'h'},
|
||||
wantRoot: [32]byte{'a'},
|
||||
},
|
||||
{
|
||||
name: "Common ancestor between e and h is a",
|
||||
r1: [32]byte{'e'},
|
||||
r2: [32]byte{'h'},
|
||||
wantRoot: [32]byte{'a'},
|
||||
},
|
||||
{
|
||||
name: "Common ancestor between i and f is c",
|
||||
r1: [32]byte{'i'},
|
||||
r2: [32]byte{'f'},
|
||||
wantRoot: [32]byte{'c'},
|
||||
},
|
||||
{
|
||||
name: "Common ancestor between e and h is a",
|
||||
r1: [32]byte{'j'},
|
||||
r2: [32]byte{'g'},
|
||||
wantRoot: [32]byte{'c'},
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
gotRoot, err := f.CommonAncestorRoot(ctx, tc.r1, tc.r2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.wantRoot, gotRoot)
|
||||
})
|
||||
}
|
||||
|
||||
// a -- b -- c -- d
|
||||
f = setup(0, 0)
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 2, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 3, [32]byte{'d'}, [32]byte{'c'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
tests = []struct {
|
||||
name string
|
||||
r1 [32]byte
|
||||
r2 [32]byte
|
||||
wantRoot [32]byte
|
||||
}{
|
||||
{
|
||||
name: "Common ancestor between a and b is a",
|
||||
r1: [32]byte{'a'},
|
||||
r2: [32]byte{'b'},
|
||||
wantRoot: [32]byte{'a'},
|
||||
},
|
||||
{
|
||||
name: "Common ancestor between b and d is b",
|
||||
r1: [32]byte{'d'},
|
||||
r2: [32]byte{'b'},
|
||||
wantRoot: [32]byte{'b'},
|
||||
},
|
||||
{
|
||||
name: "Common ancestor between d and a is a",
|
||||
r1: [32]byte{'d'},
|
||||
r2: [32]byte{'a'},
|
||||
wantRoot: [32]byte{'a'},
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
gotRoot, err := f.CommonAncestorRoot(ctx, tc.r1, tc.r2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.wantRoot, gotRoot)
|
||||
})
|
||||
}
|
||||
|
||||
// Equal inputs should return the same root.
|
||||
r, err := f.CommonAncestorRoot(ctx, [32]byte{'b'}, [32]byte{'b'})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'b'}, r)
|
||||
// Requesting finalized root (last node) should return the same root.
|
||||
r, err = f.CommonAncestorRoot(ctx, [32]byte{'a'}, [32]byte{'a'})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'a'}, r)
|
||||
// Requesting unknown root
|
||||
_, err = f.CommonAncestorRoot(ctx, [32]byte{'a'}, [32]byte{'z'})
|
||||
require.ErrorIs(t, err, ErrNilNode)
|
||||
_, err = f.CommonAncestorRoot(ctx, [32]byte{'z'}, [32]byte{'a'})
|
||||
require.ErrorIs(t, err, ErrNilNode)
|
||||
n := &Node{
|
||||
slot: 100,
|
||||
root: [32]byte{'y'},
|
||||
justifiedEpoch: 1,
|
||||
unrealizedJustifiedEpoch: 1,
|
||||
finalizedEpoch: 1,
|
||||
unrealizedFinalizedEpoch: 1,
|
||||
optimistic: true,
|
||||
}
|
||||
|
||||
f.store.nodeByRoot[[32]byte{'y'}] = n
|
||||
// broken link
|
||||
_, err = f.CommonAncestorRoot(ctx, [32]byte{'y'}, [32]byte{'a'})
|
||||
require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
|
||||
}
|
||||
|
||||
func TestStore_InsertOptimisticChain(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
blks := make([]*forkchoicetypes.BlockAndCheckpoints, 0)
|
||||
blk := util.NewBeaconBlock()
|
||||
blk.Block.Slot = 1
|
||||
pr := [32]byte{}
|
||||
blk.Block.ParentRoot = pr[:]
|
||||
root, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: wsb.Block(),
|
||||
JustifiedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
|
||||
FinalizedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
|
||||
})
|
||||
for i := uint64(2); i < 11; i++ {
|
||||
blk := util.NewBeaconBlock()
|
||||
blk.Block.Slot = types.Slot(i)
|
||||
copiedRoot := root
|
||||
blk.Block.ParentRoot = copiedRoot[:]
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: wsb.Block(),
|
||||
JustifiedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
|
||||
FinalizedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
|
||||
})
|
||||
root, err = blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
args := make([]*forkchoicetypes.BlockAndCheckpoints, 10)
|
||||
for i := 0; i < len(blks); i++ {
|
||||
args[i] = blks[10-i-1]
|
||||
}
|
||||
require.NoError(t, f.InsertOptimisticChain(context.Background(), args))
|
||||
|
||||
f = setup(1, 1)
|
||||
require.NoError(t, f.InsertOptimisticChain(context.Background(), args[2:]))
|
||||
}
|
||||
|
||||
@@ -12,9 +12,10 @@ import (
|
||||
func TestNoVote_CanFindHead(t *testing.T) {
|
||||
balances := make([]uint64, 16)
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
|
||||
// The head should always start at the finalized block.
|
||||
r, err := f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
|
||||
r, err := f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
if r != params.BeaconConfig().ZeroHash {
|
||||
t.Errorf("Incorrect head with genesis")
|
||||
@@ -24,8 +25,10 @@ func TestNoVote_CanFindHead(t *testing.T) {
|
||||
// 0
|
||||
// /
|
||||
// 2 <- head
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
|
||||
state, blkRoot, err := prepareForkchoiceState(context.Background(), 0, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")
|
||||
|
||||
@@ -33,8 +36,10 @@ func TestNoVote_CanFindHead(t *testing.T) {
|
||||
// 0
|
||||
// / \
|
||||
// head -> 2 1
|
||||
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")

@@ -44,8 +49,10 @@ func TestNoVote_CanFindHead(t *testing.T) {
// head -> 2 1
// |
// 3
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(2), r, "Incorrect head for with justified epoch at 1")

@@ -55,8 +62,10 @@ func TestNoVote_CanFindHead(t *testing.T) {
// 2 1
// | |
// head -> 4 3
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")

@@ -68,8 +77,10 @@ func TestNoVote_CanFindHead(t *testing.T) {
// head -> 4 3
// |
// 5 <- justified epoch = 2
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(5), indexToHash(4), params.BeaconConfig().ZeroHash, 2, 1))
r, err = f.Head(context.Background(), params.BeaconConfig().ZeroHash, balances)
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(5), indexToHash(4), params.BeaconConfig().ZeroHash, 2, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")

@@ -81,7 +92,8 @@ func TestNoVote_CanFindHead(t *testing.T) {
// head -> 4 3
// |
// 5 <- starting from 5 with justified epoch 0 should error
_, err = f.Head(context.Background(), indexToHash(5), balances)
f.store.justifiedCheckpoint.Root = indexToHash(5)
_, err = f.Head(context.Background(), balances)
wanted := "head at slot 0 with weight 0 is not eligible, finalizedEpoch 1 != 1, justifiedEpoch 2 != 1"
require.ErrorContains(t, wanted, err)

@@ -93,8 +105,8 @@ func TestNoVote_CanFindHead(t *testing.T) {
// 4 3
// |
// 5 <- head
f.store.justifiedEpoch = 2
r, err = f.Head(context.Background(), indexToHash(5), balances)
f.store.justifiedCheckpoint.Epoch = 2
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(5), r, "Incorrect head for with justified epoch at 2")

@@ -108,8 +120,10 @@ func TestNoVote_CanFindHead(t *testing.T) {
// 5
// |
// 6 <- head
require.NoError(t, f.InsertOptimisticBlock(context.Background(), 0, indexToHash(6), indexToHash(5), params.BeaconConfig().ZeroHash, 2, 1))
r, err = f.Head(context.Background(), indexToHash(5), balances)
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(6), indexToHash(5), params.BeaconConfig().ZeroHash, 2, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 2")
}

@@ -4,6 +4,7 @@ import (
"bytes"
"context"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
@@ -55,7 +56,7 @@ func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finaliz
hasViableDescendant := false
for _, child := range n.children {
if child == nil {
return ErrNilNode
return errors.Wrap(ErrNilNode, "could not update best descendant")
}
if err := child.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch); err != nil {
return err

@@ -13,9 +13,15 @@ import (
func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))

// The updated balances of each node is 100
s := f.store
@@ -36,9 +42,15 @@ func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))

// The updated balances of each node is 100
s := f.store
@@ -63,7 +75,9 @@ func TestNode_UpdateBestDescendant_NonViableChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is not viable.
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 2, 3))
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 2, 3)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))

// Verify parent's best child and best descendant are `none`.
s := f.store
@@ -76,7 +90,9 @@ func TestNode_UpdateBestDescendant_ViableChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is best descendant
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))

s := f.store
assert.Equal(t, 1, len(s.treeRootNode.children))
@@ -87,8 +103,12 @@ func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is best descendant
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))

s := f.store
s.nodeByRoot[indexToHash(1)].weight = 100
@@ -103,8 +123,12 @@ func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is best descendant
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))

s := f.store
s.nodeByRoot[indexToHash(1)].weight = 200
@@ -119,9 +143,15 @@ func TestNode_TestDepth(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is best descendant
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))

s := f.store
require.Equal(t, s.nodeByRoot[indexToHash(2)].depth(), uint64(2))
@@ -151,11 +181,21 @@ func TestNode_ViableForHead(t *testing.T) {
func TestNode_LeadsToViableHead(t *testing.T) {
f := setup(4, 3)
ctx := context.Background()
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 4, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, indexToHash(5), indexToHash(3), params.BeaconConfig().ZeroHash, 4, 3))
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(3), params.BeaconConfig().ZeroHash, 4, 3)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))

require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 3))
require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 3))
@@ -172,13 +212,23 @@ func TestNode_SetFullyValidated(t *testing.T) {
// \
// -- 5 (true)
//
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.SetOptimisticToValid(ctx, params.BeaconConfig().ZeroHash))
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.SetOptimisticToValid(ctx, indexToHash(1)))
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 4, indexToHash(4), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, indexToHash(5), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
state, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))

opt, err := f.IsOptimistic(indexToHash(5))
require.NoError(t, err)

@@ -3,6 +3,7 @@ package doublylinkedtree
import (
"context"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/config/params"
)

@@ -14,7 +15,7 @@ func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, pa
node, ok = s.nodeByRoot[parentRoot]
if !ok || node == nil {
s.nodesLock.Unlock()
return invalidRoots, ErrNilNode
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid")
}
// return early if the parent is LVH
if node.payloadHash == payloadHash {
@@ -24,7 +25,7 @@ func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, pa
} else {
if node == nil {
s.nodesLock.Unlock()
return invalidRoots, ErrNilNode
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid")
}
if node.parent.root != parentRoot {
s.nodesLock.Unlock()
@@ -66,7 +67,7 @@ func (s *Store) removeNode(ctx context.Context, node *Node) ([][32]byte, error)
invalidRoots := make([][32]byte, 0)

if node == nil {
return invalidRoots, ErrNilNode
return invalidRoots, errors.Wrap(ErrNilNode, "could not remove node")
}
if !node.optimistic || node.parent == nil {
return invalidRoots, errInvalidOptimisticStatus

@@ -157,18 +157,42 @@ func TestPruneInvalid(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)

require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1))
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))

roots, err := f.store.setOptimisticToInvalid(context.Background(), tc.root, tc.parentRoot, tc.payload)
if tc.wantedErr == nil {
@@ -186,16 +210,22 @@ func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)

require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
f.store.proposerBoostLock.Lock()
f.store.proposerBoostRoot = [32]byte{'c'}
f.store.previousProposerBoostScore = 10
f.store.previousProposerBoostRoot = [32]byte{'b'}
f.store.proposerBoostLock.Unlock()

_, err := f.SetOptimisticToInvalid(ctx, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'A'})
_, err = f.SetOptimisticToInvalid(ctx, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'A'})
require.NoError(t, err)
f.store.proposerBoostLock.RLock()
require.Equal(t, uint64(0), f.store.previousProposerBoostScore)
@@ -216,12 +246,20 @@ func TestSetOptimisticToInvalid_CorrectChildren(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)

require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'D'}, 1, 1))
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'D'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))

_, err := f.store.setOptimisticToInvalid(ctx, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'A'})
_, err = f.store.setOptimisticToInvalid(ctx, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'A'})
require.NoError(t, err)
require.Equal(t, 2, len(f.store.nodeByRoot[[32]byte{'a'}].children))

@@ -35,7 +35,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
f := setup(jEpoch, fEpoch)

// The head should always start at the finalized block.
headRoot, err := f.Head(ctx, zeroHash, balances)
headRoot, err := f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, zeroHash, headRoot, "Incorrect head with genesis")

@@ -45,19 +45,20 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// 1 <- HEAD
slot := types.Slot(1)
newRoot := indexToHash(1)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
slot,
newRoot,
headRoot,
zeroHash,
jEpoch,
fEpoch,
),
state, blkRoot, err := prepareForkchoiceState(
ctx,
slot,
newRoot,
headRoot,
zeroHash,
jEpoch,
fEpoch,
)
require.NoError(t, err)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
f.ProcessAttestation(ctx, []uint64{0}, newRoot, fEpoch)
headRoot, err = f.Head(ctx, zeroHash, balances)
headRoot, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 1")

@@ -69,19 +70,19 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// 2 <- HEAD
slot = types.Slot(2)
newRoot = indexToHash(2)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
slot,
newRoot,
headRoot,
zeroHash,
jEpoch,
fEpoch,
),
state, blkRoot, err = prepareForkchoiceState(
ctx,
slot,
newRoot,
headRoot,
zeroHash,
jEpoch,
fEpoch,
)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
f.ProcessAttestation(ctx, []uint64{1}, newRoot, fEpoch)
headRoot, err = f.Head(ctx, zeroHash, balances)
headRoot, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 2")

@@ -95,19 +96,19 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// 3 <- HEAD
slot = types.Slot(3)
newRoot = indexToHash(3)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
slot,
newRoot,
headRoot,
zeroHash,
jEpoch,
fEpoch,
),
state, blkRoot, err = prepareForkchoiceState(
ctx,
slot,
newRoot,
headRoot,
zeroHash,
jEpoch,
fEpoch,
)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
f.ProcessAttestation(ctx, []uint64{2}, newRoot, fEpoch)
headRoot, err = f.Head(ctx, zeroHash, balances)
headRoot, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 3")

@@ -122,17 +123,17 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// 4 <- HEAD
slot = types.Slot(4)
newRoot = indexToHash(4)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
slot,
newRoot,
indexToHash(2),
zeroHash,
jEpoch,
fEpoch,
),
state, blkRoot, err = prepareForkchoiceState(
ctx,
slot,
newRoot,
indexToHash(2),
zeroHash,
jEpoch,
fEpoch,
)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
f.ProcessAttestation(ctx, []uint64{3}, newRoot, fEpoch)
clockSlot := types.Slot(4)
args := &forkchoicetypes.ProposerBoostRootArgs{
@@ -143,7 +144,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
}

require.NoError(t, f.BoostProposerRoot(ctx, args))
headRoot, err = f.Head(ctx, zeroHash, balances)
headRoot, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 3")

@@ -185,7 +186,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// Regression: process attestations for C, check that it
// becomes head, we need two attestations to have C.weight = 30 > 24 = D.weight
f.ProcessAttestation(ctx, []uint64{4, 5}, indexToHash(3), fEpoch)
headRoot, err = f.Head(ctx, zeroHash, balances)
headRoot, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(3), headRoot, "Incorrect head for justified epoch at slot 4")

@@ -194,7 +195,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
f := setup(jEpoch, fEpoch)

// The head should always start at the finalized block.
r, err := f.Head(ctx, zeroHash, balances)
r, err := f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, zeroHash, r, "Incorrect head with genesis")

@@ -208,37 +209,37 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// C <- Slot 2 HEAD
honestBlockSlot := types.Slot(2)
honestBlock := indexToHash(2)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
honestBlockSlot,
honestBlock,
zeroHash,
zeroHash,
jEpoch,
fEpoch,
),
state, blkRoot, err := prepareForkchoiceState(
ctx,
honestBlockSlot,
honestBlock,
zeroHash,
zeroHash,
jEpoch,
fEpoch,
)
r, err = f.Head(ctx, zeroHash, balances)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, honestBlock, r, "Incorrect head for justified epoch at slot 2")

maliciouslyWithheldBlockSlot := types.Slot(1)
maliciouslyWithheldBlock := indexToHash(1)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
maliciouslyWithheldBlockSlot,
maliciouslyWithheldBlock,
zeroHash,
zeroHash,
jEpoch,
fEpoch,
),
state, blkRoot, err = prepareForkchoiceState(
ctx,
maliciouslyWithheldBlockSlot,
maliciouslyWithheldBlock,
zeroHash,
zeroHash,
jEpoch,
fEpoch,
)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))

// Ensure the head is C, the honest block.
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, honestBlock, r, "Incorrect head for justified epoch at slot 2")

@@ -259,7 +260,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
f.ProcessAttestation(ctx, votes, honestBlock, fEpoch)

// Ensure the head is STILL C, the honest block, as the honest block had proposer boost.
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, honestBlock, r, "Incorrect head for justified epoch at slot 2")
})
@@ -267,7 +268,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
f := setup(jEpoch, fEpoch)

// The head should always start at the finalized block.
r, err := f.Head(ctx, zeroHash, balances)
r, err := f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, zeroHash, r, "Incorrect head with genesis")

@@ -281,39 +282,39 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// C <- Slot 2 HEAD
honestBlockSlot := types.Slot(2)
honestBlock := indexToHash(2)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
honestBlockSlot,
honestBlock,
zeroHash,
zeroHash,
jEpoch,
fEpoch,
),
state, blkRoot, err := prepareForkchoiceState(
ctx,
honestBlockSlot,
honestBlock,
zeroHash,
zeroHash,
jEpoch,
fEpoch,
)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))

// Ensure C is the head.
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, honestBlock, r, "Incorrect head for justified epoch at slot 2")

maliciouslyWithheldBlockSlot := types.Slot(1)
maliciouslyWithheldBlock := indexToHash(1)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
maliciouslyWithheldBlockSlot,
maliciouslyWithheldBlock,
zeroHash,
zeroHash,
jEpoch,
fEpoch,
),
state, blkRoot, err = prepareForkchoiceState(
ctx,
maliciouslyWithheldBlockSlot,
maliciouslyWithheldBlock,
zeroHash,
zeroHash,
jEpoch,
fEpoch,
)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))

// Ensure C is still the head after the malicious proposer reveals their block.
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, honestBlock, r, "Incorrect head for justified epoch at slot 2")

@@ -332,7 +333,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
f.ProcessAttestation(ctx, votes, maliciouslyWithheldBlock, fEpoch)

// Expect the head to have switched to B.
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, maliciouslyWithheldBlock, r, "Expected B to become the head")
})
@@ -354,26 +355,26 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
a := zeroHash

// The head should always start at the finalized block.
r, err := f.Head(ctx, zeroHash, balances)
r, err := f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, zeroHash, r, "Incorrect head with genesis")

cSlot := types.Slot(2)
c := indexToHash(2)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
cSlot,
c,
a, // parent
zeroHash,
jEpoch,
fEpoch,
),
state, blkRoot, err := prepareForkchoiceState(
ctx,
cSlot,
c,
a, // parent
zeroHash,
jEpoch,
fEpoch,
)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))

// Ensure C is the head.
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, c, r, "Incorrect head for justified epoch at slot 2")

@@ -388,20 +389,20 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {

bSlot := types.Slot(1)
b := indexToHash(1)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
bSlot,
b,
a, // parent
zeroHash,
jEpoch,
fEpoch,
),
state, blkRoot, err = prepareForkchoiceState(
ctx,
bSlot,
b,
a, // parent
zeroHash,
jEpoch,
fEpoch,
)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))

// Ensure C is still the head.
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, c, r, "Incorrect head for justified epoch at slot 2")

@@ -412,20 +413,20 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// A block D, building on B, is received at slot N+3. It should not be able to win without boosting.
dSlot := types.Slot(3)
d := indexToHash(3)
require.NoError(t,
f.InsertOptimisticBlock(
ctx,
dSlot,
d,
b, // parent
zeroHash,
jEpoch,
fEpoch,
),
state, blkRoot, err = prepareForkchoiceState(
ctx,
dSlot,
d,
b, // parent
zeroHash,
jEpoch,
fEpoch,
)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))

// D cannot win without a boost.
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, c, r, "Expected C to remain the head")

@@ -441,7 +442,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, f.BoostProposerRoot(ctx, args))

// Ensure D becomes the head thanks to boosting.
r, err = f.Head(ctx, zeroHash, balances)
r, err = f.Head(ctx, balances)
require.NoError(t, err)
assert.Equal(t, d, r, "Expected D to become the head")
})

@@ -60,12 +60,12 @@ func (s *Store) PruneThreshold() uint64 {

// head starts from justified root and then follows the best descendant links
// to find the best block for head. This function assumes a lock on s.nodesLock
func (s *Store) head(ctx context.Context, justifiedRoot [32]byte) ([32]byte, error) {
func (s *Store) head(ctx context.Context) ([32]byte, error) {
_, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.head")
defer span.End()

// JustifiedRoot has to be known
justifiedNode, ok := s.nodeByRoot[justifiedRoot]
justifiedNode, ok := s.nodeByRoot[s.justifiedCheckpoint.Root]
if !ok || justifiedNode == nil {
return [32]byte{}, errUnknownJustifiedRoot
}
@@ -77,9 +77,9 @@ func (s *Store) head(ctx context.Context, justifiedRoot [32]byte) ([32]byte, err
bestDescendant = justifiedNode
}

if !bestDescendant.viableForHead(s.justifiedEpoch, s.finalizedEpoch) {
if !bestDescendant.viableForHead(s.justifiedCheckpoint.Epoch, s.finalizedCheckpoint.Epoch) {
return [32]byte{}, fmt.Errorf("head at slot %d with weight %d is not eligible, finalizedEpoch %d != %d, justifiedEpoch %d != %d",
bestDescendant.slot, bestDescendant.weight/10e9, bestDescendant.finalizedEpoch, s.finalizedEpoch, bestDescendant.justifiedEpoch, s.justifiedEpoch)
bestDescendant.slot, bestDescendant.weight/10e9, bestDescendant.finalizedEpoch, s.finalizedCheckpoint.Epoch, bestDescendant.justifiedEpoch, s.justifiedCheckpoint.Epoch)
}

// Update metrics.
@@ -125,19 +125,21 @@ func (s *Store) insert(ctx context.Context,

s.nodeByPayload[payloadHash] = n
s.nodeByRoot[root] = n
if parent != nil {
if parent == nil {
if s.treeRootNode == nil {
s.treeRootNode = n
s.headNode = n
} else {
return errInvalidParentRoot
}
} else {
parent.children = append(parent.children, n)
if err := s.treeRootNode.updateBestDescendant(ctx, s.justifiedEpoch, s.finalizedEpoch); err != nil {
if err := s.treeRootNode.updateBestDescendant(ctx,
s.justifiedCheckpoint.Epoch, s.finalizedCheckpoint.Epoch); err != nil {
return err
}
}

// Set the node as root if the store was empty
if s.treeRootNode == nil {
s.treeRootNode = n
s.headNode = n
}

// Update metrics.
processedBlockCount.Inc()
nodeCount.Set(float64(len(s.nodeByRoot)))
Some files were not shown because too many files have changed in this diff.