mirror of
https://github.com/OffchainLabs/prysm.git
synced 2026-01-10 05:47:59 -05:00
Compare commits
103 Commits
fix-withdr
...
v2.1.3-rc.
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4de92bafc4 | ||
|
|
69438583e5 | ||
|
|
e81f3fed01 | ||
|
|
1b2a5fb4a5 | ||
|
|
6c878b1665 | ||
|
|
838963c9f7 | ||
|
|
7b38f8b8fc | ||
|
|
23e8e695cc | ||
|
|
ce9eaae22e | ||
|
|
7010e8dec8 | ||
|
|
9e4ba75e71 | ||
|
|
044a4ad5a3 | ||
|
|
690084cab6 | ||
|
|
88db7117d2 | ||
|
|
1faa292615 | ||
|
|
434018a4b9 | ||
|
|
54624569bf | ||
|
|
b55ddb5a34 | ||
|
|
a38de90435 | ||
|
|
d454d30f19 | ||
|
|
b04dd9fe5c | ||
|
|
8140a1a7e0 | ||
|
|
cab9917317 | ||
|
|
4c4fb9f2c0 | ||
|
|
80f4f22401 | ||
|
|
dd296cbd8a | ||
|
|
f9e3b0a3c2 | ||
|
|
a58809597e | ||
|
|
7f443e8387 | ||
|
|
18fc17c903 | ||
|
|
d7b01b9d81 | ||
|
|
7ebd9035dd | ||
|
|
578fea73d7 | ||
|
|
7fcadbe3ef | ||
|
|
fce9e6883d | ||
|
|
5ee66a4a68 | ||
|
|
f4c7fb6182 | ||
|
|
077dcdc643 | ||
|
|
1fa864cb1a | ||
|
|
6357860cc2 | ||
|
|
cc1ea81d4a | ||
|
|
dd65622441 | ||
|
|
6c39301f33 | ||
|
|
5b12f5a27d | ||
|
|
105d73d59b | ||
|
|
db687bf56d | ||
|
|
f0403afb25 | ||
|
|
3f309968d4 | ||
|
|
52acaceb3f | ||
|
|
5216402f66 | ||
|
|
2536195be0 | ||
|
|
a4b9e006af | ||
|
|
76b941b310 | ||
|
|
d099c2790b | ||
|
|
2fc3d41ffa | ||
|
|
2e4174bdd6 | ||
|
|
a170fd4bd6 | ||
|
|
3e36eacd1e | ||
|
|
0e9dfe04a1 | ||
|
|
302a5cf00b | ||
|
|
c72f1af951 | ||
|
|
2bc3ef29e0 | ||
|
|
bc91d63fcf | ||
|
|
8f18920ac7 | ||
|
|
2cc62cdf40 | ||
|
|
95c140b512 | ||
|
|
7563bc0444 | ||
|
|
4353d39daa | ||
|
|
2de2000eb7 | ||
|
|
94f6389ebd | ||
|
|
b582ca27e6 | ||
|
|
2586a9e667 | ||
|
|
87251d627d | ||
|
|
3ff285dda5 | ||
|
|
364ad3fbda | ||
|
|
4cbb69602f | ||
|
|
64920d719d | ||
|
|
3cf385fe91 | ||
|
|
adabd1fa4f | ||
|
|
9be2111b7a | ||
|
|
1dec9eb912 | ||
|
|
a2951ec37d | ||
|
|
216fdb48cf | ||
|
|
61c5e2a443 | ||
|
|
da9f72360d | ||
|
|
1be46aa16e | ||
|
|
a1a12243be | ||
|
|
a1dd2e6b8c | ||
|
|
ecb605814e | ||
|
|
61cbe3709b | ||
|
|
7039c382bf | ||
|
|
4f00984ab1 | ||
|
|
edb03328ea | ||
|
|
c922eb9bfc | ||
|
|
051a83a83d | ||
|
|
f46ff626df | ||
|
|
8130ff29bc | ||
|
|
5d4078305a | ||
|
|
6910460173 | ||
|
|
7cc291c09f | ||
|
|
76645bccee | ||
|
|
46c0579816 | ||
|
|
c66d9e9a11 |
@@ -10,7 +10,7 @@
|
||||
|
||||
# Prysm specific remote-cache properties.
|
||||
#build:remote-cache --disk_cache=
|
||||
build:remote-cache --remote_download_minimal
|
||||
build:remote-cache --remote_download_toplevel
|
||||
build:remote-cache --remote_cache=grpc://bazel-remote-cache:9092
|
||||
build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
|
||||
build:remote-cache --remote_local_fallback
|
||||
|
||||
10
.github/CODEOWNERS
vendored
10
.github/CODEOWNERS
vendored
@@ -9,8 +9,8 @@ deps.bzl @prysmaticlabs/core-team
|
||||
|
||||
# Radek and Nishant are responsible for changes that can affect the native state feature.
|
||||
# See https://www.notion.so/prysmaticlabs/Native-Beacon-State-Redesign-6cc9744b4ec1439bb34fa829b36a35c1
|
||||
/beacon-chain/state/fieldtrie/ @rkapka @nisdas
|
||||
/beacon-chain/state/v1/ @rkapka @nisdas
|
||||
/beacon-chain/state/v2/ @rkapka @nisdas
|
||||
/beacon-chain/state/v3/ @rkapka @nisdas
|
||||
/beacon-chain/state/state-native/ @rkapka @nisdas
|
||||
/beacon-chain/state/fieldtrie/ @rkapka @nisdas @rauljordan
|
||||
/beacon-chain/state/v1/ @rkapka @nisdas @rauljordan
|
||||
/beacon-chain/state/v2/ @rkapka @nisdas @rauljordan
|
||||
/beacon-chain/state/v3/ @rkapka @nisdas @rauljordan
|
||||
/beacon-chain/state/state-native/ @rkapka @nisdas @rauljordan
|
||||
|
||||
14
.github/workflows/go.yml
vendored
14
.github/workflows/go.yml
vendored
@@ -64,7 +64,6 @@ jobs:
|
||||
- name: Golangci-lint
|
||||
uses: golangci/golangci-lint-action@v2
|
||||
with:
|
||||
args: --print-issued-lines --sort-results --no-config --timeout=10m --disable-all -E deadcode -E errcheck -E gosimple --skip-files=validator/web/site_data.go --skip-dirs=proto --go=1.18
|
||||
version: v1.45.2
|
||||
skip-go-installation: true
|
||||
|
||||
@@ -88,11 +87,14 @@ jobs:
|
||||
- name: Build
|
||||
# Use blst tag to allow go and bazel builds for blst.
|
||||
run: go build -v ./...
|
||||
|
||||
# fuzz and blst_disabled leverage go tag based stubs at compile time.
|
||||
# Building with these tags should be checked and enforced at pre-submit.
|
||||
- name: Build for fuzzing
|
||||
run: go build -tags=fuzz,blst_disabled ./...
|
||||
env:
|
||||
CGO_CFLAGS: "-O -D__BLST_PORTABLE__"
|
||||
# fuzz leverage go tag based stubs at compile time.
|
||||
# Building and testing with these tags should be checked and enforced at pre-submit.
|
||||
- name: Test for fuzzing
|
||||
run: go test -tags=fuzz,develop ./... -test.run=^Fuzz
|
||||
env:
|
||||
CGO_CFLAGS: "-O -D__BLST_PORTABLE__"
|
||||
|
||||
# Tests run via Bazel for now...
|
||||
# - name: Test
|
||||
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -35,3 +35,6 @@ bin
|
||||
|
||||
# p2p metaData
|
||||
metaData
|
||||
|
||||
# execution API authentication
|
||||
jwt.hex
|
||||
|
||||
@@ -1,69 +1,26 @@
|
||||
linters-settings:
|
||||
govet:
|
||||
check-shadowing: true
|
||||
settings:
|
||||
printf:
|
||||
funcs:
|
||||
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof
|
||||
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf
|
||||
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf
|
||||
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf
|
||||
golint:
|
||||
min-confidence: 0
|
||||
gocyclo:
|
||||
min-complexity: 10
|
||||
maligned:
|
||||
suggest-new: true
|
||||
dupl:
|
||||
threshold: 100
|
||||
goconst:
|
||||
min-len: 2
|
||||
min-occurrences: 2
|
||||
depguard:
|
||||
list-type: blacklist
|
||||
packages:
|
||||
# logging is allowed only by logutils.Log, logrus
|
||||
# is allowed to use only in logutils package
|
||||
- github.com/sirupsen/logrus
|
||||
misspell:
|
||||
locale: US
|
||||
lll:
|
||||
line-length: 140
|
||||
goimports:
|
||||
local-prefixes: github.com/golangci/golangci-lint
|
||||
gocritic:
|
||||
enabled-tags:
|
||||
- performance
|
||||
- style
|
||||
- experimental
|
||||
disabled-checks:
|
||||
- wrapperFunc
|
||||
run:
|
||||
skip-files:
|
||||
- validator/web/site_data.go
|
||||
- .*_test.go
|
||||
skip-dirs:
|
||||
- proto
|
||||
- tools/analyzers
|
||||
timeout: 10m
|
||||
go: '1.18'
|
||||
|
||||
linters:
|
||||
disable-all: true
|
||||
enable:
|
||||
- deadcode
|
||||
- goconst
|
||||
- goimports
|
||||
- golint
|
||||
- gosec
|
||||
- misspell
|
||||
- structcheck
|
||||
- typecheck
|
||||
- unparam
|
||||
- varcheck
|
||||
- gofmt
|
||||
- unused
|
||||
disable-all: true
|
||||
- errcheck
|
||||
- gosimple
|
||||
- gocognit
|
||||
|
||||
run:
|
||||
skip-dirs:
|
||||
- proto/
|
||||
- ^contracts/
|
||||
deadline: 10m
|
||||
linters-settings:
|
||||
gocognit:
|
||||
# TODO: We should target for < 50
|
||||
min-complexity: 97
|
||||
|
||||
# golangci.com configuration
|
||||
# https://github.com/golangci/golangci/wiki/Configuration
|
||||
service:
|
||||
golangci-lint-version: 1.15.0 # use the fixed version to not introduce new linters unexpectedly
|
||||
prepare:
|
||||
- echo "here I can run custom commands, but no preparation needed for this repo"
|
||||
output:
|
||||
print-issued-lines: true
|
||||
sort-results: true
|
||||
|
||||
@@ -2,7 +2,8 @@
|
||||
|
||||
[](https://buildkite.com/prysmatic-labs/prysm)
|
||||
[](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
|
||||
[](https://github.com/ethereum/consensus-specs/tree/v1.1.10)
|
||||
[](https://github.com/ethereum/consensus-specs/tree/v1.2.0-rc.1)
|
||||
[](https://github.com/ethereum/execution-apis/tree/v1.0.0-alpha.9/src/engine)
|
||||
[](https://discord.gg/CTYGPUJ)
|
||||
|
||||
This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum Consensus](https://ethereum.org/en/eth2/) specification, developed by [Prysmatic Labs](https://prysmaticlabs.com). See the [Changelog](https://github.com/prysmaticlabs/prysm/releases) for details of the latest releases and upcoming breaking changes.
|
||||
|
||||
10
WORKSPACE
10
WORKSPACE
@@ -215,7 +215,7 @@ filegroup(
|
||||
url = "https://github.com/eth-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
|
||||
)
|
||||
|
||||
consensus_spec_version = "v1.1.10"
|
||||
consensus_spec_version = "v1.2.0-rc.1"
|
||||
|
||||
bls_test_version = "v0.1.1"
|
||||
|
||||
@@ -231,7 +231,7 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "28043009cc2f6fc9804e73c8c1fc2cb27062f1591e6884f3015ae1dd7a276883",
|
||||
sha256 = "9c93f87378aaa6d6fe1c67b396eac2aacc9594af2a83f028cb99c95dea5b81df",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
|
||||
@@ -247,7 +247,7 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "bc1a283ca068f310f04d70c4f6a8eaa0b8f7e9318073a8bdc2ee233111b4e339",
|
||||
sha256 = "52f2c52415228cee8a4de5a09abff785f439a77dfef8f03e834e4e16857673c1",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
|
||||
@@ -263,7 +263,7 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "bbabb482c229ff9d4e2c7b77c992edb452f9d0af7c6d8dd4f922f06a7b101e81",
|
||||
sha256 = "022dcc0d6de7dd27b337a0d1b945077eaf5ee47000700395a693fc25e12f96df",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
|
||||
@@ -278,7 +278,7 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "408a5524548ad3fcf387f65ac7ec52781d9ee899499720bb12451b48a15818d4",
|
||||
sha256 = "0a9c110305cbd6ebbe0d942f0f33e6ce22dd484ce4ceed277bf185a091941cde",
|
||||
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
|
||||
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
|
||||
@@ -26,69 +26,126 @@ type OriginData struct {
|
||||
bb []byte
|
||||
st state.BeaconState
|
||||
b interfaces.SignedBeaconBlock
|
||||
cf *detect.VersionedUnmarshaler
|
||||
}
|
||||
|
||||
// CheckpointString returns the standard string representation of a Checkpoint for the block root and epoch for the
|
||||
// SignedBeaconBlock value found by DownloadOriginData.
|
||||
// The format is a a hex-encoded block root, followed by the epoch of the block, separated by a colon. For example:
|
||||
// "0x1c35540cac127315fabb6bf29181f2ae0de1a3fc909d2e76ba771e61312cc49a:74888"
|
||||
func (od *OriginData) CheckpointString() string {
|
||||
return fmt.Sprintf("%#x:%d", od.wsd.BlockRoot, od.wsd.Epoch)
|
||||
vu *detect.VersionedUnmarshaler
|
||||
br [32]byte
|
||||
sr [32]byte
|
||||
}
|
||||
|
||||
// SaveBlock saves the downloaded block to a unique file in the given path.
|
||||
// For readability and collision avoidance, the file name includes: type, config name, slot and root
|
||||
func (od *OriginData) SaveBlock(dir string) (string, error) {
|
||||
blockPath := path.Join(dir, fname("block", od.cf, od.b.Block().Slot(), od.wsd.BlockRoot))
|
||||
return blockPath, file.WriteFile(blockPath, od.BlockBytes())
|
||||
func (o *OriginData) SaveBlock(dir string) (string, error) {
|
||||
blockPath := path.Join(dir, fname("block", o.vu, o.b.Block().Slot(), o.br))
|
||||
return blockPath, file.WriteFile(blockPath, o.BlockBytes())
|
||||
}
|
||||
|
||||
// SaveState saves the downloaded state to a unique file in the given path.
|
||||
// For readability and collision avoidance, the file name includes: type, config name, slot and root
|
||||
func (od *OriginData) SaveState(dir string) (string, error) {
|
||||
statePath := path.Join(dir, fname("state", od.cf, od.st.Slot(), od.wsd.StateRoot))
|
||||
return statePath, file.WriteFile(statePath, od.StateBytes())
|
||||
func (o *OriginData) SaveState(dir string) (string, error) {
|
||||
statePath := path.Join(dir, fname("state", o.vu, o.st.Slot(), o.sr))
|
||||
return statePath, file.WriteFile(statePath, o.StateBytes())
|
||||
}
|
||||
|
||||
// StateBytes returns the ssz-encoded bytes of the downloaded BeaconState value.
|
||||
func (od *OriginData) StateBytes() []byte {
|
||||
return od.sb
|
||||
func (o *OriginData) StateBytes() []byte {
|
||||
return o.sb
|
||||
}
|
||||
|
||||
// BlockBytes returns the ssz-encoded bytes of the downloaded SignedBeaconBlock value.
|
||||
func (od *OriginData) BlockBytes() []byte {
|
||||
return od.bb
|
||||
func (o *OriginData) BlockBytes() []byte {
|
||||
return o.bb
|
||||
}
|
||||
|
||||
func fname(prefix string, cf *detect.VersionedUnmarshaler, slot types.Slot, root [32]byte) string {
|
||||
return fmt.Sprintf("%s_%s_%s_%d-%#x.ssz", prefix, cf.Config.ConfigName, version.String(cf.Fork), slot, root)
|
||||
func fname(prefix string, vu *detect.VersionedUnmarshaler, slot types.Slot, root [32]byte) string {
|
||||
return fmt.Sprintf("%s_%s_%s_%d-%#x.ssz", prefix, vu.Config.ConfigName, version.String(vu.Fork), slot, root)
|
||||
}
|
||||
|
||||
// this method downloads the head state, which can be used to find the correct chain config
|
||||
// and use prysm's helper methods to compute the latest weak subjectivity epoch.
|
||||
func getWeakSubjectivityEpochFromHead(ctx context.Context, client *Client) (types.Epoch, error) {
|
||||
headBytes, err := client.GetState(ctx, IdHead)
|
||||
// DownloadFinalizedData downloads the most recently finalized state, and the block most recently applied to that state.
|
||||
// This pair can be used to initialize a new beacon node via checkpoint sync.
|
||||
func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, error) {
|
||||
sb, err := client.GetState(ctx, IdFinalized)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return nil, err
|
||||
}
|
||||
cf, err := detect.FromState(headBytes)
|
||||
vu, err := detect.FromState(sb)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "error detecting chain config for beacon state")
|
||||
return nil, errors.Wrap(err, "error detecting chain config for finalized state")
|
||||
}
|
||||
log.Printf("detected supported config in remote head state, name=%s, fork=%s", cf.Config.ConfigName, version.String(cf.Fork))
|
||||
headState, err := cf.UnmarshalBeaconState(headBytes)
|
||||
log.Printf("detected supported config in remote finalized state, name=%s, fork=%s", vu.Config.ConfigName, version.String(vu.Fork))
|
||||
s, err := vu.UnmarshalBeaconState(sb)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "error unmarshaling state to correct version")
|
||||
return nil, errors.Wrap(err, "error unmarshaling finalized state to correct version")
|
||||
}
|
||||
|
||||
epoch, err := helpers.LatestWeakSubjectivityEpoch(ctx, headState, cf.Config)
|
||||
sr, err := s.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "error computing the weak subjectivity epoch from head state")
|
||||
return nil, errors.Wrapf(err, "failed to compute htr for finalized state at slot=%d", s.Slot())
|
||||
}
|
||||
header := s.LatestBlockHeader()
|
||||
header.StateRoot = sr[:]
|
||||
br, err := header.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error while computing block root using state data")
|
||||
}
|
||||
|
||||
log.Printf("(computed client-side) weak subjectivity epoch = %d", epoch)
|
||||
return epoch, nil
|
||||
bb, err := client.GetBlock(ctx, IdFromRoot(br))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error requesting block by root = %#x", br)
|
||||
}
|
||||
b, err := vu.UnmarshalBeaconBlock(bb)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
|
||||
}
|
||||
realBlockRoot, err := b.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block")
|
||||
}
|
||||
|
||||
log.Printf("BeaconState slot=%d, Block slot=%d", s.Slot(), b.Block().Slot())
|
||||
log.Printf("BeaconState htr=%#xd, Block state_root=%#x", sr, b.Block().StateRoot())
|
||||
log.Printf("BeaconState latest_block_header htr=%#xd, block htr=%#x", br, realBlockRoot)
|
||||
return &OriginData{
|
||||
st: s,
|
||||
b: b,
|
||||
sb: sb,
|
||||
bb: bb,
|
||||
vu: vu,
|
||||
br: br,
|
||||
sr: sr,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// WeakSubjectivityData represents the state root, block root and epoch of the BeaconState + SignedBeaconBlock
|
||||
// that falls at the beginning of the current weak subjectivity period. These values can be used to construct
|
||||
// a weak subjectivity checkpoint beacon node flag to be used for validation.
|
||||
type WeakSubjectivityData struct {
|
||||
BlockRoot [32]byte
|
||||
StateRoot [32]byte
|
||||
Epoch types.Epoch
|
||||
}
|
||||
|
||||
// CheckpointString returns the standard string representation of a Checkpoint.
|
||||
// The format is a a hex-encoded block root, followed by the epoch of the block, separated by a colon. For example:
|
||||
// "0x1c35540cac127315fabb6bf29181f2ae0de1a3fc909d2e76ba771e61312cc49a:74888"
|
||||
func (wsd *WeakSubjectivityData) CheckpointString() string {
|
||||
return fmt.Sprintf("%#x:%d", wsd.BlockRoot, wsd.Epoch)
|
||||
}
|
||||
|
||||
// ComputeWeakSubjectivityCheckpoint attempts to use the prysm weak_subjectivity api
|
||||
// to obtain the current weak_subjectivity checkpoint.
|
||||
// For non-prysm nodes, the same computation will be performed with extra steps,
|
||||
// using the head state downloaded from the beacon node api.
|
||||
func ComputeWeakSubjectivityCheckpoint(ctx context.Context, client *Client) (*WeakSubjectivityData, error) {
|
||||
ws, err := client.GetWeakSubjectivity(ctx)
|
||||
if err != nil {
|
||||
// a 404/405 is expected if querying an endpoint that doesn't support the weak subjectivity checkpoint api
|
||||
if !errors.Is(err, ErrNotOK) {
|
||||
return nil, errors.Wrap(err, "unexpected API response for prysm-only weak subjectivity checkpoint API")
|
||||
}
|
||||
// fall back to vanilla Beacon Node API method
|
||||
return computeBackwardsCompatible(ctx, client)
|
||||
}
|
||||
log.Printf("server weak subjectivity checkpoint response - epoch=%d, block_root=%#x, state_root=%#x", ws.Epoch, ws.BlockRoot, ws.StateRoot)
|
||||
return ws, nil
|
||||
}
|
||||
|
||||
const (
|
||||
@@ -96,8 +153,8 @@ const (
|
||||
prysmImplementationName = "Prysm"
|
||||
)
|
||||
|
||||
// ErrUnsupportedPrysmCheckpointVersion indicates remote beacon node can't be used for checkpoint retrieval.
|
||||
var ErrUnsupportedPrysmCheckpointVersion = errors.New("node does not meet minimum version requirements for checkpoint retrieval")
|
||||
// errUnsupportedPrysmCheckpointVersion indicates remote beacon node can't be used for checkpoint retrieval.
|
||||
var errUnsupportedPrysmCheckpointVersion = errors.New("node does not meet minimum version requirements for checkpoint retrieval")
|
||||
|
||||
// for older endpoints or clients that do not support the weak_subjectivity api method
|
||||
// we gather the necessary data for a checkpoint sync by:
|
||||
@@ -105,14 +162,14 @@ var ErrUnsupportedPrysmCheckpointVersion = errors.New("node does not meet minimu
|
||||
// - requesting the state at the first slot of the epoch
|
||||
// - using hash_tree_root(state.latest_block_header) to compute the block the state integrates
|
||||
// - requesting that block by its root
|
||||
func downloadBackwardsCompatible(ctx context.Context, client *Client) (*OriginData, error) {
|
||||
func computeBackwardsCompatible(ctx context.Context, client *Client) (*WeakSubjectivityData, error) {
|
||||
log.Print("falling back to generic checkpoint derivation, weak_subjectivity API not supported by server")
|
||||
nv, err := client.GetNodeVersion(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to proceed with fallback method without confirming node version")
|
||||
}
|
||||
if nv.implementation == prysmImplementationName && semver.Compare(nv.semver, prysmMinimumVersion) < 0 {
|
||||
return nil, errors.Wrapf(ErrUnsupportedPrysmCheckpointVersion, "%s < minimum (%s)", nv.semver, prysmMinimumVersion)
|
||||
return nil, errors.Wrapf(errUnsupportedPrysmCheckpointVersion, "%s < minimum (%s)", nv.semver, prysmMinimumVersion)
|
||||
}
|
||||
epoch, err := getWeakSubjectivityEpochFromHead(ctx, client)
|
||||
if err != nil {
|
||||
@@ -127,136 +184,78 @@ func downloadBackwardsCompatible(ctx context.Context, client *Client) (*OriginDa
|
||||
|
||||
log.Printf("requesting checkpoint state at slot %d", slot)
|
||||
// get the state at the first slot of the epoch
|
||||
stateBytes, err := client.GetState(ctx, IdFromSlot(slot))
|
||||
sb, err := client.GetState(ctx, IdFromSlot(slot))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to request state by slot from api, slot=%d", slot)
|
||||
}
|
||||
|
||||
// ConfigFork is used to unmarshal the BeaconState so we can read the block root in latest_block_header
|
||||
cf, err := detect.FromState(stateBytes)
|
||||
vu, err := detect.FromState(sb)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error detecting chain config for beacon state")
|
||||
}
|
||||
log.Printf("detected supported config in checkpoint state, name=%s, fork=%s", cf.Config.ConfigName, version.String(cf.Fork))
|
||||
log.Printf("detected supported config in checkpoint state, name=%s, fork=%s", vu.Config.ConfigName, version.String(vu.Fork))
|
||||
|
||||
st, err := cf.UnmarshalBeaconState(stateBytes)
|
||||
s, err := vu.UnmarshalBeaconState(sb)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error using detected config fork to unmarshal state bytes")
|
||||
}
|
||||
|
||||
// compute state and block roots
|
||||
stateRoot, err := st.HashTreeRoot(ctx)
|
||||
sr, err := s.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error computing hash_tree_root of state")
|
||||
}
|
||||
|
||||
header := st.LatestBlockHeader()
|
||||
header.StateRoot = stateRoot[:]
|
||||
computedBlockRoot, err := header.HashTreeRoot()
|
||||
h := s.LatestBlockHeader()
|
||||
h.StateRoot = sr[:]
|
||||
br, err := h.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error while computing block root using state data")
|
||||
}
|
||||
|
||||
blockBytes, err := client.GetBlock(ctx, IdFromRoot(computedBlockRoot))
|
||||
bb, err := client.GetBlock(ctx, IdFromRoot(br))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error requesting block by root = %d", computedBlockRoot)
|
||||
return nil, errors.Wrapf(err, "error requesting block by root = %d", br)
|
||||
}
|
||||
block, err := cf.UnmarshalBeaconBlock(blockBytes)
|
||||
b, err := vu.UnmarshalBeaconBlock(bb)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
|
||||
}
|
||||
blockRoot, err := block.Block().HashTreeRoot()
|
||||
br, err = b.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error computing hash_tree_root for block obtained via root")
|
||||
}
|
||||
|
||||
log.Printf("BeaconState slot=%d, Block slot=%d", st.Slot(), block.Block().Slot())
|
||||
log.Printf("BeaconState htr=%#xd, Block state_root=%#x", stateRoot, block.Block().StateRoot())
|
||||
log.Printf("BeaconBlock root computed from state=%#x, Block htr=%#x", computedBlockRoot, blockRoot)
|
||||
|
||||
return &OriginData{
|
||||
wsd: &WeakSubjectivityData{
|
||||
BlockRoot: blockRoot,
|
||||
StateRoot: stateRoot,
|
||||
Epoch: epoch,
|
||||
},
|
||||
st: st,
|
||||
sb: stateBytes,
|
||||
b: block,
|
||||
bb: blockBytes,
|
||||
cf: cf,
|
||||
return &WeakSubjectivityData{
|
||||
Epoch: epoch,
|
||||
BlockRoot: br,
|
||||
StateRoot: sr,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DownloadOriginData attempts to use the proposed weak_subjectivity beacon node api
|
||||
// to obtain the weak_subjectivity metadata (epoch, block_root, state_root) needed to sync
|
||||
// a beacon node from the canonical weak subjectivity checkpoint. As this is a proposed API
|
||||
// that will only be supported by prysm at first, in the event of a 404 we fallback to using a
|
||||
// different technique where we first download the head state which can be used to compute the
|
||||
// weak subjectivity epoch on the client side.
|
||||
func DownloadOriginData(ctx context.Context, client *Client) (*OriginData, error) {
|
||||
ws, err := client.GetWeakSubjectivity(ctx)
|
||||
// this method downloads the head state, which can be used to find the correct chain config
|
||||
// and use prysm's helper methods to compute the latest weak subjectivity epoch.
|
||||
func getWeakSubjectivityEpochFromHead(ctx context.Context, client *Client) (types.Epoch, error) {
|
||||
headBytes, err := client.GetState(ctx, IdHead)
|
||||
if err != nil {
|
||||
// a 404/405 is expected if querying an endpoint that doesn't support the weak subjectivity checkpoint api
|
||||
if !errors.Is(err, ErrNotOK) {
|
||||
return nil, errors.Wrap(err, "unexpected API response for prysm-only weak subjectivity checkpoint API")
|
||||
}
|
||||
// fall back to vanilla Beacon Node API method
|
||||
return downloadBackwardsCompatible(ctx, client)
|
||||
return 0, err
|
||||
}
|
||||
log.Printf("server weak subjectivity checkpoint response - epoch=%d, block_root=%#x, state_root=%#x", ws.Epoch, ws.BlockRoot, ws.StateRoot)
|
||||
|
||||
// use first slot of the epoch for the block slot
|
||||
slot, err := slots.EpochStart(ws.Epoch)
|
||||
vu, err := detect.FromState(headBytes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error computing first slot of epoch=%d", ws.Epoch)
|
||||
return 0, errors.Wrap(err, "error detecting chain config for beacon state")
|
||||
}
|
||||
log.Printf("requesting checkpoint state at slot %d", slot)
|
||||
|
||||
stateBytes, err := client.GetState(ctx, IdFromSlot(slot))
|
||||
log.Printf("detected supported config in remote head state, name=%s, fork=%s", vu.Config.ConfigName, version.String(vu.Fork))
|
||||
headState, err := vu.UnmarshalBeaconState(headBytes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to request state by slot from api, slot=%d", slot)
|
||||
}
|
||||
cf, err := detect.FromState(stateBytes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error detecting chain config for beacon state")
|
||||
}
|
||||
log.Printf("detected supported config in checkpoint state, name=%s, fork=%s", cf.Config.ConfigName, version.String(cf.Fork))
|
||||
|
||||
state, err := cf.UnmarshalBeaconState(stateBytes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error using detected config fork to unmarshal state bytes")
|
||||
}
|
||||
stateRoot, err := state.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to compute htr for state at slot=%d", slot)
|
||||
return 0, errors.Wrap(err, "error unmarshaling state to correct version")
|
||||
}
|
||||
|
||||
blockRoot, err := state.LatestBlockHeader().HashTreeRoot()
|
||||
epoch, err := helpers.LatestWeakSubjectivityEpoch(ctx, headState, vu.Config)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error computing hash_tree_root of latest_block_header")
|
||||
return 0, errors.Wrap(err, "error computing the weak subjectivity epoch from head state")
|
||||
}
|
||||
blockBytes, err := client.GetBlock(ctx, IdFromRoot(ws.BlockRoot))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error requesting block by slot = %d", slot)
|
||||
}
|
||||
block, err := cf.UnmarshalBeaconBlock(blockBytes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
|
||||
}
|
||||
realBlockRoot, err := block.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block")
|
||||
}
|
||||
log.Printf("BeaconState slot=%d, Block slot=%d", state.Slot(), block.Block().Slot())
|
||||
log.Printf("BeaconState htr=%#xd, Block state_root=%#x", stateRoot, block.Block().StateRoot())
|
||||
log.Printf("BeaconState latest_block_header htr=%#xd, block htr=%#x", blockRoot, realBlockRoot)
|
||||
return &OriginData{
|
||||
wsd: ws,
|
||||
st: state,
|
||||
b: block,
|
||||
sb: stateBytes,
|
||||
bb: blockBytes,
|
||||
cf: cf,
|
||||
}, nil
|
||||
|
||||
log.Printf("(computed client-side) weak subjectivity epoch = %d", epoch)
|
||||
return epoch, nil
|
||||
}
|
||||
|
||||
@@ -93,8 +93,8 @@ func TestFallbackVersionCheck(t *testing.T) {
|
||||
}}
|
||||
|
||||
ctx := context.Background()
|
||||
_, err := DownloadOriginData(ctx, c)
|
||||
require.ErrorIs(t, err, ErrUnsupportedPrysmCheckpointVersion)
|
||||
_, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
|
||||
require.ErrorIs(t, err, errUnsupportedPrysmCheckpointVersion)
|
||||
}
|
||||
|
||||
func TestFname(t *testing.T) {
|
||||
@@ -120,9 +120,9 @@ func TestFname(t *testing.T) {
|
||||
require.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestDownloadOriginData(t *testing.T) {
|
||||
func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cfg := params.MainnetConfig()
|
||||
cfg := params.MainnetConfig().Copy()
|
||||
|
||||
epoch := cfg.AltairForkEpoch - 1
|
||||
// set up checkpoint state, using the epoch that will be computed as the ws checkpoint state based on the head state
|
||||
@@ -204,19 +204,15 @@ func TestDownloadOriginData(t *testing.T) {
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
|
||||
od, err := DownloadOriginData(ctx, c)
|
||||
wsd, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedWSD.Epoch, od.wsd.Epoch)
|
||||
require.Equal(t, expectedWSD.StateRoot, od.wsd.StateRoot)
|
||||
require.Equal(t, expectedWSD.BlockRoot, od.wsd.BlockRoot)
|
||||
require.DeepEqual(t, wsSerialized, od.sb)
|
||||
require.DeepEqual(t, serBlock, od.bb)
|
||||
require.DeepEqual(t, wst.Fork().CurrentVersion, od.cf.Version[:])
|
||||
require.DeepEqual(t, version.Phase0, od.cf.Fork)
|
||||
require.Equal(t, expectedWSD.Epoch, wsd.Epoch)
|
||||
require.Equal(t, expectedWSD.StateRoot, wsd.StateRoot)
|
||||
require.Equal(t, expectedWSD.BlockRoot, wsd.BlockRoot)
|
||||
}
|
||||
|
||||
// runs downloadBackwardsCompatible directly
|
||||
// and via DownloadOriginData with a round tripper that triggers the backwards compatible code path
|
||||
// runs computeBackwardsCompatible directly
|
||||
// and via ComputeWeakSubjectivityCheckpoint with a round tripper that triggers the backwards compatible code path
|
||||
func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cfg := params.MainnetConfig()
|
||||
@@ -297,16 +293,12 @@ func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
|
||||
odPub, err := DownloadOriginData(ctx, c)
|
||||
wsPub, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
|
||||
require.NoError(t, err)
|
||||
|
||||
odPriv, err := downloadBackwardsCompatible(ctx, c)
|
||||
wsPriv, err := computeBackwardsCompatible(ctx, c)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, odPriv.wsd, odPub.wsd)
|
||||
require.DeepEqual(t, odPriv.sb, odPub.sb)
|
||||
require.DeepEqual(t, odPriv.bb, odPub.bb)
|
||||
require.DeepEqual(t, odPriv.cf.Fork, odPub.cf.Fork)
|
||||
require.DeepEqual(t, odPriv.cf.Version, odPub.cf.Version)
|
||||
require.DeepEqual(t, wsPriv, wsPub)
|
||||
}
|
||||
|
||||
func TestGetWeakSubjectivityEpochFromHead(t *testing.T) {
|
||||
@@ -402,3 +394,94 @@ func populateValidators(cfg *params.BeaconChainConfig, st state.BeaconState, val
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestDownloadFinalizedData(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cfg := params.MainnetConfig().Copy()
|
||||
|
||||
// avoid the altair zone because genesis tests are easier to set up
|
||||
epoch := cfg.AltairForkEpoch - 1
|
||||
// set up checkpoint state, using the epoch that will be computed as the ws checkpoint state based on the head state
|
||||
slot, err := slots.EpochStart(epoch)
|
||||
require.NoError(t, err)
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
fork, err := forkForEpoch(cfg, epoch)
|
||||
require.NoError(t, st.SetFork(fork))
|
||||
|
||||
// set up checkpoint block
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, wrapper.SetBlockParentRoot(b, cfg.ZeroHash))
|
||||
require.NoError(t, wrapper.SetBlockSlot(b, slot))
|
||||
require.NoError(t, wrapper.SetProposerIndex(b, 0))
|
||||
|
||||
// set up state header pointing at checkpoint block - this is how the block is downloaded by root
|
||||
header, err := b.Header()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetLatestBlockHeader(header.Header))
|
||||
|
||||
// order of operations can be confusing here:
|
||||
// - when computing the state root, make sure block header is complete, EXCEPT the state root should be zero-value
|
||||
// - before computing the block root (to match the request route), the block should include the state root
|
||||
// *computed from the state with a header that does not have a state root set yet*
|
||||
sr, err := st.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, wrapper.SetBlockStateRoot(b, sr))
|
||||
mb, err := b.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
br, err := b.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
ms, err := st.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
|
||||
hc := &http.Client{
|
||||
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
|
||||
res := &http.Response{Request: req}
|
||||
switch req.URL.Path {
|
||||
case renderGetStatePath(IdFinalized):
|
||||
res.StatusCode = http.StatusOK
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(ms))
|
||||
case renderGetBlockPath(IdFromRoot(br)):
|
||||
res.StatusCode = http.StatusOK
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(mb))
|
||||
default:
|
||||
res.StatusCode = http.StatusInternalServerError
|
||||
res.Body = io.NopCloser(bytes.NewBufferString(""))
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}},
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
|
||||
// sanity check before we go through checkpoint
|
||||
// make sure we can download the state and unmarshal it with the VersionedUnmarshaler
|
||||
sb, err := c.GetState(ctx, IdFinalized)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(sb, ms))
|
||||
vu, err := detect.FromState(sb)
|
||||
require.NoError(t, err)
|
||||
us, err := vu.UnmarshalBeaconState(sb)
|
||||
require.NoError(t, err)
|
||||
ushtr, err := us.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, sr, ushtr)
|
||||
|
||||
expected := &OriginData{
|
||||
sb: ms,
|
||||
bb: mb,
|
||||
br: br,
|
||||
sr: sr,
|
||||
}
|
||||
od, err := DownloadFinalizedData(ctx, c)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(expected.sb, od.sb))
|
||||
require.Equal(t, true, bytes.Equal(expected.bb, od.bb))
|
||||
require.Equal(t, expected.br, od.br)
|
||||
require.Equal(t, expected.sr, od.sr)
|
||||
}
|
||||
|
||||
@@ -46,8 +46,9 @@ const (
|
||||
type StateOrBlockId string
|
||||
|
||||
const (
|
||||
IdGenesis StateOrBlockId = "genesis"
|
||||
IdHead StateOrBlockId = "head"
|
||||
IdGenesis StateOrBlockId = "genesis"
|
||||
IdHead StateOrBlockId = "head"
|
||||
IdFinalized StateOrBlockId = "finalized"
|
||||
)
|
||||
|
||||
var ErrMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
|
||||
@@ -344,16 +345,6 @@ func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData
|
||||
}, nil
|
||||
}
|
||||
|
||||
// WeakSubjectivityData represents the state root, block root and epoch of the BeaconState + SignedBeaconBlock
|
||||
// that falls at the beginning of the current weak subjectivity period. These values can be used to construct
|
||||
// a weak subjectivity checkpoint, or to download a BeaconState+SignedBeaconBlock pair that can be used to bootstrap
|
||||
// a new Beacon Node using Checkpoint Sync.
|
||||
type WeakSubjectivityData struct {
|
||||
BlockRoot [32]byte
|
||||
StateRoot [32]byte
|
||||
Epoch types.Epoch
|
||||
}
|
||||
|
||||
func non200Err(response *http.Response) error {
|
||||
bodyBytes, err := io.ReadAll(response.Body)
|
||||
var body string
|
||||
|
||||
41
api/client/builder/BUILD.bazel
Normal file
41
api/client/builder/BUILD.bazel
Normal file
@@ -0,0 +1,41 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"client.go",
|
||||
"errors.go",
|
||||
"types.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/api/client/builder",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"client_test.go",
|
||||
"types_test.go",
|
||||
],
|
||||
data = glob(["testdata/**"]),
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
],
|
||||
)
|
||||
261
api/client/builder/client.go
Normal file
261
api/client/builder/client.go
Normal file
@@ -0,0 +1,261 @@
|
||||
package builder
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
getExecHeaderPath = "/eth/v1/builder/header/{{.Slot}}/{{.ParentHash}}/{{.Pubkey}}"
|
||||
getStatus = "/eth/v1/builder/status"
|
||||
postBlindedBeaconBlockPath = "/eth/v1/builder/blinded_blocks"
|
||||
postRegisterValidatorPath = "/eth/v1/builder/validators"
|
||||
)
|
||||
|
||||
var errMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
|
||||
var errMalformedRequest = errors.New("required request data are missing")
|
||||
|
||||
// ClientOpt is a functional option for the Client type (http.Client wrapper)
|
||||
type ClientOpt func(*Client)
|
||||
|
||||
// WithTimeout sets the .Timeout attribute of the wrapped http.Client.
|
||||
func WithTimeout(timeout time.Duration) ClientOpt {
|
||||
return func(c *Client) {
|
||||
c.hc.Timeout = timeout
|
||||
}
|
||||
}
|
||||
|
||||
type observer interface {
|
||||
observe(r *http.Request) error
|
||||
}
|
||||
|
||||
func WithObserver(m observer) ClientOpt {
|
||||
return func(c *Client) {
|
||||
c.obvs = append(c.obvs, m)
|
||||
}
|
||||
}
|
||||
|
||||
type requestLogger struct{}
|
||||
|
||||
func (*requestLogger) observe(r *http.Request) (e error) {
|
||||
b := bytes.NewBuffer(nil)
|
||||
if r.Body == nil {
|
||||
log.WithFields(log.Fields{
|
||||
"body-base64": "(nil value)",
|
||||
"url": r.URL.String(),
|
||||
}).Info("builder http request")
|
||||
return nil
|
||||
}
|
||||
t := io.TeeReader(r.Body, b)
|
||||
defer func() {
|
||||
if r.Body != nil {
|
||||
e = r.Body.Close()
|
||||
}
|
||||
}()
|
||||
body, err := io.ReadAll(t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Body = io.NopCloser(b)
|
||||
log.WithFields(log.Fields{
|
||||
"body-base64": string(body),
|
||||
"url": r.URL.String(),
|
||||
}).Info("builder http request")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var _ observer = &requestLogger{}
|
||||
|
||||
// Client provides a collection of helper methods for calling Builder API endpoints.
|
||||
type Client struct {
|
||||
hc *http.Client
|
||||
baseURL *url.URL
|
||||
obvs []observer
|
||||
}
|
||||
|
||||
// NewClient constructs a new client with the provided options (ex WithTimeout).
|
||||
// `host` is the base host + port used to construct request urls. This value can be
|
||||
// a URL string, or NewClient will assume an http endpoint if just `host:port` is used.
|
||||
func NewClient(host string, opts ...ClientOpt) (*Client, error) {
|
||||
u, err := urlForHost(host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c := &Client{
|
||||
hc: &http.Client{},
|
||||
baseURL: u,
|
||||
}
|
||||
for _, o := range opts {
|
||||
o(c)
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func urlForHost(h string) (*url.URL, error) {
|
||||
// try to parse as url (being permissive)
|
||||
u, err := url.Parse(h)
|
||||
if err == nil && u.Host != "" {
|
||||
return u, nil
|
||||
}
|
||||
// try to parse as host:port
|
||||
host, port, err := net.SplitHostPort(h)
|
||||
if err != nil {
|
||||
return nil, errMalformedHostname
|
||||
}
|
||||
return &url.URL{Host: net.JoinHostPort(host, port), Scheme: "http"}, nil
|
||||
}
|
||||
|
||||
// NodeURL returns a human-readable string representation of the beacon node base url.
|
||||
func (c *Client) NodeURL() string {
|
||||
return c.baseURL.String()
|
||||
}
|
||||
|
||||
type reqOption func(*http.Request)
|
||||
|
||||
// do is a generic, opinionated GET function to reduce boilerplate amongst the getters in this packageapi/client/builder/types.go.
|
||||
func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, opts ...reqOption) ([]byte, error) {
|
||||
u := c.baseURL.ResolveReference(&url.URL{Path: path})
|
||||
log.Printf("requesting %s", u.String())
|
||||
req, err := http.NewRequestWithContext(ctx, method, u.String(), body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, o := range opts {
|
||||
o(req)
|
||||
}
|
||||
for _, o := range c.obvs {
|
||||
if err := o.observe(req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
r, err := c.hc.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
err = r.Body.Close()
|
||||
}()
|
||||
if r.StatusCode != http.StatusOK {
|
||||
return nil, non200Err(r)
|
||||
}
|
||||
b, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error reading http response body from GetBlock")
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
var execHeaderTemplate = template.Must(template.New("").Parse(getExecHeaderPath))
|
||||
|
||||
func execHeaderPath(slot types.Slot, parentHash [32]byte, pubkey [48]byte) (string, error) {
|
||||
v := struct {
|
||||
Slot types.Slot
|
||||
ParentHash string
|
||||
Pubkey string
|
||||
}{
|
||||
Slot: slot,
|
||||
ParentHash: fmt.Sprintf("%#x", parentHash),
|
||||
Pubkey: fmt.Sprintf("%#x", pubkey),
|
||||
}
|
||||
b := bytes.NewBuffer(nil)
|
||||
err := execHeaderTemplate.Execute(b, v)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "error rendering exec header template with slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
|
||||
}
|
||||
return b.String(), nil
|
||||
}
|
||||
|
||||
// GetHeader is used by a proposing validator to request an ExecutionPayloadHeader from the Builder node.
|
||||
func (c *Client) GetHeader(ctx context.Context, slot types.Slot, parentHash [32]byte, pubkey [48]byte) (*ethpb.SignedBuilderBid, error) {
|
||||
path, err := execHeaderPath(slot, parentHash, pubkey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hb, err := c.do(ctx, http.MethodGet, path, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hr := &ExecHeaderResponse{}
|
||||
if err := json.Unmarshal(hb, hr); err != nil {
|
||||
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
|
||||
}
|
||||
return hr.ToProto()
|
||||
}
|
||||
|
||||
// RegisterValidator encodes the SignedValidatorRegistrationV1 message to json (including hex-encoding the byte
|
||||
// fields with 0x prefixes) and posts to the builder validator registration endpoint.
|
||||
func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error {
|
||||
if len(svr) == 0 {
|
||||
return errors.Wrap(errMalformedRequest, "empty validator registration list")
|
||||
}
|
||||
vs := make([]*SignedValidatorRegistration, len(svr))
|
||||
for i := 0; i < len(svr); i++ {
|
||||
vs[i] = &SignedValidatorRegistration{SignedValidatorRegistrationV1: svr[i]}
|
||||
}
|
||||
body, err := json.Marshal(vs)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error encoding the SignedValidatorRegistration value body in RegisterValidator")
|
||||
}
|
||||
_, err = c.do(ctx, http.MethodPost, postRegisterValidatorPath, bytes.NewBuffer(body))
|
||||
return err
|
||||
}
|
||||
|
||||
// SubmitBlindedBlock calls the builder API endpoint that binds the validator to the builder and submits the block.
|
||||
// The response is the full ExecutionPayload used to create the blinded block.
|
||||
func (c *Client) SubmitBlindedBlock(ctx context.Context, sb *ethpb.SignedBlindedBeaconBlockBellatrix) (*v1.ExecutionPayload, error) {
|
||||
v := &SignedBlindedBeaconBlockBellatrix{SignedBlindedBeaconBlockBellatrix: sb}
|
||||
body, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockBellatrix value body in SubmitBlindedBlock")
|
||||
}
|
||||
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockBellatrix to the builder api")
|
||||
}
|
||||
ep := &ExecPayloadResponse{}
|
||||
if err := json.Unmarshal(rb, ep); err != nil {
|
||||
return nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlock response")
|
||||
}
|
||||
return ep.ToProto()
|
||||
}
|
||||
|
||||
// Status asks the remote builder server for a health check. A response of 200 with an empty body is the success/healthy
|
||||
// response, and an error response may have an error message. This method will return a nil value for error in the
|
||||
// happy path, and an error with information about the server response body for a non-200 response.
|
||||
func (c *Client) Status(ctx context.Context) error {
|
||||
_, err := c.do(ctx, http.MethodGet, getStatus, nil)
|
||||
return err
|
||||
}
|
||||
|
||||
func non200Err(response *http.Response) error {
|
||||
bodyBytes, err := io.ReadAll(response.Body)
|
||||
var body string
|
||||
if err != nil {
|
||||
body = "(Unable to read response body.)"
|
||||
} else {
|
||||
body = "response body:\n" + string(bodyBytes)
|
||||
}
|
||||
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
|
||||
switch response.StatusCode {
|
||||
case 404:
|
||||
return errors.Wrap(ErrNotFound, msg)
|
||||
default:
|
||||
return errors.Wrap(ErrNotOK, msg)
|
||||
}
|
||||
}
|
||||
344
api/client/builder/client_test.go
Normal file
344
api/client/builder/client_test.go
Normal file
@@ -0,0 +1,344 @@
|
||||
package builder
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
|
||||
type roundtrip func(*http.Request) (*http.Response, error)
|
||||
|
||||
func (fn roundtrip) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||
return fn(r)
|
||||
}
|
||||
|
||||
func TestClient_Status(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
statusPath := "/eth/v1/builder/status"
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
defer func() {
|
||||
if r.Body == nil {
|
||||
return
|
||||
}
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
require.Equal(t, statusPath, r.URL.Path)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(bytes.NewBuffer(nil)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
require.NoError(t, c.Status(ctx))
|
||||
hc = &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
defer func() {
|
||||
if r.Body == nil {
|
||||
return
|
||||
}
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
require.Equal(t, statusPath, r.URL.Path)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusInternalServerError,
|
||||
Body: io.NopCloser(bytes.NewBuffer(nil)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c = &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
require.ErrorIs(t, c.Status(ctx), ErrNotOK)
|
||||
}
|
||||
|
||||
func TestClient_RegisterValidator(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
expectedBody := `[{"message":{"fee_recipient":"0x0000000000000000000000000000000000000000","gas_limit":"23","timestamp":"42","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"}}]`
|
||||
expectedPath := "/eth/v1/builder/validators"
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
body, err := io.ReadAll(r.Body)
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedBody, string(body))
|
||||
require.Equal(t, expectedPath, r.URL.Path)
|
||||
require.Equal(t, http.MethodPost, r.Method)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(bytes.NewBuffer(nil)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
reg := ð.SignedValidatorRegistrationV1{
|
||||
Message: ð.ValidatorRegistrationV1{
|
||||
FeeRecipient: ezDecode(t, params.BeaconConfig().EthBurnAddressHex),
|
||||
GasLimit: 23,
|
||||
Timestamp: 42,
|
||||
Pubkey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
|
||||
},
|
||||
}
|
||||
require.NoError(t, c.RegisterValidator(ctx, []*eth.SignedValidatorRegistrationV1{reg}))
|
||||
}
|
||||
|
||||
func TestClient_GetHeader(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
expectedPath := "/eth/v1/builder/header/23/0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2/0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, expectedPath, r.URL.Path)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusInternalServerError,
|
||||
Body: io.NopCloser(bytes.NewBuffer(nil)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
var slot types.Slot = 23
|
||||
parentHash := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
pubkey := ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
|
||||
_, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
|
||||
require.ErrorIs(t, err, ErrNotOK)
|
||||
|
||||
hc = &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, expectedPath, r.URL.Path)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponse)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c = &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
h, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
|
||||
require.NoError(t, err)
|
||||
expectedSig := ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505")
|
||||
require.Equal(t, true, bytes.Equal(expectedSig, h.Signature))
|
||||
expectedTxRoot := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.Equal(t, true, bytes.Equal(expectedTxRoot, h.Message.Header.TransactionsRoot))
|
||||
require.Equal(t, uint64(1), h.Message.Header.GasUsed)
|
||||
value := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", h.Message.Value))
|
||||
}
|
||||
|
||||
func TestSubmitBlindedBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayload)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
sbbb := testSignedBlindedBeaconBlockBellatrix(t)
|
||||
ep, err := c.SubmitBlindedBlock(ctx, sbbb)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"), ep.ParentHash))
|
||||
bfpg := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||
require.Equal(t, fmt.Sprintf("%#x", bfpg.SSZBytes()), fmt.Sprintf("%#x", ep.BaseFeePerGas))
|
||||
require.Equal(t, uint64(1), ep.GasLimit)
|
||||
}
|
||||
|
||||
func testSignedBlindedBeaconBlockBellatrix(t *testing.T) *eth.SignedBlindedBeaconBlockBellatrix {
|
||||
return ð.SignedBlindedBeaconBlockBellatrix{
|
||||
Block: ð.BlindedBeaconBlockBellatrix{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Body: ð.BlindedBeaconBlockBodyBellatrix{
|
||||
RandaoReveal: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
Eth1Data: ð.Eth1Data{
|
||||
DepositRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
DepositCount: 1,
|
||||
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Graffiti: ezDecode(t, "0xdeadbeefc0ffee"),
|
||||
ProposerSlashings: []*eth.ProposerSlashing{
|
||||
{
|
||||
Header_1: ð.SignedBeaconBlockHeader{
|
||||
Header: ð.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
Header_2: &eth.SignedBeaconBlockHeader{
|
||||
Header: &eth.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
},
|
||||
},
|
||||
AttesterSlashings: []*eth.AttesterSlashing{
|
||||
{
|
||||
Attestation_1: &eth.IndexedAttestation{
|
||||
AttestingIndices: []uint64{1},
|
||||
Data: &eth.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Source: &eth.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Target: &eth.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
Attestation_2: &eth.IndexedAttestation{
|
||||
AttestingIndices: []uint64{1},
|
||||
Data: &eth.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Source: &eth.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Target: &eth.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
},
|
||||
},
|
||||
Attestations: []*eth.Attestation{
|
||||
{
|
||||
AggregationBits: bitfield.Bitlist{0x01},
|
||||
Data: &eth.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Source: &eth.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Target: &eth.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
},
|
||||
Deposits: []*eth.Deposit{
|
||||
{
|
||||
Proof: [][]byte{ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")},
|
||||
Data: &eth.Deposit_Data{
|
||||
PublicKey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
|
||||
WithdrawalCredentials: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Amount: 1,
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
},
|
||||
},
|
||||
VoluntaryExits: []*eth.SignedVoluntaryExit{
|
||||
{
|
||||
Exit: &eth.VoluntaryExit{
|
||||
Epoch: 1,
|
||||
ValidatorIndex: 1,
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
},
|
||||
SyncAggregate: &eth.SyncAggregate{
|
||||
SyncCommitteeSignature: make([]byte, 48),
|
||||
SyncCommitteeBits: bitfield.Bitvector512{0x01},
|
||||
},
|
||||
ExecutionPayloadHeader: &eth.ExecutionPayloadHeader{
|
||||
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
ReceiptsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
LogsBloom: ezDecode(t, "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
|
||||
PrevRandao: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BlockNumber: 1,
|
||||
GasLimit: 1,
|
||||
GasUsed: 1,
|
||||
Timestamp: 1,
|
||||
ExtraData: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BaseFeePerGas: []byte(strconv.FormatUint(1, 10)),
|
||||
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
},
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
}
|
||||
}
|
||||
|
||||
func TestRequestLogger(t *testing.T) {
|
||||
wo := WithObserver(&requestLogger{})
|
||||
c, err := NewClient("localhost:3500", wo)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, getStatus, r.URL.Path)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayload)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c.hc = hc
|
||||
err = c.Status(ctx)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
10
api/client/builder/errors.go
Normal file
@@ -0,0 +1,10 @@
package builder

import "github.com/pkg/errors"

// ErrNotOK is used to indicate when an HTTP request to the Beacon Node API failed with any non-2xx response code.
// More specific errors may be returned, but an error in reaction to a non-2xx response will always wrap ErrNotOK.
var ErrNotOK = errors.New("did not receive 2xx response from API")

// ErrNotFound specifically means that a '404 - NOT FOUND' response was received from the API.
var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")
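ErrNotFound wraps ErrNotOK, so callers can use errors.Is to match either the specific 404 case or any non-2xx failure with a single sentinel. The helper below is a hypothetical sketch of that call-site pattern, not part of this diff; it assumes the same github.com/pkg/errors import, whose Is/Unwrap helpers forward to the standard library.

// isBuilderUnavailable is an illustrative helper, not part of the builder package.
// It reports whether err came from a non-2xx builder response, and whether that
// response was specifically a 404. Because ErrNotFound wraps ErrNotOK, the
// second check also matches the 404 case.
func isBuilderUnavailable(err error) (notFound bool, notOK bool) {
	return errors.Is(err, ErrNotFound), errors.Is(err, ErrNotOK)
}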
1
api/client/builder/testdata/blinded-block.json
vendored
Normal file
File diff suppressed because one or more lines are too long
1
api/client/builder/testdata/execution-payload.json
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"14074904626401341155369551180448584754667373453244490859944217516317499064576","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}
|
||||
585
api/client/builder/types.go
Normal file
@@ -0,0 +1,585 @@
|
||||
package builder
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strconv"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
type SignedValidatorRegistration struct {
|
||||
*eth.SignedValidatorRegistrationV1
|
||||
}
|
||||
|
||||
type ValidatorRegistration struct {
|
||||
*eth.ValidatorRegistrationV1
|
||||
}
|
||||
|
||||
func (r *SignedValidatorRegistration) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Message *ValidatorRegistration `json:"message,omitempty"`
|
||||
Signature hexutil.Bytes `json:"signature,omitempty"`
|
||||
}{
|
||||
Message: &ValidatorRegistration{r.Message},
|
||||
Signature: r.SignedValidatorRegistrationV1.Signature,
|
||||
})
|
||||
}
|
||||
|
||||
func (r *ValidatorRegistration) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`
|
||||
GasLimit string `json:"gas_limit,omitempty"`
|
||||
Timestamp string `json:"timestamp,omitempty"`
|
||||
Pubkey hexutil.Bytes `json:"pubkey,omitempty"`
|
||||
}{
|
||||
FeeRecipient: r.FeeRecipient,
|
||||
GasLimit: fmt.Sprintf("%d", r.GasLimit),
|
||||
Timestamp: fmt.Sprintf("%d", r.Timestamp),
|
||||
Pubkey: r.Pubkey,
|
||||
})
|
||||
}
|
||||
|
||||
type Uint256 struct {
|
||||
*big.Int
|
||||
}
|
||||
|
||||
func stringToUint256(s string) Uint256 {
|
||||
bi := new(big.Int)
|
||||
bi.SetString(s, 10)
|
||||
return Uint256{Int: bi}
|
||||
}
|
||||
|
||||
// sszBytesToUint256 creates a Uint256 from a ssz-style (little-endian byte slice) representation.
|
||||
func sszBytesToUint256(b []byte) Uint256 {
|
||||
bi := new(big.Int)
|
||||
return Uint256{Int: bi.SetBytes(bytesutil.ReverseByteOrder(b))}
|
||||
}
|
||||
|
||||
// SSZBytes creates an ssz-style (little-endian byte slice) representation of the Uint256
|
||||
func (s Uint256) SSZBytes() []byte {
|
||||
return bytesutil.PadTo(bytesutil.ReverseByteOrder(s.Int.Bytes()), 32)
|
||||
}
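sszBytesToUint256 and SSZBytes are inverses: big.Int works with big-endian bytes, while the SSZ form is a 32-byte little-endian slice, so both helpers reverse the byte order. Below is a hypothetical round-trip test, not part of this diff; it would sit with the cases in types_test.go and reuse its testing/require imports.

// TestUint256SSZRoundTrip is an illustrative sketch of the byte-order handling.
func TestUint256SSZRoundTrip(t *testing.T) {
	v := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
	le := v.SSZBytes() // 32 bytes, least-significant byte first
	require.Equal(t, 32, len(le))
	require.Equal(t, v.String(), sszBytesToUint256(le).String())
}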
|
||||
|
||||
var errUnmarshalUint256Failed = errors.New("unable to UnmarshalText into a Uint256 value")
|
||||
|
||||
func (s *Uint256) UnmarshalJSON(t []byte) error {
|
||||
start := 0
|
||||
end := len(t)
|
||||
if t[0] == '"' {
|
||||
start += 1
|
||||
}
|
||||
if t[end-1] == '"' {
|
||||
end -= 1
|
||||
}
|
||||
return s.UnmarshalText(t[start:end])
|
||||
}
|
||||
|
||||
func (s *Uint256) UnmarshalText(t []byte) error {
|
||||
if s.Int == nil {
|
||||
s.Int = big.NewInt(0)
|
||||
}
|
||||
z, ok := s.SetString(string(t), 10)
|
||||
if !ok {
|
||||
return errors.Wrapf(errUnmarshalUint256Failed, "value=%s", string(t))
|
||||
}
|
||||
s.Int = z
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s Uint256) MarshalJSON() ([]byte, error) {
|
||||
t, err := s.MarshalText()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
t = append([]byte{'"'}, t...)
|
||||
t = append(t, '"')
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (s Uint256) MarshalText() ([]byte, error) {
|
||||
return []byte(s.String()), nil
|
||||
}
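The UnmarshalJSON/UnmarshalText/MarshalJSON trio above round-trips the builder API's quoted base-10 encoding of uint256 values (for example the "value" and "base_fee_per_gas" fields). A hypothetical test sketching that behaviour, not part of this diff; it assumes the encoding/json and require imports used by types_test.go below.

// TestUint256JSONRoundTrip is an illustrative sketch of the quoted-decimal round trip.
func TestUint256JSONRoundTrip(t *testing.T) {
	in := []byte(`"652312848583266388373324160190187140051835877600158453279131187530910662656"`)
	var u Uint256
	require.NoError(t, json.Unmarshal(in, &u))
	out, err := json.Marshal(u)
	require.NoError(t, err)
	require.Equal(t, string(in), string(out))
}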
|
||||
|
||||
type Uint64String uint64
|
||||
|
||||
func (s *Uint64String) UnmarshalText(t []byte) error {
|
||||
u, err := strconv.ParseUint(string(t), 10, 64)
|
||||
*s = Uint64String(u)
|
||||
return err
|
||||
}
|
||||
|
||||
func (s Uint64String) MarshalText() ([]byte, error) {
|
||||
return []byte(fmt.Sprintf("%d", s)), nil
|
||||
}
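Uint64String only implements the TextMarshaler/TextUnmarshaler methods; encoding/json falls back to those for string-typed values, which is how quoted numbers such as "block_number": "1" map onto uint64 fields. A minimal hypothetical sketch, not part of this diff, under the same assumptions as the test sketch above.

// TestUint64StringJSONRoundTrip is an illustrative sketch of the quoted uint64 encoding.
func TestUint64StringJSONRoundTrip(t *testing.T) {
	var n Uint64String
	require.NoError(t, json.Unmarshal([]byte(`"1"`), &n))
	require.Equal(t, Uint64String(1), n)
	b, err := json.Marshal(n)
	require.NoError(t, err)
	require.Equal(t, `"1"`, string(b))
}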
|
||||
|
||||
type ExecHeaderResponse struct {
|
||||
Version string `json:"version,omitempty"`
|
||||
Data struct {
|
||||
Signature hexutil.Bytes `json:"signature,omitempty"`
|
||||
Message *BuilderBid `json:"message,omitempty"`
|
||||
} `json:"data,omitempty"`
|
||||
}
|
||||
|
||||
func (ehr *ExecHeaderResponse) ToProto() (*eth.SignedBuilderBid, error) {
|
||||
bb, err := ehr.Data.Message.ToProto()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &eth.SignedBuilderBid{
|
||||
Message: bb,
|
||||
Signature: ehr.Data.Signature,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (bb *BuilderBid) ToProto() (*eth.BuilderBid, error) {
|
||||
header, err := bb.Header.ToProto()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &eth.BuilderBid{
|
||||
Header: header,
|
||||
Value: bb.Value.SSZBytes(),
|
||||
Pubkey: bb.Pubkey,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (h *ExecutionPayloadHeader) ToProto() (*eth.ExecutionPayloadHeader, error) {
|
||||
return &eth.ExecutionPayloadHeader{
|
||||
ParentHash: h.ParentHash,
|
||||
FeeRecipient: h.FeeRecipient,
|
||||
StateRoot: h.StateRoot,
|
||||
ReceiptsRoot: h.ReceiptsRoot,
|
||||
LogsBloom: h.LogsBloom,
|
||||
PrevRandao: h.PrevRandao,
|
||||
BlockNumber: uint64(h.BlockNumber),
|
||||
GasLimit: uint64(h.GasLimit),
|
||||
GasUsed: uint64(h.GasUsed),
|
||||
Timestamp: uint64(h.Timestamp),
|
||||
ExtraData: h.ExtraData,
|
||||
BaseFeePerGas: h.BaseFeePerGas.SSZBytes(),
|
||||
BlockHash: h.BlockHash,
|
||||
TransactionsRoot: h.TransactionsRoot,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type BuilderBid struct {
|
||||
Header *ExecutionPayloadHeader `json:"header,omitempty"`
|
||||
Value Uint256 `json:"value,omitempty"`
|
||||
Pubkey hexutil.Bytes `json:"pubkey,omitempty"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadHeader struct {
|
||||
ParentHash hexutil.Bytes `json:"parent_hash,omitempty"`
|
||||
FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`
|
||||
StateRoot hexutil.Bytes `json:"state_root,omitempty"`
|
||||
ReceiptsRoot hexutil.Bytes `json:"receipts_root,omitempty"`
|
||||
LogsBloom hexutil.Bytes `json:"logs_bloom,omitempty"`
|
||||
PrevRandao hexutil.Bytes `json:"prev_randao,omitempty"`
|
||||
BlockNumber Uint64String `json:"block_number,omitempty"`
|
||||
GasLimit Uint64String `json:"gas_limit,omitempty"`
|
||||
GasUsed Uint64String `json:"gas_used,omitempty"`
|
||||
Timestamp Uint64String `json:"timestamp,omitempty"`
|
||||
ExtraData hexutil.Bytes `json:"extra_data,omitempty"`
|
||||
BaseFeePerGas Uint256 `json:"base_fee_per_gas,omitempty"`
|
||||
BlockHash hexutil.Bytes `json:"block_hash,omitempty"`
|
||||
TransactionsRoot hexutil.Bytes `json:"transactions_root,omitempty"`
|
||||
*eth.ExecutionPayloadHeader
|
||||
}
|
||||
|
||||
func (h *ExecutionPayloadHeader) MarshalJSON() ([]byte, error) {
|
||||
type MarshalCaller ExecutionPayloadHeader
|
||||
return json.Marshal(&MarshalCaller{
|
||||
ParentHash: h.ExecutionPayloadHeader.ParentHash,
|
||||
FeeRecipient: h.ExecutionPayloadHeader.FeeRecipient,
|
||||
StateRoot: h.ExecutionPayloadHeader.StateRoot,
|
||||
ReceiptsRoot: h.ExecutionPayloadHeader.ReceiptsRoot,
|
||||
LogsBloom: h.ExecutionPayloadHeader.LogsBloom,
|
||||
PrevRandao: h.ExecutionPayloadHeader.PrevRandao,
|
||||
BlockNumber: Uint64String(h.ExecutionPayloadHeader.BlockNumber),
|
||||
GasLimit: Uint64String(h.ExecutionPayloadHeader.GasLimit),
|
||||
GasUsed: Uint64String(h.ExecutionPayloadHeader.GasUsed),
|
||||
Timestamp: Uint64String(h.ExecutionPayloadHeader.Timestamp),
|
||||
ExtraData: h.ExecutionPayloadHeader.ExtraData,
|
||||
BaseFeePerGas: sszBytesToUint256(h.ExecutionPayloadHeader.BaseFeePerGas),
|
||||
BlockHash: h.ExecutionPayloadHeader.BlockHash,
|
||||
TransactionsRoot: h.ExecutionPayloadHeader.TransactionsRoot,
|
||||
})
|
||||
}
|
||||
|
||||
func (h *ExecutionPayloadHeader) UnmarshalJSON(b []byte) error {
|
||||
type UnmarshalCaller ExecutionPayloadHeader
|
||||
uc := &UnmarshalCaller{}
|
||||
if err := json.Unmarshal(b, uc); err != nil {
|
||||
return err
|
||||
}
|
||||
ep := ExecutionPayloadHeader(*uc)
|
||||
*h = ep
|
||||
var err error
|
||||
h.ExecutionPayloadHeader, err = h.ToProto()
|
||||
return err
|
||||
}
|
||||
|
||||
type ExecPayloadResponse struct {
|
||||
Version string `json:"version,omitempty"`
|
||||
Data ExecutionPayload `json:"data,omitempty"`
|
||||
}
|
||||
|
||||
type ExecutionPayload struct {
|
||||
ParentHash hexutil.Bytes `json:"parent_hash,omitempty"`
|
||||
FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`
|
||||
StateRoot hexutil.Bytes `json:"state_root,omitempty"`
|
||||
ReceiptsRoot hexutil.Bytes `json:"receipts_root,omitempty"`
|
||||
LogsBloom hexutil.Bytes `json:"logs_bloom,omitempty"`
|
||||
PrevRandao hexutil.Bytes `json:"prev_randao,omitempty"`
|
||||
BlockNumber Uint64String `json:"block_number,omitempty"`
|
||||
GasLimit Uint64String `json:"gas_limit,omitempty"`
|
||||
GasUsed Uint64String `json:"gas_used,omitempty"`
|
||||
Timestamp Uint64String `json:"timestamp,omitempty"`
|
||||
ExtraData hexutil.Bytes `json:"extra_data,omitempty"`
|
||||
BaseFeePerGas Uint256 `json:"base_fee_per_gas,omitempty"`
|
||||
BlockHash hexutil.Bytes `json:"block_hash,omitempty"`
|
||||
Transactions []hexutil.Bytes `json:"transactions,omitempty"`
|
||||
}
|
||||
|
||||
func (r *ExecPayloadResponse) ToProto() (*v1.ExecutionPayload, error) {
|
||||
return r.Data.ToProto()
|
||||
}
|
||||
|
||||
func (p *ExecutionPayload) ToProto() (*v1.ExecutionPayload, error) {
|
||||
txs := make([][]byte, len(p.Transactions))
|
||||
for i := range p.Transactions {
|
||||
txs[i] = p.Transactions[i]
|
||||
}
|
||||
return &v1.ExecutionPayload{
|
||||
ParentHash: p.ParentHash,
|
||||
FeeRecipient: p.FeeRecipient,
|
||||
StateRoot: p.StateRoot,
|
||||
ReceiptsRoot: p.ReceiptsRoot,
|
||||
LogsBloom: p.LogsBloom,
|
||||
PrevRandao: p.PrevRandao,
|
||||
BlockNumber: uint64(p.BlockNumber),
|
||||
GasLimit: uint64(p.GasLimit),
|
||||
GasUsed: uint64(p.GasUsed),
|
||||
Timestamp: uint64(p.Timestamp),
|
||||
ExtraData: p.ExtraData,
|
||||
BaseFeePerGas: p.BaseFeePerGas.SSZBytes(),
|
||||
BlockHash: p.BlockHash,
|
||||
Transactions: txs,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type SignedBlindedBeaconBlockBellatrix struct {
|
||||
*eth.SignedBlindedBeaconBlockBellatrix
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockBellatrix struct {
|
||||
*eth.BlindedBeaconBlockBellatrix
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockBodyBellatrix struct {
|
||||
*eth.BlindedBeaconBlockBodyBellatrix
|
||||
}
|
||||
|
||||
func (r *SignedBlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Message *BlindedBeaconBlockBellatrix `json:"message,omitempty"`
|
||||
Signature hexutil.Bytes `json:"signature,omitempty"`
|
||||
}{
|
||||
Message: &BlindedBeaconBlockBellatrix{r.SignedBlindedBeaconBlockBellatrix.Block},
|
||||
Signature: r.SignedBlindedBeaconBlockBellatrix.Signature,
|
||||
})
|
||||
}
|
||||
|
||||
func (b *BlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index,omitempty"`
|
||||
ParentRoot hexutil.Bytes `json:"parent_root,omitempty"`
|
||||
StateRoot hexutil.Bytes `json:"state_root,omitempty"`
|
||||
Body *BlindedBeaconBlockBodyBellatrix `json:"body,omitempty"`
|
||||
}{
|
||||
Slot: fmt.Sprintf("%d", b.Slot),
|
||||
ProposerIndex: fmt.Sprintf("%d", b.ProposerIndex),
|
||||
ParentRoot: b.ParentRoot,
|
||||
StateRoot: b.StateRoot,
|
||||
Body: &BlindedBeaconBlockBodyBellatrix{b.BlindedBeaconBlockBellatrix.Body},
|
||||
})
|
||||
}
|
||||
|
||||
type ProposerSlashing struct {
|
||||
*eth.ProposerSlashing
|
||||
}
|
||||
|
||||
func (s *ProposerSlashing) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
SignedHeader1 *SignedBeaconBlockHeader `json:"signed_header_1,omitempty"`
|
||||
SignedHeader2 *SignedBeaconBlockHeader `json:"signed_header_2,omitempty"`
|
||||
}{
|
||||
SignedHeader1: &SignedBeaconBlockHeader{s.ProposerSlashing.Header_1},
|
||||
SignedHeader2: &SignedBeaconBlockHeader{s.ProposerSlashing.Header_2},
|
||||
})
|
||||
}
|
||||
|
||||
type SignedBeaconBlockHeader struct {
|
||||
*eth.SignedBeaconBlockHeader
|
||||
}
|
||||
|
||||
func (h *SignedBeaconBlockHeader) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Header *BeaconBlockHeader `json:"message,omitempty"`
|
||||
Signature hexutil.Bytes `json:"signature,omitempty"`
|
||||
}{
|
||||
Header: &BeaconBlockHeader{h.SignedBeaconBlockHeader.Header},
|
||||
Signature: h.SignedBeaconBlockHeader.Signature,
|
||||
})
|
||||
}
|
||||
|
||||
type BeaconBlockHeader struct {
|
||||
*eth.BeaconBlockHeader
|
||||
}
|
||||
|
||||
func (h *BeaconBlockHeader) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Slot string `json:"slot,omitempty"`
|
||||
ProposerIndex string `json:"proposer_index,omitempty"`
|
||||
ParentRoot hexutil.Bytes `json:"parent_root,omitempty"`
|
||||
StateRoot hexutil.Bytes `json:"state_root,omitempty"`
|
||||
BodyRoot hexutil.Bytes `json:"body_root,omitempty"`
|
||||
}{
|
||||
Slot: fmt.Sprintf("%d", h.BeaconBlockHeader.Slot),
|
||||
ProposerIndex: fmt.Sprintf("%d", h.BeaconBlockHeader.ProposerIndex),
|
||||
ParentRoot: h.BeaconBlockHeader.ParentRoot,
|
||||
StateRoot: h.BeaconBlockHeader.StateRoot,
|
||||
BodyRoot: h.BeaconBlockHeader.BodyRoot,
|
||||
})
|
||||
}
|
||||
|
||||
type IndexedAttestation struct {
|
||||
*eth.IndexedAttestation
|
||||
}
|
||||
|
||||
func (a *IndexedAttestation) MarshalJSON() ([]byte, error) {
|
||||
indices := make([]string, len(a.IndexedAttestation.AttestingIndices))
|
||||
for i := range a.IndexedAttestation.AttestingIndices {
|
||||
indices[i] = fmt.Sprintf("%d", a.AttestingIndices[i])
|
||||
}
|
||||
return json.Marshal(struct {
|
||||
AttestingIndices []string `json:"attesting_indices,omitempty"`
|
||||
Data *AttestationData `json:"data,omitempty"`
|
||||
Signature hexutil.Bytes `json:"signature,omitempty"`
|
||||
}{
|
||||
AttestingIndices: indices,
|
||||
Data: &AttestationData{a.IndexedAttestation.Data},
|
||||
Signature: a.IndexedAttestation.Signature,
|
||||
})
|
||||
}
|
||||
|
||||
type AttesterSlashing struct {
|
||||
*eth.AttesterSlashing
|
||||
}
|
||||
|
||||
func (s *AttesterSlashing) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Attestation1 *IndexedAttestation `json:"attestation_1,omitempty"`
|
||||
Attestation2 *IndexedAttestation `json:"attestation_2,omitempty"`
|
||||
}{
|
||||
Attestation1: &IndexedAttestation{s.Attestation_1},
|
||||
Attestation2: &IndexedAttestation{s.Attestation_2},
|
||||
})
|
||||
}
|
||||
|
||||
type Checkpoint struct {
|
||||
*eth.Checkpoint
|
||||
}
|
||||
|
||||
func (c *Checkpoint) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Epoch string `json:"epoch,omitempty"`
|
||||
Root hexutil.Bytes `json:"root,omitempty"`
|
||||
}{
|
||||
Epoch: fmt.Sprintf("%d", c.Checkpoint.Epoch),
|
||||
Root: c.Checkpoint.Root,
|
||||
})
|
||||
}
|
||||
|
||||
type AttestationData struct {
|
||||
*eth.AttestationData
|
||||
}
|
||||
|
||||
func (a *AttestationData) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Slot string `json:"slot,omitempty"`
|
||||
Index string `json:"index,omitempty"`
|
||||
BeaconBlockRoot hexutil.Bytes `json:"beacon_block_root,omitempty"`
|
||||
Source *Checkpoint `json:"source,omitempty"`
|
||||
Target *Checkpoint `json:"target,omitempty"`
|
||||
}{
|
||||
Slot: fmt.Sprintf("%d", a.AttestationData.Slot),
|
||||
Index: fmt.Sprintf("%d", a.AttestationData.CommitteeIndex),
|
||||
BeaconBlockRoot: a.AttestationData.BeaconBlockRoot,
|
||||
Source: &Checkpoint{a.AttestationData.Source},
|
||||
Target: &Checkpoint{a.AttestationData.Target},
|
||||
})
|
||||
}
|
||||
|
||||
type Attestation struct {
|
||||
*eth.Attestation
|
||||
}
|
||||
|
||||
func (a *Attestation) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
AggregationBits hexutil.Bytes `json:"aggregation_bits,omitempty"`
|
||||
Data *AttestationData `json:"data,omitempty"`
|
||||
Signature hexutil.Bytes `json:"signature,omitempty" ssz-size:"96"`
|
||||
}{
|
||||
AggregationBits: hexutil.Bytes(a.Attestation.AggregationBits),
|
||||
Data: &AttestationData{a.Attestation.Data},
|
||||
Signature: a.Attestation.Signature,
|
||||
})
|
||||
}
|
||||
|
||||
type DepositData struct {
|
||||
*eth.Deposit_Data
|
||||
}
|
||||
|
||||
func (d *DepositData) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
PublicKey hexutil.Bytes `json:"pubkey,omitempty"`
|
||||
WithdrawalCredentials hexutil.Bytes `json:"withdrawal_credentials,omitempty"`
|
||||
Amount string `json:"amount,omitempty"`
|
||||
Signature hexutil.Bytes `json:"signature,omitempty"`
|
||||
}{
|
||||
PublicKey: d.PublicKey,
|
||||
WithdrawalCredentials: d.WithdrawalCredentials,
|
||||
Amount: fmt.Sprintf("%d", d.Amount),
|
||||
Signature: d.Signature,
|
||||
})
|
||||
}
|
||||
|
||||
type Deposit struct {
|
||||
*eth.Deposit
|
||||
}
|
||||
|
||||
func (d *Deposit) MarshalJSON() ([]byte, error) {
|
||||
proof := make([]hexutil.Bytes, len(d.Proof))
|
||||
for i := range d.Proof {
|
||||
proof[i] = d.Proof[i]
|
||||
}
|
||||
return json.Marshal(struct {
|
||||
Proof []hexutil.Bytes `json:"proof"`
|
||||
Data *DepositData `json:"data"`
|
||||
}{
|
||||
Proof: proof,
|
||||
Data: &DepositData{Deposit_Data: d.Deposit.Data},
|
||||
})
|
||||
}
|
||||
|
||||
type SignedVoluntaryExit struct {
|
||||
*eth.SignedVoluntaryExit
|
||||
}
|
||||
|
||||
func (sve *SignedVoluntaryExit) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Message *VoluntaryExit `json:"message,omitempty"`
|
||||
Signature hexutil.Bytes `json:"signature,omitempty"`
|
||||
}{
|
||||
Signature: sve.SignedVoluntaryExit.Signature,
|
||||
Message: &VoluntaryExit{sve.SignedVoluntaryExit.Exit},
|
||||
})
|
||||
}
|
||||
|
||||
type VoluntaryExit struct {
|
||||
*eth.VoluntaryExit
|
||||
}
|
||||
|
||||
func (ve *VoluntaryExit) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Epoch string `json:"epoch,omitempty"`
|
||||
ValidatorIndex string `json:"validator_index,omitempty"`
|
||||
}{
|
||||
Epoch: fmt.Sprintf("%d", ve.Epoch),
|
||||
ValidatorIndex: fmt.Sprintf("%d", ve.ValidatorIndex),
|
||||
})
|
||||
}
|
||||
|
||||
type SyncAggregate struct {
|
||||
*eth.SyncAggregate
|
||||
}
|
||||
|
||||
func (s *SyncAggregate) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
SyncCommitteeBits hexutil.Bytes `json:"sync_committee_bits,omitempty"`
|
||||
SyncCommitteeSignature hexutil.Bytes `json:"sync_committee_signature,omitempty"`
|
||||
}{
|
||||
SyncCommitteeBits: hexutil.Bytes(s.SyncAggregate.SyncCommitteeBits),
|
||||
SyncCommitteeSignature: s.SyncAggregate.SyncCommitteeSignature,
|
||||
})
|
||||
}
|
||||
|
||||
type Eth1Data struct {
|
||||
*eth.Eth1Data
|
||||
}
|
||||
|
||||
func (e *Eth1Data) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
DepositRoot hexutil.Bytes `json:"deposit_root,omitempty"`
|
||||
DepositCount string `json:"deposit_count,omitempty"`
|
||||
BlockHash hexutil.Bytes `json:"block_hash,omitempty"`
|
||||
}{
|
||||
DepositRoot: e.DepositRoot,
|
||||
DepositCount: fmt.Sprintf("%d", e.DepositCount),
|
||||
BlockHash: e.BlockHash,
|
||||
})
|
||||
}
|
||||
|
||||
func (b *BlindedBeaconBlockBodyBellatrix) MarshalJSON() ([]byte, error) {
|
||||
sve := make([]*SignedVoluntaryExit, len(b.BlindedBeaconBlockBodyBellatrix.VoluntaryExits))
|
||||
for i := range b.BlindedBeaconBlockBodyBellatrix.VoluntaryExits {
|
||||
sve[i] = &SignedVoluntaryExit{SignedVoluntaryExit: b.BlindedBeaconBlockBodyBellatrix.VoluntaryExits[i]}
|
||||
}
|
||||
deps := make([]*Deposit, len(b.BlindedBeaconBlockBodyBellatrix.Deposits))
|
||||
for i := range b.BlindedBeaconBlockBodyBellatrix.Deposits {
|
||||
deps[i] = &Deposit{Deposit: b.BlindedBeaconBlockBodyBellatrix.Deposits[i]}
|
||||
}
|
||||
atts := make([]*Attestation, len(b.BlindedBeaconBlockBodyBellatrix.Attestations))
|
||||
for i := range b.BlindedBeaconBlockBodyBellatrix.Attestations {
|
||||
atts[i] = &Attestation{Attestation: b.BlindedBeaconBlockBodyBellatrix.Attestations[i]}
|
||||
}
|
||||
atsl := make([]*AttesterSlashing, len(b.BlindedBeaconBlockBodyBellatrix.AttesterSlashings))
|
||||
for i := range b.BlindedBeaconBlockBodyBellatrix.AttesterSlashings {
|
||||
atsl[i] = &AttesterSlashing{AttesterSlashing: b.BlindedBeaconBlockBodyBellatrix.AttesterSlashings[i]}
|
||||
}
|
||||
pros := make([]*ProposerSlashing, len(b.BlindedBeaconBlockBodyBellatrix.ProposerSlashings))
|
||||
for i := range b.BlindedBeaconBlockBodyBellatrix.ProposerSlashings {
|
||||
pros[i] = &ProposerSlashing{ProposerSlashing: b.BlindedBeaconBlockBodyBellatrix.ProposerSlashings[i]}
|
||||
}
|
||||
return json.Marshal(struct {
|
||||
RandaoReveal hexutil.Bytes `json:"randao_reveal,omitempty"`
|
||||
Eth1Data *Eth1Data `json:"eth1_data,omitempty"`
|
||||
Graffiti hexutil.Bytes `json:"graffiti,omitempty"`
|
||||
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings,omitempty"`
|
||||
AttesterSlashings []*AttesterSlashing `json:"attester_slashings,omitempty"`
|
||||
Attestations []*Attestation `json:"attestations,omitempty"`
|
||||
Deposits []*Deposit `json:"deposits,omitempty"`
|
||||
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits,omitempty"`
|
||||
SyncAggregate *SyncAggregate `json:"sync_aggregate,omitempty"`
|
||||
ExecutionPayloadHeader *ExecutionPayloadHeader `json:"execution_payload_header,omitempty"`
|
||||
}{
|
||||
RandaoReveal: b.RandaoReveal,
|
||||
Eth1Data: &Eth1Data{b.BlindedBeaconBlockBodyBellatrix.Eth1Data},
|
||||
Graffiti: b.BlindedBeaconBlockBodyBellatrix.Graffiti,
|
||||
ProposerSlashings: pros,
|
||||
AttesterSlashings: atsl,
|
||||
Attestations: atts,
|
||||
Deposits: deps,
|
||||
VoluntaryExits: sve,
|
||||
SyncAggregate: &SyncAggregate{b.BlindedBeaconBlockBodyBellatrix.SyncAggregate},
|
||||
ExecutionPayloadHeader: &ExecutionPayloadHeader{ExecutionPayloadHeader: b.BlindedBeaconBlockBodyBellatrix.ExecutionPayloadHeader},
|
||||
})
|
||||
}
|
||||
727
api/client/builder/types_test.go
Normal file
@@ -0,0 +1,727 @@
|
||||
package builder
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
|
||||
func ezDecode(t *testing.T, s string) []byte {
|
||||
v, err := hexutil.Decode(s)
|
||||
require.NoError(t, err)
|
||||
return v
|
||||
}
|
||||
|
||||
func TestSignedValidatorRegistration_MarshalJSON(t *testing.T) {
|
||||
svr := &eth.SignedValidatorRegistrationV1{
|
||||
Message: &eth.ValidatorRegistrationV1{
|
||||
FeeRecipient: make([]byte, 20),
|
||||
GasLimit: 0,
|
||||
Timestamp: 0,
|
||||
Pubkey: make([]byte, 48),
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
je, err := json.Marshal(&SignedValidatorRegistration{SignedValidatorRegistrationV1: svr})
|
||||
require.NoError(t, err)
|
||||
// decode with a struct w/ plain strings so we can check the string encoding of the hex fields
|
||||
un := struct {
|
||||
Message struct {
|
||||
FeeRecipient string `json:"fee_recipient"`
|
||||
Pubkey string `json:"pubkey"`
|
||||
} `json:"message"`
|
||||
Signature string `json:"signature"`
|
||||
}{}
|
||||
require.NoError(t, json.Unmarshal(je, &un))
|
||||
require.Equal(t, "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", un.Signature)
|
||||
require.Equal(t, "0x0000000000000000000000000000000000000000", un.Message.FeeRecipient)
|
||||
require.Equal(t, "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", un.Message.Pubkey)
|
||||
}
|
||||
|
||||
var testExampleHeaderResponse = `{
|
||||
"version": "bellatrix",
|
||||
"data": {
|
||||
"message": {
|
||||
"header": {
|
||||
"parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
|
||||
"state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"block_number": "1",
|
||||
"gas_limit": "1",
|
||||
"gas_used": "1",
|
||||
"timestamp": "1",
|
||||
"extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"base_fee_per_gas": "452312848583266388373324160190187140051835877600158453279131187530910662656",
|
||||
"block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"transactions_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
},
|
||||
"value": "652312848583266388373324160190187140051835877600158453279131187530910662656",
|
||||
"pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
|
||||
},
|
||||
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
|
||||
}
|
||||
}`
|
||||
|
||||
func TestExecutionHeaderResponseUnmarshal(t *testing.T) {
|
||||
hr := &ExecHeaderResponse{}
|
||||
require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponse), hr))
|
||||
cases := []struct {
|
||||
expected string
|
||||
actual string
|
||||
name string
|
||||
}{
|
||||
{
|
||||
expected: "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505",
|
||||
actual: hexutil.Encode(hr.Data.Signature),
|
||||
name: "Signature",
|
||||
},
|
||||
{
|
||||
expected: "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
|
||||
actual: hexutil.Encode(hr.Data.Message.Pubkey),
|
||||
name: "ExecHeaderResponse.Pubkey",
|
||||
},
|
||||
{
|
||||
expected: "652312848583266388373324160190187140051835877600158453279131187530910662656",
|
||||
actual: hr.Data.Message.Value.String(),
|
||||
name: "ExecHeaderResponse.Value",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(hr.Data.Message.Header.ParentHash),
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.ParentHash",
|
||||
},
|
||||
{
|
||||
expected: "0xabcf8e0d4e9587369b2301d0790347320302cc09",
|
||||
actual: hexutil.Encode(hr.Data.Message.Header.FeeRecipient),
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.FeeRecipient",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(hr.Data.Message.Header.StateRoot),
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.StateRoot",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(hr.Data.Message.Header.ReceiptsRoot),
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.ReceiptsRoot",
|
||||
},
|
||||
{
|
||||
expected: "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
actual: hexutil.Encode(hr.Data.Message.Header.LogsBloom),
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.LogsBloom",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(hr.Data.Message.Header.PrevRandao),
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.PrevRandao",
|
||||
},
|
||||
{
|
||||
expected: "1",
|
||||
actual: fmt.Sprintf("%d", hr.Data.Message.Header.BlockNumber),
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.BlockNumber",
|
||||
},
|
||||
{
|
||||
expected: "1",
|
||||
actual: fmt.Sprintf("%d", hr.Data.Message.Header.GasLimit),
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.GasLimit",
|
||||
},
|
||||
{
|
||||
expected: "1",
|
||||
actual: fmt.Sprintf("%d", hr.Data.Message.Header.GasUsed),
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.GasUsed",
|
||||
},
|
||||
{
|
||||
expected: "1",
|
||||
actual: fmt.Sprintf("%d", hr.Data.Message.Header.Timestamp),
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.Timestamp",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(hr.Data.Message.Header.ExtraData),
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.ExtraData",
|
||||
},
|
||||
{
|
||||
expected: "452312848583266388373324160190187140051835877600158453279131187530910662656",
|
||||
actual: fmt.Sprintf("%d", hr.Data.Message.Header.BaseFeePerGas),
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.BaseFeePerGas",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(hr.Data.Message.Header.BlockHash),
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.BlockHash",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(hr.Data.Message.Header.TransactionsRoot),
|
||||
name: "ExecHeaderResponse.ExecutionPayloadHeader.TransactionsRoot",
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
require.Equal(t, c.expected, c.actual, fmt.Sprintf("unexpected value for field %s", c.name))
|
||||
}
|
||||
}
|
||||
|
||||
func TestExecutionHeaderResponseToProto(t *testing.T) {
|
||||
bfpg := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||
v := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||
hr := &ExecHeaderResponse{}
|
||||
require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponse), hr))
|
||||
p, err := hr.ToProto()
|
||||
require.NoError(t, err)
|
||||
signature, err := hexutil.Decode("0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505")
|
||||
require.NoError(t, err)
|
||||
pubkey, err := hexutil.Decode("0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
|
||||
require.NoError(t, err)
|
||||
parentHash, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
feeRecipient, err := hexutil.Decode("0xabcf8e0d4e9587369b2301d0790347320302cc09")
|
||||
require.NoError(t, err)
|
||||
stateRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
receiptsRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
logsBloom, err := hexutil.Decode("0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
|
||||
require.NoError(t, err)
|
||||
prevRandao, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
extraData, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
blockHash, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
txRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := &eth.SignedBuilderBid{
|
||||
Message: &eth.BuilderBid{
|
||||
Header: &eth.ExecutionPayloadHeader{
|
||||
ParentHash: parentHash,
|
||||
FeeRecipient: feeRecipient,
|
||||
StateRoot: stateRoot,
|
||||
ReceiptsRoot: receiptsRoot,
|
||||
LogsBloom: logsBloom,
|
||||
PrevRandao: prevRandao,
|
||||
BlockNumber: 1,
|
||||
GasLimit: 1,
|
||||
GasUsed: 1,
|
||||
Timestamp: 1,
|
||||
ExtraData: extraData,
|
||||
BaseFeePerGas: bfpg.SSZBytes(),
|
||||
BlockHash: blockHash,
|
||||
TransactionsRoot: txRoot,
|
||||
},
|
||||
Value: v.SSZBytes(),
|
||||
Pubkey: pubkey,
|
||||
},
|
||||
Signature: signature,
|
||||
}
|
||||
require.DeepEqual(t, expected, p)
|
||||
}
|
||||
|
||||
var testExampleExecutionPayload = `{
|
||||
"version": "bellatrix",
|
||||
"data": {
|
||||
"parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
|
||||
"state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"block_number": "1",
|
||||
"gas_limit": "1",
|
||||
"gas_used": "1",
|
||||
"timestamp": "1",
|
||||
"extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"base_fee_per_gas": "452312848583266388373324160190187140051835877600158453279131187530910662656",
|
||||
"block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"transactions": [
|
||||
"0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86"
|
||||
]
|
||||
}
|
||||
}`
|
||||
|
||||
func TestExecutionPayloadResponseUnmarshal(t *testing.T) {
|
||||
epr := &ExecPayloadResponse{}
|
||||
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayload), epr))
|
||||
cases := []struct {
|
||||
expected string
|
||||
actual string
|
||||
name string
|
||||
}{
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(epr.Data.ParentHash),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.ParentHash",
|
||||
},
|
||||
{
|
||||
expected: "0xabcf8e0d4e9587369b2301d0790347320302cc09",
|
||||
actual: hexutil.Encode(epr.Data.FeeRecipient),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.FeeRecipient",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(epr.Data.StateRoot),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.StateRoot",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(epr.Data.ReceiptsRoot),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.ReceiptsRoot",
|
||||
},
|
||||
{
|
||||
expected: "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
actual: hexutil.Encode(epr.Data.LogsBloom),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.LogsBloom",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(epr.Data.PrevRandao),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.PrevRandao",
|
||||
},
|
||||
{
|
||||
expected: "1",
|
||||
actual: fmt.Sprintf("%d", epr.Data.BlockNumber),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.BlockNumber",
|
||||
},
|
||||
{
|
||||
expected: "1",
|
||||
actual: fmt.Sprintf("%d", epr.Data.GasLimit),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.GasLimit",
|
||||
},
|
||||
{
|
||||
expected: "1",
|
||||
actual: fmt.Sprintf("%d", epr.Data.GasUsed),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.GasUsed",
|
||||
},
|
||||
{
|
||||
expected: "1",
|
||||
actual: fmt.Sprintf("%d", epr.Data.Timestamp),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.Timestamp",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(epr.Data.ExtraData),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.ExtraData",
|
||||
},
|
||||
{
|
||||
expected: "452312848583266388373324160190187140051835877600158453279131187530910662656",
|
||||
actual: fmt.Sprintf("%d", epr.Data.BaseFeePerGas),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.BaseFeePerGas",
|
||||
},
|
||||
{
|
||||
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
actual: hexutil.Encode(epr.Data.BlockHash),
|
||||
name: "ExecPayloadResponse.ExecutionPayload.BlockHash",
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
require.Equal(t, c.expected, c.actual, fmt.Sprintf("unexpected value for field %s", c.name))
|
||||
}
|
||||
require.Equal(t, 1, len(epr.Data.Transactions))
|
||||
txHash := "0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86"
|
||||
require.Equal(t, txHash, hexutil.Encode(epr.Data.Transactions[0]))
|
||||
}
|
||||
|
||||
func TestExecutionPayloadResponseToProto(t *testing.T) {
|
||||
hr := &ExecPayloadResponse{}
|
||||
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayload), hr))
|
||||
p, err := hr.ToProto()
|
||||
require.NoError(t, err)
|
||||
|
||||
parentHash, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
feeRecipient, err := hexutil.Decode("0xabcf8e0d4e9587369b2301d0790347320302cc09")
|
||||
require.NoError(t, err)
|
||||
stateRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
receiptsRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
logsBloom, err := hexutil.Decode("0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
|
||||
require.NoError(t, err)
|
||||
prevRandao, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
extraData, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
blockHash, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
require.NoError(t, err)
|
||||
|
||||
tx, err := hexutil.Decode("0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86")
|
||||
require.NoError(t, err)
|
||||
txList := [][]byte{tx}
|
||||
|
||||
bfpg := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||
expected := &v1.ExecutionPayload{
|
||||
ParentHash: parentHash,
|
||||
FeeRecipient: feeRecipient,
|
||||
StateRoot: stateRoot,
|
||||
ReceiptsRoot: receiptsRoot,
|
||||
LogsBloom: logsBloom,
|
||||
PrevRandao: prevRandao,
|
||||
BlockNumber: 1,
|
||||
GasLimit: 1,
|
||||
GasUsed: 1,
|
||||
Timestamp: 1,
|
||||
ExtraData: extraData,
|
||||
BaseFeePerGas: bfpg.SSZBytes(),
|
||||
BlockHash: blockHash,
|
||||
Transactions: txList,
|
||||
}
|
||||
require.DeepEqual(t, expected, p)
|
||||
}
|
||||
|
||||
func pbEth1Data() *eth.Eth1Data {
|
||||
return &eth.Eth1Data{
|
||||
DepositRoot: make([]byte, 32),
|
||||
DepositCount: 23,
|
||||
BlockHash: make([]byte, 32),
|
||||
}
|
||||
}
|
||||
|
||||
func TestEth1DataMarshal(t *testing.T) {
|
||||
ed := &Eth1Data{
|
||||
Eth1Data: pbEth1Data(),
|
||||
}
|
||||
b, err := json.Marshal(ed)
|
||||
require.NoError(t, err)
|
||||
expected := `{"deposit_root":"0x0000000000000000000000000000000000000000000000000000000000000000","deposit_count":"23","block_hash":"0x0000000000000000000000000000000000000000000000000000000000000000"}`
|
||||
require.Equal(t, expected, string(b))
|
||||
}
|
||||
|
||||
func pbSyncAggregate() *eth.SyncAggregate {
|
||||
return &eth.SyncAggregate{
|
||||
SyncCommitteeSignature: make([]byte, 48),
|
||||
SyncCommitteeBits: bitfield.Bitvector512{0x01},
|
||||
}
|
||||
}
|
||||
|
||||
func TestSyncAggregate_MarshalJSON(t *testing.T) {
|
||||
sa := &SyncAggregate{pbSyncAggregate()}
|
||||
b, err := json.Marshal(sa)
|
||||
require.NoError(t, err)
|
||||
expected := `{"sync_committee_bits":"0x01","sync_committee_signature":"0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"}`
|
||||
require.Equal(t, expected, string(b))
|
||||
}
|
||||
|
||||
func pbDeposit(t *testing.T) *eth.Deposit {
|
||||
return &eth.Deposit{
|
||||
Proof: [][]byte{ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")},
|
||||
Data: &eth.Deposit_Data{
|
||||
PublicKey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
|
||||
WithdrawalCredentials: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Amount: 1,
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeposit_MarshalJSON(t *testing.T) {
|
||||
d := &Deposit{
|
||||
Deposit: pbDeposit(t),
|
||||
}
|
||||
b, err := json.Marshal(d)
|
||||
require.NoError(t, err)
|
||||
expected := `{"proof":["0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"],"data":{"pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a","withdrawal_credentials":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","amount":"1","signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}}`
|
||||
require.Equal(t, expected, string(b))
|
||||
}
|
||||
|
||||
func pbSignedVoluntaryExit(t *testing.T) *eth.SignedVoluntaryExit {
|
||||
return &eth.SignedVoluntaryExit{
|
||||
Exit: &eth.VoluntaryExit{
|
||||
Epoch: 1,
|
||||
ValidatorIndex: 1,
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
}
|
||||
}
|
||||
|
||||
func TestVoluntaryExit(t *testing.T) {
|
||||
ve := &SignedVoluntaryExit{
|
||||
SignedVoluntaryExit: pbSignedVoluntaryExit(t),
|
||||
}
|
||||
b, err := json.Marshal(ve)
|
||||
require.NoError(t, err)
|
||||
expected := `{"message":{"epoch":"1","validator_index":"1"},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}`
|
||||
require.Equal(t, expected, string(b))
|
||||
}
|
||||
|
||||
func pbAttestation(t *testing.T) *eth.Attestation {
|
||||
return &eth.Attestation{
|
||||
AggregationBits: bitfield.Bitlist{0x01},
|
||||
Data: &eth.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Source: &eth.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Target: &eth.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttestationMarshal(t *testing.T) {
|
||||
a := &Attestation{
|
||||
Attestation: pbAttestation(t),
|
||||
}
|
||||
b, err := json.Marshal(a)
|
||||
require.NoError(t, err)
|
||||
expected := `{"aggregation_bits":"0x01","data":{"slot":"1","index":"1","beacon_block_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","source":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"},"target":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}`
|
||||
require.Equal(t, expected, string(b))
|
||||
}
|
||||
|
||||
func pbAttesterSlashing(t *testing.T) *eth.AttesterSlashing {
|
||||
return ð.AttesterSlashing{
|
||||
Attestation_1: ð.IndexedAttestation{
|
||||
AttestingIndices: []uint64{1},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
Data: ð.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Source: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Target: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
},
|
||||
},
|
||||
Attestation_2: ð.IndexedAttestation{
|
||||
AttestingIndices: []uint64{1},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
Data: ð.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Source: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Target: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttesterSlashing_MarshalJSON(t *testing.T) {
|
||||
as := &AttesterSlashing{
|
||||
AttesterSlashing: pbAttesterSlashing(t),
|
||||
}
|
||||
b, err := json.Marshal(as)
|
||||
require.NoError(t, err)
|
||||
expected := `{"attestation_1":{"attesting_indices":["1"],"data":{"slot":"1","index":"1","beacon_block_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","source":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"},"target":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"},"attestation_2":{"attesting_indices":["1"],"data":{"slot":"1","index":"1","beacon_block_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","source":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"},"target":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}}`
|
||||
require.Equal(t, expected, string(b))
|
||||
}
|
||||
|
||||
func pbProposerSlashing(t *testing.T) *eth.ProposerSlashing {
|
||||
return ð.ProposerSlashing{
|
||||
Header_1: ð.SignedBeaconBlockHeader{
|
||||
Header: ð.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
Header_2: ð.SignedBeaconBlockHeader{
|
||||
Header: ð.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestProposerSlashings(t *testing.T) {
|
||||
ps := &ProposerSlashing{ProposerSlashing: pbProposerSlashing(t)}
|
||||
b, err := json.Marshal(ps)
|
||||
require.NoError(t, err)
|
||||
expected := `{"signed_header_1":{"message":{"slot":"1","proposer_index":"1","parent_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","body_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"},"signed_header_2":{"message":{"slot":"1","proposer_index":"1","parent_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","body_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}}`
|
||||
require.Equal(t, expected, string(b))
|
||||
}
|
||||
|
||||
func pbExecutionPayloadHeader(t *testing.T) *eth.ExecutionPayloadHeader {
|
||||
bfpg := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||
return ð.ExecutionPayloadHeader{
|
||||
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
ReceiptsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
LogsBloom: ezDecode(t, "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
|
||||
PrevRandao: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BlockNumber: 1,
|
||||
GasLimit: 1,
|
||||
GasUsed: 1,
|
||||
Timestamp: 1,
|
||||
ExtraData: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BaseFeePerGas: bfpg.SSZBytes(),
|
||||
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
}
|
||||
}
|
||||
|
||||
func TestExecutionPayloadHeader_MarshalJSON(t *testing.T) {
|
||||
h := &ExecutionPayloadHeader{
|
||||
ExecutionPayloadHeader: pbExecutionPayloadHeader(t),
|
||||
}
|
||||
b, err := json.Marshal(h)
|
||||
require.NoError(t, err)
|
||||
expected := `{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"452312848583266388373324160190187140051835877600158453279131187530910662656","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}`
|
||||
require.Equal(t, expected, string(b))
|
||||
}
|
||||
|
||||
var testBuilderBid = `{
|
||||
"version":"bellatrix",
|
||||
"data":{
|
||||
"message":{
|
||||
"header":{
|
||||
"parent_hash":"0xa0513a503d5bd6e89a144c3268e5b7e9da9dbf63df125a360e3950a7d0d67131",
|
||||
"fee_recipient":"0xdfb434922631787e43725c6b926e989875125751",
|
||||
"state_root":"0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45",
|
||||
"receipts_root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
|
||||
"logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"prev_randao":"0xc2fa210081542a87f334b7b14a2da3275e4b281dd77b007bcfcb10e34c42052e",
|
||||
"block_number":"1",
|
||||
"gas_limit":"10000000",
|
||||
"gas_used":"0",
|
||||
"timestamp":"4660",
|
||||
"extra_data":"0x",
|
||||
"base_fee_per_gas":"7",
|
||||
"block_hash":"0x10746fa06c248e7eacd4ff8ad8b48a826c227387ee31a6aa5eb4d83ddad34f07",
|
||||
"transactions_root":"0x7ffe241ea60187fdb0187bfa22de35d1f9bed7ab061d9401fd47e34a54fbede1"
|
||||
},
|
||||
"value":"452312848583266388373324160190187140051835877600158453279131187530910662656",
|
||||
"pubkey":"0x8645866c95cbc2e08bc77ccad473540eddf4a1f51a2a8edc8d7a673824218f7f68fe565f1ab38dadd5c855b45bbcec95"
|
||||
},
|
||||
"signature":"0x9183ebc1edf9c3ab2bbd7abdc3b59c6b249d6647b5289a97eea36d9d61c47f12e283f64d928b1e7f5b8a5182b714fa921954678ea28ca574f5f232b2f78cf8900915a2993b396e3471e0655291fec143a300d41408f66478c8208e0f9be851dc"
|
||||
}
|
||||
}`
|
||||
|
||||
func TestBuilderBidUnmarshalUint256(t *testing.T) {
	base10 := "452312848583266388373324160190187140051835877600158453279131187530910662656"
	var expectedValue big.Int
	require.NoError(t, expectedValue.UnmarshalText([]byte(base10)))
	r := &ExecHeaderResponse{}
	require.NoError(t, json.Unmarshal([]byte(testBuilderBid), r))
	//require.Equal(t, expectedValue, r.Data.Message.Value)
	marshaled := r.Data.Message.Value.String()
	require.Equal(t, base10, marshaled)
	require.Equal(t, 0, expectedValue.Cmp(r.Data.Message.Value.Int))
}

func TestMathBigUnmarshal(t *testing.T) {
	base10 := "452312848583266388373324160190187140051835877600158453279131187530910662656"
	var expectedValue big.Int
	require.NoError(t, expectedValue.UnmarshalText([]byte(base10)))
	marshaled, err := expectedValue.MarshalText()
	require.NoError(t, err)
	require.Equal(t, base10, string(marshaled))

	var u256 Uint256
	require.NoError(t, u256.UnmarshalText([]byte("452312848583266388373324160190187140051835877600158453279131187530910662656")))
}

func TestUint256Unmarshal(t *testing.T) {
	base10 := "452312848583266388373324160190187140051835877600158453279131187530910662656"
	bi := new(big.Int)
	bi, ok := bi.SetString(base10, 10)
	require.Equal(t, true, ok)
	s := struct {
		BigNumber Uint256 `json:"big_number"`
	}{
		BigNumber: Uint256{Int: bi},
	}
	m, err := json.Marshal(s)
	require.NoError(t, err)
	expected := `{"big_number":"452312848583266388373324160190187140051835877600158453279131187530910662656"}`
	require.Equal(t, expected, string(m))
}
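The three tests above exercise one convention: 256-bit values cross the JSON boundary as quoted base-10 strings, never as JSON numbers. A minimal sketch of that pattern follows; the wrapper type and field names are hypothetical stand-ins, not the package's own `Uint256`.

```go
package main

import (
	"encoding/json"
	"fmt"
	"math/big"
)

// Uint256JSON is a hypothetical stand-in for the package's Uint256 type. It
// marshals a big.Int as a quoted base-10 string and parses the same form back.
type Uint256JSON struct{ *big.Int }

func (u Uint256JSON) MarshalJSON() ([]byte, error) {
	return json.Marshal(u.String())
}

func (u *Uint256JSON) UnmarshalJSON(data []byte) error {
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	v, ok := new(big.Int).SetString(s, 10)
	if !ok {
		return fmt.Errorf("not a base-10 integer: %q", s)
	}
	u.Int = v
	return nil
}

func main() {
	in := []byte(`{"big_number":"452312848583266388373324160190187140051835877600158453279131187530910662656"}`)
	var out struct {
		BigNumber Uint256JSON `json:"big_number"`
	}
	if err := json.Unmarshal(in, &out); err != nil {
		panic(err)
	}
	b, _ := json.Marshal(out)
	fmt.Println(string(b) == string(in)) // true
}
```

Quoting the value avoids the usual JSON pitfall of numbers being decoded into float64 and silently losing precision above 2^53.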
func TestMarshalBlindedBeaconBlockBodyBellatrix(t *testing.T) {
	expected, err := os.ReadFile("testdata/blinded-block.json")
	require.NoError(t, err)
	b := &BlindedBeaconBlockBellatrix{BlindedBeaconBlockBellatrix: &eth.BlindedBeaconBlockBellatrix{
		Slot:          1,
		ProposerIndex: 1,
		ParentRoot:    ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
		StateRoot:     ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
		Body: &eth.BlindedBeaconBlockBodyBellatrix{
			RandaoReveal:           ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
			Eth1Data:               pbEth1Data(),
			Graffiti:               ezDecode(t, "0xdeadbeefc0ffee"),
			ProposerSlashings:      []*eth.ProposerSlashing{pbProposerSlashing(t)},
			AttesterSlashings:      []*eth.AttesterSlashing{pbAttesterSlashing(t)},
			Attestations:           []*eth.Attestation{pbAttestation(t)},
			Deposits:               []*eth.Deposit{pbDeposit(t)},
			VoluntaryExits:         []*eth.SignedVoluntaryExit{pbSignedVoluntaryExit(t)},
			SyncAggregate:          pbSyncAggregate(),
			ExecutionPayloadHeader: pbExecutionPayloadHeader(t),
		},
	}}
	m, err := json.Marshal(b)
	require.NoError(t, err)
	// string error output is easier to deal with
	// -1 end slice index on expected is to get rid of trailing newline
	// if you update this fixture and this test breaks, you probably removed the trailing newline
	require.Equal(t, string(expected[0:len(expected)-1]), string(m))
}
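The `-1` slice in the comparison above works, but it silently depends on the fixture ending with exactly one newline. A hedged alternative sketch (the helper name is invented for illustration and is not part of this change) trims the newline explicitly:

```go
package buildertest // hypothetical package name

import (
	"bytes"
	"os"
	"testing"
)

// readFixture loads a golden JSON file and strips any trailing newlines so the
// comparison does not depend on how the fixture file was last saved.
func readFixture(t *testing.T, path string) string {
	t.Helper()
	b, err := os.ReadFile(path)
	if err != nil {
		t.Fatal(err)
	}
	return string(bytes.TrimRight(b, "\n"))
}
```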
func TestRoundTripUint256(t *testing.T) {
	vs := "4523128485832663883733241601901871400518358776001584532791311875309106626"
	u := stringToUint256(vs)
	sb := u.SSZBytes()
	require.Equal(t, 32, len(sb))
	uu := sszBytesToUint256(sb)
	require.Equal(t, true, bytes.Equal(u.SSZBytes(), uu.SSZBytes()))
	require.Equal(t, vs, uu.String())
}
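TestRoundTripUint256 relies on SSZ's fixed-width integer layout: a uint256 is always 32 bytes, little-endian. A self-contained sketch of that conversion is below; the helper names are made up for illustration (they are not the package's SSZBytes/sszBytesToUint256), and the sketch assumes the value is non-negative and fits in 32 bytes.

```go
package main

import (
	"fmt"
	"math/big"
)

// uint256ToSSZ converts a big.Int into the 32-byte little-endian form SSZ uses
// for uint256 (hypothetical helper; assumes 0 <= v < 2^256).
func uint256ToSSZ(v *big.Int) []byte {
	be := v.Bytes() // big-endian, no leading zeros
	out := make([]byte, 32)
	for i, b := range be {
		out[len(be)-1-i] = b // reverse into little-endian, zero-padded at the top
	}
	return out
}

// sszToUint256 is the inverse: reverse the bytes and let big.Int read big-endian.
func sszToUint256(b []byte) *big.Int {
	be := make([]byte, len(b))
	for i := range b {
		be[i] = b[len(b)-1-i]
	}
	return new(big.Int).SetBytes(be)
}

func main() {
	v, _ := new(big.Int).SetString("452312848583266388373324160190187140051835877600158453279131187530910662656", 10)
	enc := uint256ToSSZ(v)
	fmt.Println(len(enc), sszToUint256(enc).Cmp(v) == 0) // 32 true
}
```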
func TestRoundTripProtoUint256(t *testing.T) {
|
||||
h := pbExecutionPayloadHeader(t)
|
||||
h.BaseFeePerGas = []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
|
||||
hm := &ExecutionPayloadHeader{ExecutionPayloadHeader: h}
|
||||
m, err := json.Marshal(hm)
|
||||
require.NoError(t, err)
|
||||
hu := &ExecutionPayloadHeader{}
|
||||
require.NoError(t, json.Unmarshal(m, hu))
|
||||
hp, err := hu.ToProto()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, h.BaseFeePerGas, hp.BaseFeePerGas)
|
||||
}
|
||||
|
||||
func TestExecutionPayloadHeaderRoundtrip(t *testing.T) {
|
||||
expected, err := os.ReadFile("testdata/execution-payload.json")
|
||||
require.NoError(t, err)
|
||||
hu := &ExecutionPayloadHeader{}
|
||||
require.NoError(t, json.Unmarshal(expected, hu))
|
||||
m, err := json.Marshal(hu)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, string(expected[0:len(expected)-1]), string(m))
|
||||
}
|
||||
@@ -31,26 +31,26 @@ func processField(s interface{}, processors []fieldProcessor) error {
|
||||
sliceElem := t.Field(i).Type.Elem()
|
||||
kind := sliceElem.Kind()
|
||||
// Recursively process slices to struct pointers.
|
||||
if kind == reflect.Ptr && sliceElem.Elem().Kind() == reflect.Struct {
|
||||
switch {
|
||||
case kind == reflect.Ptr && sliceElem.Elem().Kind() == reflect.Struct:
|
||||
for j := 0; j < v.Field(i).Len(); j++ {
|
||||
if err := processField(v.Field(i).Index(j).Interface(), processors); err != nil {
|
||||
return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Process each string in string slices.
|
||||
if kind == reflect.String {
|
||||
case kind == reflect.String:
|
||||
for _, proc := range processors {
|
||||
_, hasTag := t.Field(i).Tag.Lookup(proc.tag)
|
||||
if hasTag {
|
||||
for j := 0; j < v.Field(i).Len(); j++ {
|
||||
if err := proc.f(v.Field(i).Index(j)); err != nil {
|
||||
return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
|
||||
}
|
||||
if !hasTag {
|
||||
continue
|
||||
}
|
||||
for j := 0; j < v.Field(i).Len(); j++ {
|
||||
if err := proc.f(v.Field(i).Index(j)); err != nil {
|
||||
return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
// Recursively process struct pointers.
|
||||
case reflect.Ptr:
|
||||
|
||||
@@ -121,8 +121,9 @@ func (g *Gateway) Start() {
|
||||
}
|
||||
|
||||
g.server = &http.Server{
|
||||
Addr: g.cfg.gatewayAddr,
|
||||
Handler: corsMux,
|
||||
Addr: g.cfg.gatewayAddr,
|
||||
Handler: corsMux,
|
||||
ReadHeaderTimeout: time.Second,
|
||||
}
|
||||
|
||||
go func() {
|
||||
|
||||
@@ -10,6 +10,7 @@ go_library(
|
||||
"head_sync_committee_info.go",
|
||||
"init_sync_process_block.go",
|
||||
"log.go",
|
||||
"merge_ascii_art.go",
|
||||
"metrics.go",
|
||||
"new_slot.go",
|
||||
"options.go",
|
||||
@@ -64,6 +65,7 @@ go_library(
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/forks/bellatrix:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
@@ -136,6 +138,7 @@ go_test(
|
||||
"//beacon-chain/powchain/testing:go_default_library",
|
||||
"//beacon-chain/state/stateutil:go_default_library",
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//beacon-chain/state/v3:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
|
||||
@@ -9,7 +9,9 @@ import (
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
v3 "github.com/prysmaticlabs/prysm/beacon-chain/state/v3"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
@@ -27,6 +29,48 @@ var _ ChainInfoFetcher = (*Service)(nil)
|
||||
var _ TimeFetcher = (*Service)(nil)
|
||||
var _ ForkFetcher = (*Service)(nil)
|
||||
|
||||
// prepareForkchoiceState prepares a beacon state with the given data to mock
// an insertion into forkchoice.
func prepareForkchoiceState(
	_ context.Context,
	slot types.Slot,
	blockRoot [32]byte,
	parentRoot [32]byte,
	payloadHash [32]byte,
	justifiedEpoch types.Epoch,
	finalizedEpoch types.Epoch,
) (state.BeaconState, [32]byte, error) {
	blockHeader := &ethpb.BeaconBlockHeader{
		ParentRoot: parentRoot[:],
	}

	executionHeader := &ethpb.ExecutionPayloadHeader{
		BlockHash: payloadHash[:],
	}

	justifiedCheckpoint := &ethpb.Checkpoint{
		Epoch: justifiedEpoch,
	}

	finalizedCheckpoint := &ethpb.Checkpoint{
		Epoch: finalizedEpoch,
	}

	base := &ethpb.BeaconStateBellatrix{
		Slot:                         slot,
		RandaoMixes:                  make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
		BlockRoots:                   make([][]byte, 1),
		CurrentJustifiedCheckpoint:   justifiedCheckpoint,
		FinalizedCheckpoint:          finalizedCheckpoint,
		LatestExecutionPayloadHeader: executionHeader,
		LatestBlockHeader:            blockHeader,
	}

	base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
	st, err := v3.InitializeFromProto(base)
	return st, blockRoot, err
}
|
||||
|
||||
func TestHeadRoot_Nil(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
@@ -36,9 +80,9 @@ func TestHeadRoot_Nil(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_ForkChoiceStore(t *testing.T) {
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}}
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
|
||||
p := c.ForkChoiceStore()
|
||||
require.Equal(t, 0, int(p.FinalizedEpoch()))
|
||||
require.Equal(t, types.Epoch(0), p.FinalizedCheckpoint().Epoch)
|
||||
}
|
||||
|
||||
func TestFinalizedCheckpt_CanRetrieve(t *testing.T) {
|
||||
@@ -283,26 +327,55 @@ func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
|
||||
}
|
||||
func TestService_ChainHeads_ProtoArray(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0)}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New()}}
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
roots, slots := c.ChainHeads()
|
||||
require.DeepEqual(t, [][32]byte{{'c'}, {'d'}, {'e'}}, roots)
|
||||
require.DeepEqual(t, []types.Slot{102, 103, 104}, slots)
|
||||
}
|
||||
|
||||
//
//  A <- B <- C
//   \     \
//    \     ---------- E
//     ---------- D
|
||||
|
||||
func TestService_ChainHeads_DoublyLinkedTree(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
roots, slots := c.ChainHeads()
|
||||
require.Equal(t, 3, len(roots))
|
||||
@@ -379,9 +452,13 @@ func TestService_IsOptimistic_ProtoArray(t *testing.T) {
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
opt, err := c.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -395,9 +472,13 @@ func TestService_IsOptimistic_DoublyLinkedTree(t *testing.T) {
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
opt, err := c.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -414,9 +495,13 @@ func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {
|
||||
|
||||
func TestService_IsOptimisticForRoot_ProtoArray(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
|
||||
require.NoError(t, err)
|
||||
@@ -425,9 +510,13 @@ func TestService_IsOptimisticForRoot_ProtoArray(t *testing.T) {
|
||||
|
||||
func TestService_IsOptimisticForRoot_DoublyLinkedTree(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
|
||||
require.NoError(t, err)
|
||||
@@ -437,7 +526,7 @@ func TestService_IsOptimisticForRoot_DoublyLinkedTree(t *testing.T) {
|
||||
func TestService_IsOptimisticForRoot_DB_ProtoArray(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
c.head = &head{root: params.BeaconConfig().ZeroHash}
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 10
|
||||
@@ -502,7 +591,7 @@ func TestService_IsOptimisticForRoot_DB_ProtoArray(t *testing.T) {
|
||||
func TestService_IsOptimisticForRoot_DB_DoublyLinkedTree(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
c.head = &head{root: params.BeaconConfig().ZeroHash}
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 10
|
||||
@@ -566,7 +655,7 @@ func TestService_IsOptimisticForRoot_DB_DoublyLinkedTree(t *testing.T) {
|
||||
func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
c.head = &head{root: params.BeaconConfig().ZeroHash}
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 10
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
//go:build !develop
|
||||
// +build !develop
|
||||
|
||||
package blockchain
|
||||
|
||||
|
||||
@@ -27,6 +27,8 @@ var (
|
||||
errWSBlockNotFound = errors.New("weak subjectivity root not found in db")
|
||||
// errWSBlockNotFoundInEpoch is returned when a block is not found in the WS cache or DB within the epoch.
|
||||
errWSBlockNotFoundInEpoch = errors.New("weak subjectivity root not found in db within epoch")
|
||||
// errNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
|
||||
errNotDescendantOfFinalized = invalidBlock{errors.New("not descendant of finalized checkpoint")}
|
||||
)
|
||||
|
||||
// An invalid block is a block that fails the state transition under the core protocol rules.
|
||||
|
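These sentinel values let the blockchain service tell consensus-invalid blocks apart from transient failures such as database errors. A hedged sketch of the pattern follows; `IsInvalidBlock` here is hypothetical and not necessarily the helper this package defines.

```go
package main

import (
	"errors"
	"fmt"
)

// invalidBlock tags an error as a consensus-level rejection of a block rather
// than a transient failure.
type invalidBlock struct{ error }

// IsInvalidBlock reports whether err, or anything it wraps, carries the
// invalidBlock tag.
func IsInvalidBlock(err error) bool {
	var ib invalidBlock
	return errors.As(err, &ib)
}

func main() {
	base := invalidBlock{errors.New("not descendant of finalized checkpoint")}
	wrapped := fmt.Errorf("processing block: %w", base)
	fmt.Println(IsInvalidBlock(wrapped), IsInvalidBlock(errors.New("db timeout"))) // true false
}
```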
||||
@@ -12,7 +12,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
@@ -270,10 +269,10 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
|
||||
recipient, err := s.cfg.BeaconDB.FeeRecipientByValidatorID(ctx, proposerID)
|
||||
switch {
|
||||
case errors.Is(err, kv.ErrNotFoundFeeRecipient):
|
||||
if feeRecipient.String() == fieldparams.EthBurnAddressHex {
|
||||
if feeRecipient.String() == params.BeaconConfig().EthBurnAddressHex {
|
||||
logrus.WithFields(logrus.Fields{
|
||||
"validatorIndex": proposerID,
|
||||
"burnAddress": fieldparams.EthBurnAddressHex,
|
||||
"burnAddress": params.BeaconConfig().EthBurnAddressHex,
|
||||
}).Warn("Fee recipient is currently using the burn address, " +
|
||||
"you will not be rewarded transaction fees on this setting. " +
|
||||
"Please set a different eth address as the fee recipient. " +
|
||||
|
||||
@@ -11,9 +11,10 @@ import (
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
|
||||
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
bstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
@@ -43,7 +44,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, altairBlk))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, bellatrixBlk))
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -51,13 +52,20 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
|
||||
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
st, _ := util.DeterministicGenesisState(t, 10)
|
||||
service.head = &head{
|
||||
state: st,
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 1, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 1, altairBlkRoot, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 2, bellatrixBlkRoot, altairBlkRoot, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -177,7 +185,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, st, tt.finalizedRoot))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, tt.finalizedRoot))
|
||||
fc := ðpb.Checkpoint{Epoch: 1, Root: tt.finalizedRoot[:]}
|
||||
fc := ðpb.Checkpoint{Epoch: 0, Root: tt.finalizedRoot[:]}
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(fc, [32]byte{'a'})
|
||||
service.store.SetJustifiedCheckptAndPayloadHash(fc, [32]byte{'b'})
|
||||
arg := ¬ifyForkchoiceUpdateArg{
|
||||
@@ -185,7 +193,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
|
||||
headRoot: tt.headRoot,
|
||||
headBlock: tt.blk,
|
||||
}
|
||||
_, err := service.notifyForkchoiceUpdate(ctx, arg)
|
||||
_, err = service.notifyForkchoiceUpdate(ctx, arg)
|
||||
if tt.errString != "" {
|
||||
require.ErrorContains(t, tt.errString, err)
|
||||
} else {
|
||||
@@ -282,7 +290,7 @@ func Test_NotifyForkchoiceUpdateRecursive(t *testing.T) {
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wbg))
|
||||
|
||||
// Insert blocks into forkchoice
|
||||
fcs := doublylinkedtree.New(0, 0)
|
||||
fcs := doublylinkedtree.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -292,20 +300,36 @@ func Test_NotifyForkchoiceUpdateRecursive(t *testing.T) {
|
||||
service, err := NewService(ctx, opts...)
|
||||
service.justifiedBalances.balances = []uint64{50, 100, 200}
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 1, bra, [32]byte{}, [32]byte{'A'}, 0, 0))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 2, brb, bra, [32]byte{'B'}, 0, 0))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 3, brc, brb, [32]byte{'C'}, 0, 0))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 4, brd, brc, [32]byte{'D'}, 0, 0))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 5, bre, brb, [32]byte{'E'}, 0, 0))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 6, brf, bre, [32]byte{'F'}, 0, 0))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 7, brg, bre, [32]byte{'G'}, 0, 0))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 1, bra, [32]byte{}, [32]byte{'A'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 2, brb, bra, [32]byte{'B'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 3, brc, brb, [32]byte{'C'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 4, brd, brc, [32]byte{'D'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 5, bre, brb, [32]byte{'E'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 6, brf, bre, [32]byte{'F'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 7, brg, bre, [32]byte{'G'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
// Insert Attestations to D, F and G so that they have higher weight than D
|
||||
// Ensure G is head
|
||||
fcs.ProcessAttestation(ctx, []uint64{0}, brd, 1)
|
||||
fcs.ProcessAttestation(ctx, []uint64{1}, brf, 1)
|
||||
fcs.ProcessAttestation(ctx, []uint64{2}, brg, 1)
|
||||
headRoot, err := fcs.Head(ctx, bra, []uint64{50, 100, 200})
|
||||
jc := &forkchoicetypes.Checkpoint{Epoch: 0, Root: bra}
|
||||
require.NoError(t, fcs.UpdateJustifiedCheckpoint(jc))
|
||||
headRoot, err := fcs.Head(ctx, []uint64{50, 100, 200})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, brg, headRoot)
|
||||
|
||||
@@ -326,7 +350,7 @@ func Test_NotifyForkchoiceUpdateRecursive(t *testing.T) {
|
||||
_, err = service.notifyForkchoiceUpdate(ctx, a)
|
||||
require.ErrorIs(t, ErrInvalidPayload, err)
|
||||
// Ensure Head is D
|
||||
headRoot, err = fcs.Head(ctx, bra, service.justifiedBalances.balances)
|
||||
headRoot, err = fcs.Head(ctx, service.justifiedBalances.balances)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, brd, headRoot)
|
||||
|
||||
@@ -343,7 +367,7 @@ func Test_NotifyNewPayload(t *testing.T) {
|
||||
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -383,11 +407,15 @@ func Test_NotifyNewPayload(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
r, err := bellatrixBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 1, r, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 1, r, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
tests := []struct {
|
||||
postState state.BeaconState
|
||||
postState bstate.BeaconState
|
||||
invalidBlock bool
|
||||
isValidPayload bool
|
||||
blk interfaces.SignedBeaconBlock
|
||||
@@ -531,7 +559,9 @@ func Test_NotifyNewPayload(t *testing.T) {
|
||||
}
|
||||
service.cfg.ExecutionEngineCaller = e
|
||||
root := [32]byte{'a'}
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, root, root, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, root, root, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
postVersion, postHeader, err := getStateVersionAndPayload(tt.postState)
|
||||
require.NoError(t, err)
|
||||
isValidPayload, err := service.notifyNewPayload(ctx, postVersion, postHeader, tt.blk)
|
||||
@@ -555,7 +585,7 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -598,7 +628,7 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
|
||||
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -779,7 +809,7 @@ func Test_GetPayloadAttribute(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, hasPayload)
|
||||
require.Equal(t, suggestedVid, vId)
|
||||
require.Equal(t, fieldparams.EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient).String())
|
||||
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient).String())
|
||||
require.LogsContain(t, hook, "Fee recipient is currently using the burn address")
|
||||
|
||||
// Cache hit, advance state, has fee recipient
|
||||
@@ -800,7 +830,7 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
stateGen := stategen.New(beaconDB)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stateGen),
|
||||
@@ -816,8 +846,9 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
|
||||
genesisRoot, err := genesisBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, genesisRoot, params.BeaconConfig().ZeroHash,
|
||||
params.BeaconConfig().ZeroHash, 0, 0))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, genesisRoot, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
genesisSummary := ðpb.StateSummary{
|
||||
Root: genesisStateRoot[:],
|
||||
Slot: 0,
|
||||
@@ -848,8 +879,9 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
|
||||
Slot: 320,
|
||||
}
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, opStateSummary))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 320, opRoot, genesisRoot,
|
||||
params.BeaconConfig().ZeroHash, 10, 10))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 320, opRoot, genesisRoot, params.BeaconConfig().ZeroHash, 10, 10)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
assert.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, opRoot))
|
||||
require.NoError(t, service.updateFinalized(ctx, opCheckpoint))
|
||||
cp, err := service.cfg.BeaconDB.LastValidatedCheckpoint(ctx)
|
||||
@@ -876,8 +908,9 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
|
||||
Slot: 640,
|
||||
}
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, validSummary))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 640, validRoot, params.BeaconConfig().ZeroHash,
|
||||
params.BeaconConfig().ZeroHash, 20, 20))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 640, validRoot, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 20, 20)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
require.NoError(t, fcs.SetOptimisticToValid(ctx, validRoot))
|
||||
assert.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, validRoot))
|
||||
require.NoError(t, service.updateFinalized(ctx, validCheckpoint))
|
||||
@@ -897,7 +930,7 @@ func TestService_removeInvalidBlockAndState(t *testing.T) {
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(protoarray.New(0, 0)),
|
||||
WithForkChoiceStore(protoarray.New()),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
@@ -953,7 +986,7 @@ func TestService_getPayloadHash(t *testing.T) {
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(protoarray.New(0, 0)),
|
||||
WithForkChoiceStore(protoarray.New()),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -9,8 +9,10 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
@@ -78,10 +80,7 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) ([32]byte,
|
||||
}
|
||||
// To get head before the first justified epoch, the fork choice will start with origin root
|
||||
// instead of zero hashes.
|
||||
headStartRoot := bytesutil.ToBytes32(j.Root)
|
||||
if headStartRoot == params.BeaconConfig().ZeroHash {
|
||||
headStartRoot = s.originBlockRoot
|
||||
}
|
||||
headStartRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(j.Root))
|
||||
|
||||
// In order to process head, fork choice store requires justified info.
|
||||
// If the fork choice store is missing justified block info, a node should
|
||||
@@ -92,31 +91,42 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) ([32]byte,
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
if features.Get().EnableForkChoiceDoublyLinkedTree {
|
||||
s.cfg.ForkChoiceStore = doublylinkedtree.New(j.Epoch, f.Epoch)
|
||||
} else {
|
||||
s.cfg.ForkChoiceStore = protoarray.New(j.Epoch, f.Epoch)
|
||||
st, err := s.cfg.StateGen.StateByRoot(ctx, s.ensureRootNotZeros(headStartRoot))
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block(), headStartRoot, f, j); err != nil {
|
||||
if features.Get().EnableForkChoiceDoublyLinkedTree {
|
||||
s.cfg.ForkChoiceStore = doublylinkedtree.New()
|
||||
} else {
|
||||
s.cfg.ForkChoiceStore = protoarray.New()
|
||||
}
|
||||
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block(), headStartRoot, st, f, j); err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return s.cfg.ForkChoiceStore.Head(ctx, headStartRoot, balances)
|
||||
jc := &forkchoicetypes.Checkpoint{Epoch: j.Epoch, Root: headStartRoot}
|
||||
fc := &forkchoicetypes.Checkpoint{Epoch: f.Epoch, Root: s.ensureRootNotZeros(bytesutil.ToBytes32(f.Root))}
|
||||
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(jc); err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fc); err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
return s.cfg.ForkChoiceStore.Head(ctx, balances)
|
||||
}
|
||||
|
||||
// This saves head info to the local service cache and saves the new head root to the DB.
func (s *Service) saveHead(ctx context.Context, headRoot [32]byte, headBlock interfaces.SignedBeaconBlock, headState state.BeaconState) error {
func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock interfaces.SignedBeaconBlock, headState state.BeaconState) error {
	ctx, span := trace.StartSpan(ctx, "blockChain.saveHead")
	defer span.End()

	// Do nothing if head hasn't changed.
	r, err := s.HeadRoot(ctx)
	oldHeadroot, err := s.HeadRoot(ctx)
	if err != nil {
		return err
	}
	if headRoot == bytesutil.ToBytes32(r) {
	if newHeadRoot == bytesutil.ToBytes32(oldHeadroot) {
		return nil
	}
	if err := wrapper.BeaconBlockIsNil(headBlock); err != nil {
@@ -128,17 +138,19 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte, headBlock int

	// If the head state is not available, just return nil.
	// There's nothing to cache.
	if !s.cfg.BeaconDB.HasStateSummary(ctx, headRoot) {
	if !s.cfg.BeaconDB.HasStateSummary(ctx, newHeadRoot) {
		return nil
	}

	// A chain re-org occurred, so we fire an event notifying the rest of the services.
	headSlot := s.HeadSlot()
	newHeadSlot := headBlock.Block().Slot()
	s.headLock.RLock()
	oldHeadRoot := s.headRoot()
	oldStateRoot := s.headBlock().Block().StateRoot()
	s.headLock.RUnlock()
	headSlot := s.HeadSlot()
	newHeadSlot := headBlock.Block().Slot()
	newStateRoot := headBlock.Block().StateRoot()
	if bytesutil.ToBytes32(headBlock.Block().ParentRoot()) != bytesutil.ToBytes32(r) {
	if bytesutil.ToBytes32(headBlock.Block().ParentRoot()) != bytesutil.ToBytes32(oldHeadroot) {
		log.WithFields(logrus.Fields{
			"newSlot": fmt.Sprintf("%d", newHeadSlot),
			"oldSlot": fmt.Sprintf("%d", headSlot),
@@ -154,7 +166,7 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte, headBlock int
			Slot: newHeadSlot,
			Depth: absoluteSlotDifference,
			OldHeadBlock: oldHeadRoot[:],
			NewHeadBlock: headRoot[:],
			NewHeadBlock: newHeadRoot[:],
			OldHeadState: oldStateRoot,
			NewHeadState: newStateRoot,
			Epoch: slots.ToEpoch(newHeadSlot),
@@ -162,25 +174,24 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte, headBlock int
			},
		})

		if err := s.saveOrphanedAtts(ctx, bytesutil.ToBytes32(r)); err != nil {
		if err := s.saveOrphanedAtts(ctx, bytesutil.ToBytes32(oldHeadroot), newHeadRoot); err != nil {
			return err
		}

		reorgCount.Inc()
	}

	// Cache the new head info.
	s.setHead(headRoot, headBlock, headState)
	s.setHead(newHeadRoot, headBlock, headState)

	// Save the new head root to DB.
	if err := s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, headRoot); err != nil {
	if err := s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, newHeadRoot); err != nil {
		return errors.Wrap(err, "could not save head root in DB")
	}

	// Forward an event capturing a new chain head over a common event feed
	// done in a goroutine to avoid blocking the critical runtime main routine.
	go func() {
		if err := s.notifyNewHeadEvent(ctx, newHeadSlot, headState, newStateRoot, headRoot[:]); err != nil {
		if err := s.notifyNewHeadEvent(ctx, newHeadSlot, headState, newStateRoot, newHeadRoot[:]); err != nil {
			log.WithError(err).Error("Could not notify event feed of new chain head")
		}
	}()
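A re-org is detected above by checking whether the new head block's parent is the current head; the Depth reported in the event is the slot distance between the two heads. The following is a minimal, self-contained sketch of that check using simplified stand-in types rather than Prysm's actual interfaces (names here are illustrative only):

package main

import "fmt"

// blockInfo is a simplified stand-in for a beacon block summary.
type blockInfo struct {
	slot       uint64
	root       [32]byte
	parentRoot [32]byte
}

// isReorg reports whether newHead does not build directly on oldHead,
// mirroring the parent-root comparison performed in saveHead.
func isReorg(oldHead, newHead blockInfo) bool {
	return newHead.parentRoot != oldHead.root
}

// reorgDepth is the absolute slot distance between the two heads,
// analogous to the Depth field of the reorg event.
func reorgDepth(oldHead, newHead blockInfo) uint64 {
	if newHead.slot > oldHead.slot {
		return newHead.slot - oldHead.slot
	}
	return oldHead.slot - newHead.slot
}

func main() {
	oldHead := blockInfo{slot: 10, root: [32]byte{'a'}}
	newHead := blockInfo{slot: 11, root: [32]byte{'b'}, parentRoot: [32]byte{'c'}}
	fmt.Println(isReorg(oldHead, newHead), reorgDepth(oldHead, newHead)) // true 1
}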
@@ -353,35 +364,48 @@ func (s *Service) notifyNewHeadEvent(
	return nil
}

// This saves the attestations inside the beacon block with respect to root `orphanedRoot` back into the
// attestation pool. It also filters out attestations that are more than one epoch old as a
// defense so invalid attestations don't flow into the attestation pool.
func (s *Service) saveOrphanedAtts(ctx context.Context, orphanedRoot [32]byte) error {
	orphanedBlk, err := s.getBlock(ctx, orphanedRoot)
	if err != nil {
// This saves the attestations between `orphanedRoot` and the common ancestor root that is derived using `newHeadRoot`.
// It also filters out attestations that are more than one epoch old as a defense so invalid attestations don't flow into the attestation pool.
func (s *Service) saveOrphanedAtts(ctx context.Context, orphanedRoot [32]byte, newHeadRoot [32]byte) error {
	commonAncestorRoot, err := s.ForkChoicer().CommonAncestorRoot(ctx, newHeadRoot, orphanedRoot)
	switch {
	// Exit early if there's no common ancestor as there would be nothing to save.
	case errors.Is(err, forkchoice.ErrUnknownCommonAncestor):
		return nil
	case err != nil:
		return err
	}

	if orphanedBlk == nil || orphanedBlk.IsNil() {
		return errors.New("orphaned block can't be nil")
	}

	for _, a := range orphanedBlk.Block().Body().Attestations() {
		// Skip attestations that are more than one epoch old.
		if a.Data.Slot+params.BeaconConfig().SlotsPerEpoch < s.CurrentSlot() {
			continue
	for orphanedRoot != commonAncestorRoot {
		if ctx.Err() != nil {
			return ctx.Err()
		}
		if helpers.IsAggregated(a) {
			if err := s.cfg.AttPool.SaveAggregatedAttestation(a); err != nil {
				return err
			}
		} else {
			if err := s.cfg.AttPool.SaveUnaggregatedAttestation(a); err != nil {
				return err
			}
		}
		saveOrphanedAttCount.Inc()
	}

		orphanedBlk, err := s.getBlock(ctx, orphanedRoot)
		if err != nil {
			return err
		}
		// If the block is an epoch older, break out of the loop since we can't include atts anyway.
		// This prevents staying in this loop longer than necessary.
		if orphanedBlk.Block().Slot()+params.BeaconConfig().SlotsPerEpoch <= s.CurrentSlot() {
			break
		}
		for _, a := range orphanedBlk.Block().Body().Attestations() {
			// If the attestation is more than one epoch old, it is not useful to save.
			if a.Data.Slot+params.BeaconConfig().SlotsPerEpoch < s.CurrentSlot() {
				continue
			}
			if helpers.IsAggregated(a) {
				if err := s.cfg.AttPool.SaveAggregatedAttestation(a); err != nil {
					return err
				}
			} else {
				if err := s.cfg.AttPool.SaveUnaggregatedAttestation(a); err != nil {
					return err
				}
			}
			saveOrphanedAttCount.Inc()
		}
		orphanedRoot = bytesutil.ToBytes32(orphanedBlk.Block().ParentRoot())
	}
	return nil
}

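The rewritten saveOrphanedAtts walks the orphaned branch parent by parent until it reaches the common ancestor (or a block more than one epoch old), re-pooling attestations along the way. Below is a minimal sketch of that walk over a toy block map; the types and names are illustrative stand-ins, not Prysm's actual interfaces:

package main

import (
	"errors"
	"fmt"
)

// block is a toy stand-in for a beacon block.
type block struct {
	slot       uint64
	parentRoot [32]byte
	atts       []string // stand-in for attestations
}

// collectOrphanedAtts walks from orphanedRoot back to commonAncestor,
// gathering attestations from every orphaned block, and stops early if a
// block is more than slotsPerEpoch behind the current slot.
func collectOrphanedAtts(chain map[[32]byte]block, orphanedRoot, commonAncestor [32]byte, currentSlot, slotsPerEpoch uint64) ([]string, error) {
	var pooled []string
	for orphanedRoot != commonAncestor {
		blk, ok := chain[orphanedRoot]
		if !ok {
			return nil, errors.New("unknown block in orphaned branch")
		}
		if blk.slot+slotsPerEpoch <= currentSlot {
			break // too old to be included again anyway
		}
		pooled = append(pooled, blk.atts...)
		orphanedRoot = blk.parentRoot
	}
	return pooled, nil
}

func main() {
	g, b1, b2 := [32]byte{'g'}, [32]byte{'1'}, [32]byte{'2'}
	chain := map[[32]byte]block{
		b1: {slot: 1, parentRoot: g, atts: []string{"att-1"}},
		b2: {slot: 2, parentRoot: b1, atts: []string{"att-2"}},
	}
	atts, _ := collectOrphanedAtts(chain, b2, g, 3, 32)
	fmt.Println(atts) // [att-2 att-1]
}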
@@ -3,6 +3,7 @@ package blockchain
import (
	"bytes"
	"context"
	"sort"
	"testing"
	"time"

@@ -10,9 +11,11 @@ import (
	testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
	"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
	"github.com/prysmaticlabs/prysm/config/features"
	"github.com/prysmaticlabs/prysm/config/params"
	types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/testing/assert"
@@ -49,6 +52,9 @@ func TestSaveHead_Different(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), oldBlock))
|
||||
oldRoot, err := oldBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, bytesutil.ToBytes32(oldBlock.Block().ParentRoot()), [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
service.head = &head{
|
||||
slot: 0,
|
||||
root: oldRoot,
|
||||
@@ -64,6 +70,9 @@ func TestSaveHead_Different(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
|
||||
newRoot, err := newHeadBlock.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, bytesutil.ToBytes32(wsb.Block().ParentRoot()), [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
headState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, headState.SetSlot(1))
|
||||
@@ -93,6 +102,9 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), oldBlock))
|
||||
oldRoot, err := oldBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, bytesutil.ToBytes32(oldBlock.Block().ParentRoot()), [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
service.head = &head{
|
||||
slot: 0,
|
||||
root: oldRoot,
|
||||
@@ -110,6 +122,9 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
|
||||
newRoot, err := newHeadBlock.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, bytesutil.ToBytes32(wsb.Block().ParentRoot()), [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
headState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, headState.SetSlot(1))
|
||||
@@ -153,14 +168,14 @@ func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
	require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
	r, err := b.Block.HashTreeRoot()
	require.NoError(t, err)
	state, _ := util.DeterministicGenesisState(t, 1)
	require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), state, r))

	service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
	service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{}, [32]byte{'b'})
	service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{})
	headRoot, err := service.updateHead(context.Background(), []uint64{})
	_, err = service.updateHead(context.Background(), []uint64{})
	require.NoError(t, err)
	st, _ := util.DeterministicGenesisState(t, 1)
	require.NoError(t, service.saveHead(context.Background(), headRoot, wsb, st))
}

func Test_notifyNewHeadEvent(t *testing.T) {
|
||||
@@ -229,57 +244,374 @@ func Test_notifyNewHeadEvent(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestSaveOrphanedAtts(t *testing.T) {
|
||||
genesis, keys := util.DeterministicGenesisState(t, 64)
|
||||
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
|
||||
assert.NoError(t, err)
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
func TestSaveOrphanedAtts_NoCommonAncestor(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
service.genesisTime = time.Now()
|
||||
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
// Chain setup
|
||||
// 0 -- 1 -- 2 -- 3
|
||||
// -4
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
|
||||
assert.NoError(t, err)
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
|
||||
rG, err := blkG.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
require.NoError(t, service.saveOrphanedAtts(ctx, r))
|
||||
|
||||
require.Equal(t, len(b.Block.Body.Attestations), service.cfg.AttPool.AggregatedAttestationCount())
|
||||
savedAtts := service.cfg.AttPool.AggregatedAttestations()
|
||||
atts := b.Block.Body.Attestations
|
||||
require.DeepSSZEqual(t, atts, savedAtts)
|
||||
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
|
||||
assert.NoError(t, err)
|
||||
blk1.Block.ParentRoot = rG[:]
|
||||
r1, err := blk1.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
|
||||
assert.NoError(t, err)
|
||||
blk2.Block.ParentRoot = r1[:]
|
||||
r2, err := blk2.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk3, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 3)
|
||||
assert.NoError(t, err)
|
||||
blk3.Block.ParentRoot = r2[:]
|
||||
r3, err := blk3.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk4 := util.NewBeaconBlock()
|
||||
blk4.Block.Slot = 4
|
||||
r4, err := blk4.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
|
||||
r, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, b))
|
||||
}
|
||||
|
||||
require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
|
||||
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
|
||||
}
|
||||
|
||||
func TestSaveOrphanedAtts(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
|
||||
// Chain setup
|
||||
// 0 -- 1 -- 2 -- 3
|
||||
// \-4
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
|
||||
assert.NoError(t, err)
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
|
||||
rG, err := blkG.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
|
||||
assert.NoError(t, err)
|
||||
blk1.Block.ParentRoot = rG[:]
|
||||
r1, err := blk1.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
|
||||
assert.NoError(t, err)
|
||||
blk2.Block.ParentRoot = r1[:]
|
||||
r2, err := blk2.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk3, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 3)
|
||||
assert.NoError(t, err)
|
||||
blk3.Block.ParentRoot = r2[:]
|
||||
r3, err := blk3.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk4 := util.NewBeaconBlock()
|
||||
blk4.Block.Slot = 4
|
||||
blk4.Block.ParentRoot = rG[:]
|
||||
r4, err := blk4.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
|
||||
r, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, b))
|
||||
}
|
||||
|
||||
require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
|
||||
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
|
||||
wantAtts := []*ethpb.Attestation{
|
||||
blk3.Block.Body.Attestations[0],
|
||||
blk2.Block.Body.Attestations[0],
|
||||
blk1.Block.Body.Attestations[0],
|
||||
}
|
||||
atts := service.cfg.AttPool.AggregatedAttestations()
|
||||
sort.Slice(atts, func(i, j int) bool {
|
||||
return atts[i].Data.Slot > atts[j].Data.Slot
|
||||
})
|
||||
require.DeepEqual(t, wantAtts, atts)
|
||||
}
|
||||
|
||||
func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
|
||||
genesis, keys := util.DeterministicGenesisState(t, 64)
|
||||
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+2)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
|
||||
// Chain setup
|
||||
// 0 -- 1 -- 2
|
||||
// \-4
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
|
||||
assert.NoError(t, err)
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
|
||||
rG, err := blkG.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
|
||||
assert.NoError(t, err)
|
||||
blk1.Block.ParentRoot = rG[:]
|
||||
r1, err := blk1.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
|
||||
assert.NoError(t, err)
|
||||
blk2.Block.ParentRoot = r1[:]
|
||||
r2, err := blk2.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk4 := util.NewBeaconBlock()
|
||||
blk4.Block.Slot = 4
|
||||
blk4.Block.ParentRoot = rG[:]
|
||||
r4, err := blk4.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk4} {
|
||||
r, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, b))
|
||||
}
|
||||
|
||||
require.NoError(t, service.saveOrphanedAtts(ctx, r2, r4))
|
||||
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
|
||||
}
|
||||
|
||||
func TestSaveOrphanedAtts_NoCommonAncestor_DoublyLinkedTrie(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableForkChoiceDoublyLinkedTree: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
// Chain setup
|
||||
// 0 -- 1 -- 2 -- 3
|
||||
// -4
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
|
||||
assert.NoError(t, err)
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
|
||||
rG, err := blkG.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
require.NoError(t, service.saveOrphanedAtts(ctx, r))
|
||||
|
||||
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
|
||||
assert.NoError(t, err)
|
||||
blk1.Block.ParentRoot = rG[:]
|
||||
r1, err := blk1.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
|
||||
assert.NoError(t, err)
|
||||
blk2.Block.ParentRoot = r1[:]
|
||||
r2, err := blk2.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk3, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 3)
|
||||
assert.NoError(t, err)
|
||||
blk3.Block.ParentRoot = r2[:]
|
||||
r3, err := blk3.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk4 := util.NewBeaconBlock()
|
||||
blk4.Block.Slot = 4
|
||||
r4, err := blk4.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
|
||||
r, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, b))
|
||||
}
|
||||
|
||||
require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
|
||||
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
|
||||
}
|
||||
|
||||
func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableForkChoiceDoublyLinkedTree: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
|
||||
// Chain setup
|
||||
// 0 -- 1 -- 2 -- 3
|
||||
// \-4
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
|
||||
assert.NoError(t, err)
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
|
||||
rG, err := blkG.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
|
||||
assert.NoError(t, err)
|
||||
blk1.Block.ParentRoot = rG[:]
|
||||
r1, err := blk1.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
|
||||
assert.NoError(t, err)
|
||||
blk2.Block.ParentRoot = r1[:]
|
||||
r2, err := blk2.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk3, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 3)
|
||||
assert.NoError(t, err)
|
||||
blk3.Block.ParentRoot = r2[:]
|
||||
r3, err := blk3.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk4 := util.NewBeaconBlock()
|
||||
blk4.Block.Slot = 4
|
||||
blk4.Block.ParentRoot = rG[:]
|
||||
r4, err := blk4.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
|
||||
r, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, b))
|
||||
}
|
||||
|
||||
require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
|
||||
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
|
||||
wantAtts := []*ethpb.Attestation{
|
||||
blk3.Block.Body.Attestations[0],
|
||||
blk2.Block.Body.Attestations[0],
|
||||
blk1.Block.Body.Attestations[0],
|
||||
}
|
||||
atts := service.cfg.AttPool.AggregatedAttestations()
|
||||
sort.Slice(atts, func(i, j int) bool {
|
||||
return atts[i].Data.Slot > atts[j].Data.Slot
|
||||
})
|
||||
require.DeepEqual(t, wantAtts, atts)
|
||||
}
|
||||
|
||||
func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableForkChoiceDoublyLinkedTree: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+2)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
|
||||
// Chain setup
|
||||
// 0 -- 1 -- 2
|
||||
// \-4
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
|
||||
assert.NoError(t, err)
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
|
||||
rG, err := blkG.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
|
||||
assert.NoError(t, err)
|
||||
blk1.Block.ParentRoot = rG[:]
|
||||
r1, err := blk1.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
|
||||
assert.NoError(t, err)
|
||||
blk2.Block.ParentRoot = r1[:]
|
||||
r2, err := blk2.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk4 := util.NewBeaconBlock()
|
||||
blk4.Block.Slot = 4
|
||||
blk4.Block.ParentRoot = rG[:]
|
||||
r4, err := blk4.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk4} {
|
||||
r, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, b))
|
||||
}
|
||||
|
||||
require.NoError(t, service.saveOrphanedAtts(ctx, r2, r4))
|
||||
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
|
||||
savedAtts := service.cfg.AttPool.AggregatedAttestations()
|
||||
atts := b.Block.Body.Attestations
|
||||
require.DeepNotSSZEqual(t, atts, savedAtts)
|
||||
}
|
||||
|
||||
func TestUpdateHead_noSavedChanges(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := doublylinkedtree.New(0, 0)
|
||||
fcs := doublylinkedtree.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -296,7 +628,7 @@ func TestUpdateHead_noSavedChanges(t *testing.T) {
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, bellatrixBlk))
|
||||
fcp := &ethpb.Checkpoint{
|
||||
Root: bellatrixBlkRoot[:],
|
||||
Epoch: 1,
|
||||
Epoch: 0,
|
||||
}
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(fcp, [32]byte{'a'})
|
||||
service.store.SetJustifiedCheckptAndPayloadHash(fcp, [32]byte{'b'})
|
||||
@@ -309,6 +641,9 @@ func TestUpdateHead_noSavedChanges(t *testing.T) {
|
||||
headRoot := service.headRoot()
|
||||
require.Equal(t, [32]byte{}, headRoot)
|
||||
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 0, bellatrixBlkRoot, [32]byte{}, [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
|
||||
newRoot, err := service.updateHead(ctx, []uint64{1, 2})
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, headRoot, newRoot)
|
||||
|
||||
@@ -5,6 +5,8 @@ import (
	"fmt"
	"time"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/config/params"
	"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
@@ -60,27 +62,50 @@ func logBlockSyncStatus(block interfaces.BeaconBlock, blockRoot [32]byte, justif
		return err
	}
	level := log.Logger.GetLevel()

	log = log.WithField("slot", block.Slot())
	if level >= logrus.DebugLevel {
		log.WithFields(logrus.Fields{
			"slot": block.Slot(),
			"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
			"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
			"epoch": slots.ToEpoch(block.Slot()),
			"justifiedEpoch": justified.Epoch,
			"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
			"finalizedEpoch": finalized.Epoch,
			"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
			"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(block.ParentRoot())[:8]),
			"version": version.String(block.Version()),
			"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
			"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime),
		}).Debug("Synced new block")
	} else {
		log.WithFields(logrus.Fields{
			"slot": block.Slot(),
			"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
			"epoch": slots.ToEpoch(block.Slot()),
		}).Info("Synced new block")
		log = log.WithField("slotInEpoch", block.Slot()%params.BeaconConfig().SlotsPerEpoch)
		log = log.WithField("justifiedEpoch", justified.Epoch)
		log = log.WithField("justifiedRoot", fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]))
		log = log.WithField("parentRoot", fmt.Sprintf("0x%s...", hex.EncodeToString(block.ParentRoot())[:8]))
		log = log.WithField("version", version.String(block.Version()))
		log = log.WithField("sinceSlotStartTime", prysmTime.Now().Sub(startTime))
		log = log.WithField("chainServiceProcessedTime", prysmTime.Now().Sub(receivedTime))
	}

	log.WithFields(logrus.Fields{
		"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
		"epoch": slots.ToEpoch(block.Slot()),
		"finalizedEpoch": finalized.Epoch,
		"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
	}).Info("Synced new block")
	return nil
}

// logs payload related data every slot.
func logPayload(block interfaces.BeaconBlock) error {
	isExecutionBlk, err := blocks.IsExecutionBlock(block.Body())
	if err != nil {
		return errors.Wrap(err, "could not determine if block is execution block")
	}
	if !isExecutionBlk {
		return nil
	}
	payload, err := block.Body().ExecutionPayload()
	if err != nil {
		return err
	}
	if payload.GasLimit == 0 {
		return errors.New("gas limit should not be 0")
	}
	gasUtilized := float64(payload.GasUsed) / float64(payload.GasLimit)

	log.WithFields(logrus.Fields{
		"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
		"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.ParentHash)),
		"blockNumber": payload.BlockNumber,
		"gasUtilized": fmt.Sprintf("%.2f", gasUtilized),
	}).Debug("Synced new payload")
	return nil
}

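The gasUtilized field above is simply gasUsed divided by gasLimit, guarded against a zero limit. A small stand-alone illustration of the same calculation, assuming nothing beyond the standard library:

package main

import (
	"errors"
	"fmt"
)

// gasUtilization returns gasUsed/gasLimit as a fraction, rejecting a zero
// limit the same way logPayload does.
func gasUtilization(gasUsed, gasLimit uint64) (float64, error) {
	if gasLimit == 0 {
		return 0, errors.New("gas limit should not be 0")
	}
	return float64(gasUsed) / float64(gasLimit), nil
}

func main() {
	u, err := gasUtilization(15_000_000, 30_000_000)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%.2f\n", u) // 0.50
}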
beacon-chain/blockchain/merge_ascii_art.go (new file, 81 lines)
@@ -0,0 +1,81 @@
|
||||
package blockchain
|
||||
|
||||
var mergeAsciiArt = `
|
||||
|
||||
+?$$$$$$?*; ;*?$$$$?*; +!$$$$$$?!;
|
||||
!##$???$@##$+ !&#@$??$&#@* +@#&$????$##+
|
||||
!##; +@#&; !##* ;$##* @#$ ;&#+
|
||||
!##; !##+ ;##$ @#@ $#&* ++;
|
||||
!##; ;@#&; *##* ?##; ;$##&$!+;
|
||||
!##?!!!?$##$+ !##+ !##+ ;!$@##&$!;
|
||||
!##@@@@$$!+ *##* ?##; ;*?@#&!
|
||||
!##; ;##$ @#@ ;?$; ?##+
|
||||
!##; ?##! ;?##+ ;##+ ;$#&;
|
||||
!##; !&#&$??$&#@* ;&#&$$??$$&#@+
|
||||
+??; ;*?$$$$?+ ;+!?$$$$$!+
|
||||
;;;;
|
||||
;+!??$$$?!*+; ;*?$@&&&&&@@$!*;
|
||||
*?@############&$?+ ;!@###############&$!;
|
||||
;!@####&@$????$$@#####@! ;?&####$?*++;++*!?@####&?;
|
||||
*@###&$*; ;*$&###@* *&###@!; ;!@###&!
|
||||
!###&!; ;?&###? *####! *&###?
|
||||
!###@+ ;$###$ +###&+ ;$###?
|
||||
;###&; $###? ;;+*!??$$$$$$$$??!$###* ;@###*
|
||||
!###! &###?$@&#####################@$?!+; +###$
|
||||
$###+ ;*?&#################################&$?*; &##&;
|
||||
$###+ ;!$&########################################&$!; &###;
|
||||
*###? ;!$################################################$*; +###@
|
||||
;@###+ +$&####################################################&?; $###!
|
||||
+###&+ *$##########################################################$+ ;$###$;
|
||||
*&###?; +$##############################################################?*@###$;
|
||||
+$###&?+ ;$#####################################################################?;
|
||||
*@####@&#####################################################################*
|
||||
+$&##################@?!*++*!$&###################&$?*++*!?$###############&*
|
||||
$###############&?+ ;!@###############@!; ;!@##############!
|
||||
;$##############&!; *&###########&!; !&#############!
|
||||
$##############@+ +@* ;$#########$; +@* ;$#############!
|
||||
?##############$; *###* $#######$; +&##! ?#############+
|
||||
+##############$ !#####! $#####@; *#####? ?############@;
|
||||
@#############@; !#######! ;&####+ *#######? $############?
|
||||
+##############+ ?#########? $###$ !#########$ +############&;
|
||||
$#############$ ;$###########? !###? !###########$; $############!
|
||||
@#############* !#############! ?###$ *#############? +############@
|
||||
;&############&; +?@#######&$+; ;&####; ;+?@#######&?+; @############;
|
||||
;#############@ +$&#&$*; ;$#####@; +$&#&$*; $############+
|
||||
;#############$ *+ ;+; ;*; *&#######&! ;*; ;+; +*; $############*
|
||||
&############@ ;$@!; +$@! ;?###########$; *@$* ;*$@+ $############*
|
||||
$############&; ;$#&$*+!@##* +@#############@+ +&#@?++$&#@; @############+
|
||||
*#############* $######&+ !#################? +@######$; +#############;
|
||||
@############$ ?####@+ ;$###################$; ;@####$ $############@
|
||||
*#############* !##@; +@#####################&* ;$##? +#############!
|
||||
$#############+ *$; ?#########################?; $! +&############&;
|
||||
;&#############! +$###########################@+ *&#############?
|
||||
+##############$*; ;?###############################$+ *$##############@;
|
||||
*###############&$?!!$###################################$?!?$&################+
|
||||
*###################################&@$$$$@&#################################!
|
||||
+&##############################&?+; ;+?&#############################?
|
||||
;$############################@; ;@###########################!
|
||||
?###########################* *##########################*
|
||||
+@#############&$!+$#######? ?########$+!$&###########@+
|
||||
!&###########&; $#######$+ +$########? +&##########?;
|
||||
;?###########&* *@#######@$!; ;$@########@* *##########$+
|
||||
;?&##########?; ;*$&####&$* ;!$&####&$* ;$#########@*
|
||||
;!@#########@!; ;++*+; ;*; ;+*++; ;!&########$*
|
||||
*$&########&$*; ;*$&#&$*; +*$&#######&?+
|
||||
;*$&#########&@$$@&#########&@$$@&########&$*;
|
||||
;+?$&##############################&$?+;
|
||||
;+!?$@&###################&$$!+;
|
||||
;++*!??$$$$$$$?!!*+;
|
||||
|
||||
;;; ;;+*++; ;;;++;;;++;;; ;+++;;;++++; ;;; ;; ;;; ;;;++;;;+++;; ;;;+++++++; ;+++++;
|
||||
;@#&+ +$&&@$@&#? @#@@@&#&@@@#$ ;$@@@@#&@@@@! !#@ !#$ +&#&; !#&@@@#&@@@&&; !#&@@@@@@@* $#&@@@&&$+
|
||||
$#?#@; ?#&!; *#$ &&;;;$#!;;*#$ ;;;*#@;;;; $#? +#&; ;@#?#$ !#!;;*#@;;;@#; ?#$;;;;;;; @#* ;!&#?
|
||||
*#$ ?#$ *#&; ;+; ++ $#! ;+; *#$ ;&#+ $#* ?#? $#! ;+; +#@ ++ ?#$ $#* ;&#*
|
||||
;&&; @#* $#$ $#! *#$ !#@; !#$ +#@ ;&#; +#@ ?#@??????! $#* $#$
|
||||
$#!;;;*#&; $#? $#! *#$ $#? ;&&; ;@#*;;;!#$ +#@ ?#@??????! $#* $#$
|
||||
!##@@@@@&#$ !#@; $#! *#$ ;&#+ $#* ?#&@@@@@##! +#@ ?#$ $#* @#*
|
||||
;&&+;;;;;;@#* ;$#$+ @$ $#! *#$ *#@!#? *#@;;;;;;+##+ +#@ ?#$ $#* +$#$
|
||||
$#* +#&; ;?&#@$$$$#$ +$$&#@$$; ;$$$$@##$$$$* $##@; ;&#+ !#@; $$@##$$* ?#&$$$$$$$! @#@$$$@&@!
|
||||
;** +*; +*!?!*+; ;******* ;***********+ ;**; ;*+ **; *******+ ;*********+ +*!!!!*;
|
||||
|
||||
`
|
||||
@@ -158,6 +158,10 @@ var (
		Name: "forkchoice_updated_optimistic_node_count",
		Help: "Count the number of optimistic nodes after forkchoiceUpdated EE call",
	})
	missedPayloadIDFilledCount = promauto.NewCounter(prometheus.CounterOpts{
		Name: "missed_payload_id_filled_count",
		Help: "",
	})
)

// reportSlotMetrics reports slot related metrics.

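The counters added here follow the usual promauto pattern: declare once in a package-level var block, then call Inc() where the event happens (as reorgCount.Inc() does earlier in this diff). A minimal stand-alone example of that pattern; the metric name below is illustrative, not one of Prysm's:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// exampleEventCount is registered with the default registry at init time.
var exampleEventCount = promauto.NewCounter(prometheus.CounterOpts{
	Name: "example_event_count",
	Help: "Counts how many times the example event occurred",
})

func main() {
	exampleEventCount.Inc() // bump the counter when the event happens
	fmt.Println("counter incremented")
}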
@@ -13,7 +13,7 @@ import (

func testServiceOptsWithDB(t *testing.T) []Option {
	beaconDB := testDB.SetupDB(t)
	fcs := protoarray.New(0, 0)
	fcs := protoarray.New()
	return []Option{
		WithDatabase(beaconDB),
		WithStateGen(stategen.New(beaconDB)),

@@ -5,7 +5,9 @@ import (
	"context"

	"github.com/pkg/errors"
	forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
	types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	"github.com/prysmaticlabs/prysm/time/slots"
)

@@ -68,7 +70,8 @@ func (s *Service) NewSlot(ctx context.Context, slot types.Slot) error {
		return err
	}
	s.store.SetJustifiedCheckptAndPayloadHash(bj, h)
	if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(bj); err != nil {
	if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{
		Epoch: bj.Epoch, Root: bytesutil.ToBytes32(bj.Root)}); err != nil {
		return err
	}
}

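The forkchoice store now takes its own checkpoint type with a fixed-size [32]byte root, so the protobuf checkpoint's byte-slice root has to be converted before the update call (via bytesutil.ToBytes32 in the hunk above). A small sketch of that conversion using local stand-in types, not the actual Prysm definitions:

package main

import "fmt"

// pbCheckpoint mimics the protobuf checkpoint: the root is a byte slice.
type pbCheckpoint struct {
	Epoch uint64
	Root  []byte
}

// fcCheckpoint mimics the forkchoice checkpoint: the root is a fixed array.
type fcCheckpoint struct {
	Epoch uint64
	Root  [32]byte
}

// toBytes32 copies up to 32 bytes of a slice into a fixed-size array,
// the same role bytesutil.ToBytes32 plays in the diff above.
func toBytes32(b []byte) [32]byte {
	var out [32]byte
	copy(out[:], b)
	return out
}

func toForkchoiceCheckpoint(cp pbCheckpoint) fcCheckpoint {
	return fcCheckpoint{Epoch: cp.Epoch, Root: toBytes32(cp.Root)}
}

func main() {
	cp := pbCheckpoint{Epoch: 5, Root: []byte{0xaa, 0xbb}}
	converted := toForkchoiceCheckpoint(cp)
	fmt.Printf("%d %x\n", converted.Epoch, converted.Root[:2]) // 5 aabb
}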
@@ -20,7 +20,7 @@ import (
|
||||
|
||||
func TestService_newSlot(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -36,11 +36,21 @@ func TestService_newSlot(t *testing.T) {
|
||||
bj, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, 0, 0)) // genesis
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 32, [32]byte{'a'}, [32]byte{}, [32]byte{}, 0, 0)) // finalized
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 64, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 0, 0)) // justified
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 96, bj, [32]byte{'a'}, [32]byte{}, 0, 0)) // best justified
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 97, [32]byte{'d'}, [32]byte{}, [32]byte{}, 0, 0)) // bad
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // genesis
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 32, [32]byte{'a'}, [32]byte{}, [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // finalized
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 64, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // justified
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 96, bj, [32]byte{'a'}, [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // best justified
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 97, [32]byte{'d'}, [32]byte{}, [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // bad
|
||||
|
||||
type args struct {
|
||||
slot types.Slot
|
||||
|
||||
@@ -78,6 +78,8 @@ func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.SignedBea
|
||||
"mergeBlockParentTotalDifficulty": mergeBlockParentTD,
|
||||
}).Info("Validated terminal block")
|
||||
|
||||
log.Info(mergeAsciiArt)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -109,7 +109,7 @@ func Test_validateMergeBlock(t *testing.T) {
|
||||
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -152,7 +152,7 @@ func Test_validateMergeBlock(t *testing.T) {
|
||||
func Test_getBlkParentHashAndTD(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
@@ -28,7 +29,7 @@ func TestStore_OnAttestation_ErrorConditions_ProtoArray(t *testing.T) {
|
||||
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithForkChoiceStore(protoarray.New(0, 0)),
|
||||
WithForkChoiceStore(protoarray.New()),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
@@ -140,7 +141,7 @@ func TestStore_OnAttestation_ErrorConditions_DoublyLinkedTree(t *testing.T) {
|
||||
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithForkChoiceStore(doublylinkedtree.New(0, 0)),
|
||||
WithForkChoiceStore(doublylinkedtree.New()),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
@@ -250,7 +251,7 @@ func TestStore_OnAttestation_Ok_ProtoArray(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -268,7 +269,9 @@ func TestStore_OnAttestation_Ok_ProtoArray(t *testing.T) {
|
||||
copied, err = transition.ProcessSlots(ctx, copied, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
require.NoError(t, service.OnAttestation(ctx, att[0]))
|
||||
}
|
||||
|
||||
@@ -276,7 +279,7 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
fcs := doublylinkedtree.New(0, 0)
|
||||
fcs := doublylinkedtree.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -294,7 +297,9 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
|
||||
copied, err = transition.ProcessSlots(ctx, copied, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
require.NoError(t, service.OnAttestation(ctx, att[0]))
|
||||
}
|
||||
|
||||
@@ -483,7 +488,7 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_ProtoArray(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -518,7 +523,7 @@ func TestVerifyFinalizedConsistency_InconsistentRoot_DoublyLinkedTree(t *testing
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
fcs := doublylinkedtree.New(0, 0)
|
||||
fcs := doublylinkedtree.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -599,10 +604,16 @@ func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, b32.Block.Slot, r32, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, b33.Block.Slot, r33, r32, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, b32.Block.Slot, r32, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, b33.Block.Slot, r33, r32, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
_, err = service.cfg.ForkChoiceStore.Head(ctx, r32, []uint64{})
|
||||
jc := &forkchoicetypes.Checkpoint{Epoch: 0, Root: r32}
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(jc))
|
||||
_, err = service.cfg.ForkChoiceStore.Head(ctx, []uint64{})
|
||||
require.NoError(t, err)
|
||||
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/async/event"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
@@ -16,6 +17,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/forks/bellatrix"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
@@ -117,7 +119,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
|
||||
}
|
||||
isValidPayload, err := s.notifyNewPayload(ctx, postStateVersion, postStateHeader, signed)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not verify new payload")
|
||||
return fmt.Errorf("could not verify new payload: %v", err)
|
||||
}
|
||||
if isValidPayload {
|
||||
if err := s.validateMergeTransitionBlock(ctx, preStateVersion, preStateHeader, signed); err != nil {
|
||||
@@ -130,7 +132,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
|
||||
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
|
||||
return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
|
||||
}
|
||||
s.insertSlashingsToForkChoiceStore(ctx, signed.Block().Body().AttesterSlashings())
|
||||
s.InsertSlashingsToForkChoiceStore(ctx, signed.Block().Body().AttesterSlashings())
|
||||
if isValidPayload {
|
||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil {
|
||||
return errors.Wrap(err, "could not set optimistic block to valid")
|
||||
@@ -220,10 +222,12 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
|
||||
}
|
||||
s.store.SetJustifiedCheckptAndPayloadHash(postState.CurrentJustifiedCheckpoint(), h)
|
||||
// Update Forkchoice checkpoints
|
||||
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(psj); err != nil {
|
||||
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{
|
||||
Epoch: psj.Epoch, Root: bytesutil.ToBytes32(psj.Root)}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(psf); err != nil {
|
||||
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
|
||||
Epoch: psf.Epoch, Root: bytesutil.ToBytes32(psf.Root)}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -280,7 +284,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
|
||||
}
|
||||
fRoot := bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root)
|
||||
if err := s.cfg.ForkChoiceStore.Prune(ctx, fRoot); err != nil {
|
||||
return errors.Wrap(err, "could not prune proto array fork choice nodes")
|
||||
return errors.Wrap(err, "could not prune fork choice nodes")
|
||||
}
|
||||
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(fRoot)
|
||||
if err != nil {
|
||||
@@ -334,33 +338,38 @@ func getStateVersionAndPayload(st state.BeaconState) (int, *ethpb.ExecutionPaylo
|
||||
}
|
||||
|
||||
func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeaconBlock,
|
||||
blockRoots [][32]byte) ([]*ethpb.Checkpoint, []*ethpb.Checkpoint, error) {
|
||||
blockRoots [][32]byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
|
||||
defer span.End()
|
||||
|
||||
if len(blks) == 0 || len(blockRoots) == 0 {
|
||||
return nil, nil, errors.New("no blocks provided")
|
||||
return errors.New("no blocks provided")
|
||||
}
|
||||
|
||||
if len(blks) != len(blockRoots) {
|
||||
return nil, nil, errWrongBlockCount
|
||||
return errWrongBlockCount
|
||||
}
|
||||
|
||||
if err := wrapper.BeaconBlockIsNil(blks[0]); err != nil {
|
||||
return nil, nil, invalidBlock{err}
|
||||
return invalidBlock{err}
|
||||
}
|
||||
b := blks[0].Block()
|
||||
|
||||
// Retrieve incoming block's pre state.
|
||||
if err := s.verifyBlkPreState(ctx, b); err != nil {
|
||||
return nil, nil, err
|
||||
return err
|
||||
}
|
||||
preState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, bytesutil.ToBytes32(b.ParentRoot()))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return err
|
||||
}
|
||||
if preState == nil || preState.IsNil() {
|
||||
return nil, nil, fmt.Errorf("nil pre state for slot %d", b.Slot())
|
||||
return fmt.Errorf("nil pre state for slot %d", b.Slot())
|
||||
}
|
||||
|
||||
// Fill in missing blocks
|
||||
if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0].Block(), preState.CurrentJustifiedCheckpoint(), preState.FinalizedCheckpoint()); err != nil {
|
||||
return errors.Wrap(err, "could not fill in missing blocks to forkchoice")
|
||||
}
|
||||
|
||||
jCheckpoints := make([]*ethpb.Checkpoint, len(blks))
|
||||
@@ -381,7 +390,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
|
||||
for i, b := range blks {
|
||||
v, h, err := getStateVersionAndPayload(preState)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return err
|
||||
}
|
||||
preVersionAndHeaders[i] = &versionAndHeader{
|
||||
version: v,
|
||||
@@ -390,7 +399,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
|
||||
|
||||
set, preState, err = transition.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, b)
|
||||
if err != nil {
|
||||
return nil, nil, invalidBlock{err}
|
||||
return invalidBlock{err}
|
||||
}
|
||||
// Save potential boundary states.
|
||||
if slots.IsEpochStart(preState.Slot()) {
|
||||
@@ -401,7 +410,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
|
||||
|
||||
v, h, err = getStateVersionAndPayload(preState)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return err
|
||||
}
|
||||
postVersionAndHeaders[i] = &versionAndHeader{
|
||||
version: v,
|
||||
@@ -411,52 +420,71 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
|
||||
}
|
||||
verify, err := sigSet.Verify()
|
||||
if err != nil {
|
||||
return nil, nil, invalidBlock{err}
|
||||
return invalidBlock{err}
|
||||
}
|
||||
if !verify {
|
||||
return nil, nil, errors.New("batch block signature verification failed")
|
||||
return errors.New("batch block signature verification failed")
|
||||
}
|
||||
|
||||
	// blocks have been verified, add them to forkchoice and call the engine
	// blocks have been verified, save them and call the engine
	pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, len(blks))
	var isValidPayload bool
	for i, b := range blks {
		isValidPayload, err := s.notifyNewPayload(ctx,
		isValidPayload, err = s.notifyNewPayload(ctx,
			postVersionAndHeaders[i].version,
			postVersionAndHeaders[i].header, b)
		if err != nil {
			return nil, nil, err
			return err
		}
		if isValidPayload {
			if err := s.validateMergeTransitionBlock(ctx, preVersionAndHeaders[i].version,
				preVersionAndHeaders[i].header, b); err != nil {
				return nil, nil, err
			}
		}

		if err := s.insertBlockToForkChoiceStore(ctx, b.Block(), blockRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
			return nil, nil, err
		}
		if isValidPayload {
			if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoots[i]); err != nil {
				return nil, nil, errors.Wrap(err, "could not set optimistic block to valid")
				return err
			}
		}
		args := &forkchoicetypes.BlockAndCheckpoints{Block: b.Block(),
			JustifiedCheckpoint: jCheckpoints[i],
			FinalizedCheckpoint: fCheckpoints[i]}
		pendingNodes[len(blks)-i-1] = args
		s.saveInitSyncBlock(blockRoots[i], b)
		if err = s.handleBlockAfterBatchVerify(ctx, b, blockRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
			tracing.AnnotateError(span, err)
			return nil, nil, err
			return err
		}
	}
	// Insert all nodes but the last one to forkchoice
	if err := s.cfg.ForkChoiceStore.InsertOptimisticChain(ctx, pendingNodes); err != nil {
		return errors.Wrap(err, "could not insert batch to forkchoice")
	}
	// Insert the last block to forkchoice
	lastBR := blockRoots[len(blks)-1]
	if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastBR); err != nil {
		return errors.Wrap(err, "could not insert last block in batch to forkchoice")
	}
	// Prune forkchoice store only if the new finalized checkpoint is higher
	// than the finalized checkpoint in forkchoice store.
	if fCheckpoints[len(blks)-1].Epoch > s.cfg.ForkChoiceStore.FinalizedCheckpoint().Epoch {
		if err := s.cfg.ForkChoiceStore.Prune(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(fCheckpoints[len(blks)-1].Root))); err != nil {
			return errors.Wrap(err, "could not prune fork choice nodes")
		}
	}

	// Set their optimistic status
	if isValidPayload {
		if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, lastBR); err != nil {
			return errors.Wrap(err, "could not set optimistic block to valid")
		}
	}

for r, st := range boundaries {
|
||||
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
|
||||
return nil, nil, err
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Also saves the last post state which to be used as pre state for the next batch.
|
||||
lastB := blks[len(blks)-1]
|
||||
lastBR := blockRoots[len(blockRoots)-1]
|
||||
if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
|
||||
return nil, nil, err
|
||||
return err
|
||||
}
|
||||
arg := ¬ifyForkchoiceUpdateArg{
|
||||
headState: preState,
|
||||
@@ -464,12 +492,9 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
|
||||
headBlock: lastB.Block(),
|
||||
}
|
||||
if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
|
||||
return nil, nil, err
|
||||
return err
|
||||
}
|
||||
if err := s.saveHeadNoDB(ctx, lastB, lastBR, preState); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return fCheckpoints, jCheckpoints, nil
|
||||
return s.saveHeadNoDB(ctx, lastB, lastBR, preState)
|
||||
}
|
||||
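onBlockBatch collects every verified block into pendingNodes in reverse order (pendingNodes[len(blks)-i-1] = args), inserts that chain in one forkchoice call, and then inserts the final block separately with its post state. A toy sketch of the reverse-fill pattern with simplified types, not the real BlockAndCheckpoints:

package main

import "fmt"

type pendingNode struct {
	slot uint64
}

// reversePending mirrors how onBlockBatch fills pendingNodes: the i-th
// verified block lands at index len(slots)-i-1, so the newest block ends up
// first in the slice handed to the forkchoice store.
func reversePending(slots []uint64) []pendingNode {
	pending := make([]pendingNode, len(slots))
	for i, s := range slots {
		pending[len(slots)-i-1] = pendingNode{slot: s}
	}
	return pending
}

func main() {
	fmt.Println(reversePending([]uint64{1, 2, 3})) // [{3} {2} {1}]
}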
|
||||
// handles a block after the block's batch has been verified, where we can save blocks
|
||||
@@ -520,6 +545,10 @@ func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed interf
|
||||
return err
|
||||
}
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(fCheckpoint, h)
|
||||
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
|
||||
Epoch: fCheckpoint.Epoch, Root: bytesutil.ToBytes32(fCheckpoint.Root)}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -536,16 +565,20 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
return err
}
// Update caches for the next epoch at epoch boundary slot - 1.
if err := helpers.UpdateCommitteeCache(copied, coreTime.CurrentEpoch(copied)); err != nil {
if err := helpers.UpdateCommitteeCache(ctx, copied, coreTime.CurrentEpoch(copied)); err != nil {
return err
}
if err := helpers.UpdateProposerIndicesInCache(ctx, copied); err != nil {
return err
}
} else if postState.Slot() >= s.nextEpochBoundarySlot {
if err := reportEpochMetrics(ctx, postState, s.head.state); err != nil {
s.headLock.RLock()
st := s.head.state
s.headLock.RUnlock()
if err := reportEpochMetrics(ctx, postState, st); err != nil {
return err
}

var err error
s.nextEpochBoundarySlot, err = slots.EpochStart(coreTime.NextEpoch(postState))
if err != nil {
@@ -554,7 +587,7 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco

// Update caches at epoch boundary slot.
// The following updates have short cut to return nil cheaply if fulfilled during boundary slot - 1.
if err := helpers.UpdateCommitteeCache(postState, coreTime.CurrentEpoch(postState)); err != nil {
if err := helpers.UpdateCommitteeCache(ctx, postState, coreTime.CurrentEpoch(postState)); err != nil {
return err
}
if err := helpers.UpdateProposerIndicesInCache(ctx, postState); err != nil {
@@ -567,14 +600,13 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco

// This feeds in the block and block's attestations to fork choice store. It's allows fork choice store
// to gain information on the most current chain.
func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Context, blk interfaces.BeaconBlock, root [32]byte,
st state.BeaconState) error {
func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Context, blk interfaces.BeaconBlock, root [32]byte, st state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.insertBlockAndAttestationsToForkChoiceStore")
defer span.End()

fCheckpoint := st.FinalizedCheckpoint()
jCheckpoint := st.CurrentJustifiedCheckpoint()
if err := s.insertBlockToForkChoiceStore(ctx, blk, root, fCheckpoint, jCheckpoint); err != nil {
if err := s.insertBlockToForkChoiceStore(ctx, blk, root, st, fCheckpoint, jCheckpoint); err != nil {
return err
}
// Feed in block's attestations to fork choice store.
@@ -592,26 +624,16 @@ func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Contex
return nil
}

func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk interfaces.BeaconBlock,
root [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk interfaces.BeaconBlock, root [32]byte, st state.BeaconState, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
return err
}
// Feed in block to fork choice store.

payloadHash, err := getBlockPayloadHash(blk)
if err != nil {
return err
}
return s.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx,
blk.Slot(), root, bytesutil.ToBytes32(blk.ParentRoot()), payloadHash,
jCheckpoint.Epoch,
fCheckpoint.Epoch)
return s.cfg.ForkChoiceStore.InsertNode(ctx, st, root)
}

// Inserts attester slashing indices to fork choice store.
// To call this function, it's caller's responsibility to ensure the slashing object is valid.
func (s *Service) insertSlashingsToForkChoiceStore(ctx context.Context, slashings []*ethpb.AttesterSlashing) {
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []*ethpb.AttesterSlashing) {
for _, slashing := range slashings {
indices := blocks.SlashableAttesterIndices(slashing)
for _, index := range indices {
@@ -620,18 +642,6 @@ func (s *Service) insertSlashingsToForkChoiceStore(ctx context.Context, slashing
}
}

func getBlockPayloadHash(blk interfaces.BeaconBlock) ([32]byte, error) {
payloadHash := [32]byte{}
if blocks.IsPreBellatrixVersion(blk.Version()) {
return payloadHash, nil
}
payload, err := blk.Body().ExecutionPayload()
if err != nil {
return payloadHash, err
}
return bytesutil.ToBytes32(payload.BlockHash), nil
}

// This saves post state info to DB or cache. This also saves post state info to fork choice store.
// Post state info consists of processed block and state. Do not call this method unless the block and state are verified.
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interfaces.SignedBeaconBlock, st state.BeaconState) error {
@@ -649,10 +659,6 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interface
// This removes the attestations from the mem pool. It will only remove the attestations if input root `r` is canonical,
// meaning the block `b` is part of the canonical chain.
func (s *Service) pruneCanonicalAttsFromPool(ctx context.Context, r [32]byte, b interfaces.SignedBeaconBlock) error {
if !features.Get().CorrectlyPruneCanonicalAtts {
return nil
}

canonical, err := s.IsCanonical(ctx, r)
if err != nil {
return err
@@ -688,7 +694,7 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
if err != nil {
return invalidBlock{err}
}
if blocks.IsEmptyPayload(payload) {
if bellatrix.IsEmptyPayload(payload) {
return nil
}

@@ -708,3 +714,53 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
}
return s.validateMergeBlock(ctx, blk)
}

// This routine checks if there is a cached proposer payload ID available for the next slot proposer.
// If there is not, it will call forkchoice updated with the correct payload attribute then cache the payload ID.
func (s *Service) fillMissingPayloadIDRoutine(ctx context.Context, stateFeed *event.Feed) {
// Wait for state to be initialized.
stateChannel := make(chan *feed.Event, 1)
stateSub := stateFeed.Subscribe(stateChannel)
go func() {
select {
case <-s.ctx.Done():
stateSub.Unsubscribe()
return
case <-stateChannel:
stateSub.Unsubscribe()
break
}

ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for {
select {
case ti := <-ticker.C:
if !atHalfSlot(ti) {
continue
}
_, id, has := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(s.CurrentSlot() + 1)
// There exists proposer for next slot, but we haven't called fcu w/ payload attribute yet.
if has && id == [8]byte{} {
if _, err := s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: s.headState(ctx),
headRoot: s.headRoot(),
headBlock: s.headBlock().Block(),
}); err != nil {
log.WithError(err).Error("Could not prepare payload on empty ID")
}
missedPayloadIDFilledCount.Inc()
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
return
}
}
}()
}

// Returns true if time `t` is halfway through the slot in sec.
func atHalfSlot(t time.Time) bool {
s := params.BeaconConfig().SecondsPerSlot
return uint64(t.Second())%s == s/2
}

@@ -7,6 +7,9 @@ import (

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
@@ -215,7 +218,8 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
}
s.store.SetJustifiedCheckptAndPayloadHash(cpt, h)
// Update forkchoice's justified checkpoint
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(cpt); err != nil {
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{
Epoch: cpt.Epoch, Root: bytesutil.ToBytes32(cpt.Root)}); err != nil {
return err
}
}
@@ -241,8 +245,8 @@ func (s *Service) updateJustifiedInitSync(ctx context.Context, cp *ethpb.Checkpo
return err
}
s.store.SetJustifiedCheckptAndPayloadHash(cp, h)

return nil
return s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{
Epoch: cp.Epoch, Root: bytesutil.ToBytes32(cp.Root)})
}

func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) error {
@@ -262,7 +266,7 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err

fRoot := bytesutil.ToBytes32(cp.Root)
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(fRoot)
if err != nil {
if err != nil && err != protoarray.ErrUnknownNodeRoot && err != doublylinkedtree.ErrNilNode {
return err
}
if !optimistic {
@@ -346,11 +350,8 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot types.Slot)
// This is useful for block tree visualizer and additional vote accounting.
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfaces.BeaconBlock,
fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
pendingNodes := make([]interfaces.BeaconBlock, 0)
pendingRoots := make([][32]byte, 0)
pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, 0)

parentRoot := bytesutil.ToBytes32(blk.ParentRoot())
slot := blk.Slot()
// Fork choice only matters from last finalized slot.
finalized, err := s.store.FinalizedCheckpt()
if err != nil {
@@ -360,39 +361,31 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
if err != nil {
return err
}
higherThanFinalized := slot > fSlot
pendingNodes = append(pendingNodes, &forkchoicetypes.BlockAndCheckpoints{Block: blk,
JustifiedCheckpoint: jCheckpoint, FinalizedCheckpoint: fCheckpoint})
// As long as parent node is not in fork choice store, and parent node is in DB.
for !s.cfg.ForkChoiceStore.HasNode(parentRoot) && s.cfg.BeaconDB.HasBlock(ctx, parentRoot) && higherThanFinalized {
b, err := s.getBlock(ctx, parentRoot)
root := bytesutil.ToBytes32(blk.ParentRoot())
for !s.cfg.ForkChoiceStore.HasNode(root) && s.cfg.BeaconDB.HasBlock(ctx, root) {
b, err := s.getBlock(ctx, root)
if err != nil {
return err
}

pendingNodes = append(pendingNodes, b.Block())
copiedRoot := parentRoot
pendingRoots = append(pendingRoots, copiedRoot)
parentRoot = bytesutil.ToBytes32(b.Block().ParentRoot())
slot = b.Block().Slot()
higherThanFinalized = slot > fSlot
}

// Insert parent nodes to fork choice store in reverse order.
// Lower slots should be at the end of the list.
for i := len(pendingNodes) - 1; i >= 0; i-- {
b := pendingNodes[i]
r := pendingRoots[i]
payloadHash, err := getBlockPayloadHash(blk)
if err != nil {
return err
}
if err := s.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx,
b.Slot(), r, bytesutil.ToBytes32(b.ParentRoot()), payloadHash,
jCheckpoint.Epoch,
fCheckpoint.Epoch); err != nil {
return errors.Wrap(err, "could not process block for proto array fork choice")
if b.Block().Slot() <= fSlot {
break
}
root = bytesutil.ToBytes32(b.Block().ParentRoot())
args := &forkchoicetypes.BlockAndCheckpoints{Block: b.Block(),
JustifiedCheckpoint: jCheckpoint,
FinalizedCheckpoint: fCheckpoint}
pendingNodes = append(pendingNodes, args)
}
return nil
if len(pendingNodes) == 1 {
return nil
}
if root != s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root)) {
return errNotDescendantOfFinalized
}
return s.cfg.ForkChoiceStore.InsertOptimisticChain(ctx, pendingNodes)
}

// inserts finalized deposits into our finalized deposit trie.
|
||||
|
||||
@@ -21,11 +21,11 @@ import (
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
|
||||
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
|
||||
@@ -47,7 +47,7 @@ func TestStore_OnBlock_ProtoArray(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -150,7 +150,7 @@ func TestStore_OnBlock_DoublyLinkedTree(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := doublylinkedtree.New(0, 0)
|
||||
fcs := doublylinkedtree.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -253,7 +253,7 @@ func TestStore_OnBlock_ProposerBoostEarly(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := doublylinkedtree.New(0, 0)
|
||||
fcs := doublylinkedtree.New()
|
||||
opts := []Option{
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
@@ -268,7 +268,7 @@ func TestStore_OnBlock_ProposerBoostEarly(t *testing.T) {
|
||||
SecondsIntoSlot: 0,
|
||||
}
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.BoostProposerRoot(ctx, args))
|
||||
_, err = service.cfg.ForkChoiceStore.Head(ctx, params.BeaconConfig().ZeroHash, []uint64{})
|
||||
_, err = service.cfg.ForkChoiceStore.Head(ctx, []uint64{})
|
||||
require.ErrorContains(t, "could not apply proposer boost score: invalid proposer boost root", err)
|
||||
}
|
||||
|
||||
@@ -293,7 +293,7 @@ func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
|
||||
service.store.SetJustifiedCheckptAndPayloadHash(ðpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})
|
||||
|
||||
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
|
||||
service.cfg.ForkChoiceStore = protoarray.New()
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
service.saveInitSyncBlock(gRoot, wsb)
|
||||
@@ -305,7 +305,7 @@ func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
|
||||
var blks []interfaces.SignedBeaconBlock
|
||||
var blkRoots [][32]byte
|
||||
var firstState state.BeaconState
|
||||
for i := 1; i < 10; i++ {
|
||||
for i := 1; i < 97; i++ {
|
||||
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), types.Slot(i))
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
@@ -331,9 +331,78 @@ func TestStore_OnBlockBatch_ProtoArray(t *testing.T) {
|
||||
rBlock.Block.ParentRoot = gRoot[:]
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[0]))
|
||||
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[0], firstState))
|
||||
_, _, err = service.onBlockBatch(ctx, blks, blkRoots[1:])
|
||||
err = service.onBlockBatch(ctx, blks, blkRoots[1:])
|
||||
require.ErrorIs(t, errWrongBlockCount, err)
|
||||
_, _, err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
|
||||
service.originBlockRoot = blkRoots[1]
|
||||
err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
|
||||
require.NoError(t, err)
|
||||
jcp, err := service.store.JustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
jroot := bytesutil.ToBytes32(jcp.Root)
|
||||
require.Equal(t, blkRoots[63], jroot)
|
||||
require.Equal(t, types.Epoch(2), service.cfg.ForkChoiceStore.JustifiedCheckpoint().Epoch)
|
||||
}
|
||||
|
||||
func TestStore_OnBlockBatch_PruneOK(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
gRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
service.cfg.ForkChoiceStore = protoarray.New()
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
service.saveInitSyncBlock(gRoot, wsb)
|
||||
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
|
||||
bState := st.Copy()
|
||||
|
||||
var blks []interfaces.SignedBeaconBlock
|
||||
var blkRoots [][32]byte
|
||||
var firstState state.BeaconState
|
||||
for i := 1; i < 128; i++ {
|
||||
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), types.Slot(i))
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
bState, err = transition.ExecuteStateTransition(ctx, bState, wsb)
|
||||
if i == 32 {
|
||||
firstState = bState.Copy()
|
||||
}
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
service.saveInitSyncBlock(root, wsb)
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
blks = append(blks, wsb)
|
||||
blkRoots = append(blkRoots, root)
|
||||
}
|
||||
|
||||
for i := 0; i < 32; i++ {
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[i]))
|
||||
}
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: blkRoots[31][:], Epoch: 1}, [32]byte{'a'})
|
||||
service.store.SetJustifiedCheckptAndPayloadHash(ðpb.Checkpoint{Root: blkRoots[31][:], Epoch: 1}, [32]byte{'b'})
|
||||
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[31], firstState))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, firstState, blkRoots[31]))
|
||||
err = service.onBlockBatch(ctx, blks[32:], blkRoots[32:])
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -358,7 +427,7 @@ func TestStore_OnBlockBatch_DoublyLinkedTree(t *testing.T) {
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
|
||||
service.store.SetJustifiedCheckptAndPayloadHash(ðpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})
|
||||
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New()
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
service.saveInitSyncBlock(gRoot, wsb)
|
||||
@@ -370,7 +439,7 @@ func TestStore_OnBlockBatch_DoublyLinkedTree(t *testing.T) {
|
||||
var blks []interfaces.SignedBeaconBlock
|
||||
var blkRoots [][32]byte
|
||||
var firstState state.BeaconState
|
||||
for i := 1; i < 10; i++ {
|
||||
for i := 1; i < 97; i++ {
|
||||
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), types.Slot(i))
|
||||
require.NoError(t, err)
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(b)
|
||||
@@ -396,10 +465,16 @@ func TestStore_OnBlockBatch_DoublyLinkedTree(t *testing.T) {
|
||||
rBlock.Block.ParentRoot = gRoot[:]
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[0]))
|
||||
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[0], firstState))
|
||||
_, _, err = service.onBlockBatch(ctx, blks, blkRoots[1:])
|
||||
err = service.onBlockBatch(ctx, blks, blkRoots[1:])
|
||||
require.ErrorIs(t, errWrongBlockCount, err)
|
||||
_, _, err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
|
||||
service.originBlockRoot = blkRoots[1]
|
||||
err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
|
||||
require.NoError(t, err)
|
||||
jcp, err := service.store.JustifiedCheckpt()
|
||||
require.NoError(t, err)
|
||||
jroot := bytesutil.ToBytes32(jcp.Root)
|
||||
require.Equal(t, blkRoots[63], jroot)
|
||||
require.Equal(t, types.Epoch(2), service.cfg.ForkChoiceStore.JustifiedCheckpoint().Epoch)
|
||||
}
|
||||
|
||||
func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
|
||||
@@ -421,7 +496,7 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: gRoot[:]}, [32]byte{'a'})
|
||||
service.store.SetJustifiedCheckptAndPayloadHash(ðpb.Checkpoint{Root: gRoot[:]}, [32]byte{'b'})
|
||||
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New()
|
||||
service.saveInitSyncBlock(gRoot, wsb)
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
bState := st.Copy()
|
||||
@@ -452,10 +527,9 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
|
||||
rBlock.Block.ParentRoot = gRoot[:]
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[0]))
|
||||
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[0], firstState))
|
||||
cp1, cp2, err := service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
|
||||
service.originBlockRoot = blkRoots[1]
|
||||
err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, blkCount-1, len(cp1))
|
||||
require.Equal(t, blkCount-1, len(cp2))
|
||||
}
|
||||
|
||||
func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
|
||||
@@ -503,7 +577,7 @@ func TestShouldUpdateJustified_ReturnFalse_ProtoArray(t *testing.T) {
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
|
||||
service.cfg.ForkChoiceStore = protoarray.New()
|
||||
lastJustifiedBlk := util.NewBeaconBlock()
|
||||
lastJustifiedBlk.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, 32)
|
||||
lastJustifiedRoot, err := lastJustifiedBlk.Block.HashTreeRoot()
|
||||
@@ -536,7 +610,7 @@ func TestShouldUpdateJustified_ReturnFalse_DoublyLinkedTree(t *testing.T) {
|
||||
opts := testServiceOptsWithDB(t)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New()
|
||||
lastJustifiedBlk := util.NewBeaconBlock()
|
||||
lastJustifiedBlk.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, 32)
|
||||
lastJustifiedRoot, err := lastJustifiedBlk.Block.HashTreeRoot()
|
||||
@@ -583,7 +657,7 @@ func TestCachedPreState_CanGetFromStateSummary_ProtoArray(t *testing.T) {
|
||||
gRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
|
||||
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
|
||||
service.cfg.ForkChoiceStore = protoarray.New()
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
service.saveInitSyncBlock(gRoot, wsb)
|
||||
@@ -620,7 +694,7 @@ func TestCachedPreState_CanGetFromStateSummary_DoublyLinkedTree(t *testing.T) {
|
||||
gRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New()
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
service.saveInitSyncBlock(gRoot, wsb)
|
||||
@@ -654,7 +728,7 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
|
||||
gRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: gRoot[:]}, [32]byte{})
|
||||
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
|
||||
service.cfg.ForkChoiceStore = protoarray.New()
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
service.saveInitSyncBlock(gRoot, wsb)
|
||||
@@ -685,7 +759,7 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(protoarray.New(0, 0)),
|
||||
WithForkChoiceStore(protoarray.New()),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
@@ -731,8 +805,7 @@ func TestFillForkChoiceMissingBlocks_CanSave_ProtoArray(t *testing.T) {
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
|
||||
service.cfg.ForkChoiceStore = protoarray.New()
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
@@ -755,12 +828,14 @@ func TestFillForkChoiceMissingBlocks_CanSave_ProtoArray(t *testing.T) {
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: roots[0]}, [32]byte{})
|
||||
err = service.fillInForkChoiceMissingBlocks(
|
||||
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
|
||||
assert.Equal(t, 5, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
|
||||
// 4 nodes from the block tree 1. B3 - B4 - B6 - B8
|
||||
assert.Equal(t, 4, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[3])), "Didn't save node")
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[4])), "Didn't save node")
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[6])), "Didn't save node")
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[8])), "Didn't save node")
|
||||
@@ -776,8 +851,7 @@ func TestFillForkChoiceMissingBlocks_CanSave_DoublyLinkedTree(t *testing.T) {
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New()
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
@@ -801,12 +875,14 @@ func TestFillForkChoiceMissingBlocks_CanSave_DoublyLinkedTree(t *testing.T) {
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: roots[0]}, [32]byte{})
|
||||
err = service.fillInForkChoiceMissingBlocks(
|
||||
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
|
||||
assert.Equal(t, 5, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
|
||||
assert.Equal(t, 4, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[3])), "Didn't save node")
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[4])), "Didn't save node")
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[6])), "Didn't save node")
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[8])), "Didn't save node")
|
||||
@@ -822,8 +898,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_ProtoArray(t *testing.T) {
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
|
||||
service.cfg.ForkChoiceStore = protoarray.New()
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
@@ -847,14 +922,15 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_ProtoArray(t *testing.T) {
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: roots[0]}, [32]byte{})
|
||||
err = service.fillInForkChoiceMissingBlocks(
|
||||
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
|
||||
assert.Equal(t, 5, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
|
||||
// 4 nodes from the block tree 1. B3 - B4 - B6 - B8
|
||||
assert.Equal(t, 4, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
|
||||
// Ensure all roots and their respective blocks exist.
|
||||
wantedRoots := [][]byte{roots[0], roots[3], roots[4], roots[6], roots[8]}
|
||||
wantedRoots := [][]byte{roots[3], roots[4], roots[6], roots[8]}
|
||||
for i, rt := range wantedRoots {
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(rt)), fmt.Sprintf("Didn't save node: %d", i))
|
||||
assert.Equal(t, true, service.cfg.BeaconDB.HasBlock(context.Background(), bytesutil.ToBytes32(rt)))
|
||||
@@ -871,8 +947,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_DoublyLinkedTree(t *testing.T) {
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: make([]byte, 32)}, [32]byte{})
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New()
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
@@ -896,14 +971,15 @@ func TestFillForkChoiceMissingBlocks_RootsMatch_DoublyLinkedTree(t *testing.T) {
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: roots[0]}, [32]byte{})
|
||||
err = service.fillInForkChoiceMissingBlocks(
|
||||
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
|
||||
assert.Equal(t, 5, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
|
||||
// 5 nodes from the block tree 1. B3 - B4 - B6 - B8
|
||||
assert.Equal(t, 4, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
|
||||
// Ensure all roots and their respective blocks exist.
|
||||
wantedRoots := [][]byte{roots[0], roots[3], roots[4], roots[6], roots[8]}
|
||||
wantedRoots := [][]byte{roots[3], roots[4], roots[6], roots[8]}
|
||||
for i, rt := range wantedRoots {
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(rt)), fmt.Sprintf("Didn't save node: %d", i))
|
||||
assert.Equal(t, true, service.cfg.BeaconDB.HasBlock(context.Background(), bytesutil.ToBytes32(rt)))
|
||||
@@ -920,9 +996,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_ProtoArray(t *testing.T) {
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
|
||||
// Set finalized epoch to 1.
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Epoch: 1}, [32]byte{})
|
||||
service.cfg.ForkChoiceStore = protoarray.New()
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
@@ -936,7 +1010,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_ProtoArray(t *testing.T) {
|
||||
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
|
||||
|
||||
// Define a tree branch, slot 63 <- 64 <- 65
|
||||
// Define a tree branch, slot 63 <- 64 <- 65 <- 66
|
||||
b63 := util.NewBeaconBlock()
|
||||
b63.Block.Slot = 63
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(b63)
|
||||
@@ -955,20 +1029,28 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_ProtoArray(t *testing.T) {
|
||||
b65 := util.NewBeaconBlock()
|
||||
b65.Block.Slot = 65
|
||||
b65.Block.ParentRoot = r64[:]
|
||||
r65, err := b65.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(b65)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
b66 := util.NewBeaconBlock()
|
||||
b66.Block.Slot = 66
|
||||
b66.Block.ParentRoot = r65[:]
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(b66)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
|
||||
beaconState, _ := util.DeterministicGenesisState(t, 32)
|
||||
// Set finalized epoch to 2.
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Epoch: 2, Root: r64[:]}, [32]byte{})
|
||||
err = service.fillInForkChoiceMissingBlocks(
|
||||
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
// There should be 2 nodes, block 65 and block 64.
|
||||
assert.Equal(t, 2, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
|
||||
|
||||
// Block with slot 63 should be in fork choice because it's less than finalized epoch 1.
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r63), "Didn't save node")
|
||||
// We should have saved 1 node: block 65
|
||||
assert.Equal(t, 1, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r65), "Didn't save node")
|
||||
}
|
||||
|
||||
func TestFillForkChoiceMissingBlocks_FilterFinalized_DoublyLinkedTree(t *testing.T) {
|
||||
@@ -981,9 +1063,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_DoublyLinkedTree(t *testing
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New(0, 0)
|
||||
// Set finalized epoch to 1.
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Epoch: 1}, [32]byte{})
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New()
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
@@ -1016,27 +1096,75 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized_DoublyLinkedTree(t *testing
|
||||
b65 := util.NewBeaconBlock()
|
||||
b65.Block.Slot = 65
|
||||
b65.Block.ParentRoot = r64[:]
|
||||
r65, err := b65.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(b65)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
b66 := util.NewBeaconBlock()
|
||||
b66.Block.Slot = 66
|
||||
b66.Block.ParentRoot = r65[:]
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(b66)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
|
||||
beaconState, _ := util.DeterministicGenesisState(t, 32)
|
||||
|
||||
// Set finalized epoch to 1.
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Epoch: 2, Root: r64[:]}, [32]byte{})
|
||||
err = service.fillInForkChoiceMissingBlocks(
|
||||
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
// There should be 2 nodes, block 65 and block 64.
|
||||
assert.Equal(t, 2, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
|
||||
// There should be 1 node: block 65
|
||||
assert.Equal(t, 1, service.cfg.ForkChoiceStore.NodeCount(), "Miss match nodes")
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r65), "Didn't save node")
|
||||
}
|
||||
|
||||
// Block with slot 63 should be in fork choice because it's less than finalized epoch 1.
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r63), "Didn't save node")
|
||||
func TestFillForkChoiceMissingBlocks_FinalizedSibling_DoublyLinkedTree(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore = doublylinkedtree.New()
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
validGenesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
|
||||
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
beaconState, _ := util.DeterministicGenesisState(t, 32)
|
||||
blk := util.NewBeaconBlock()
|
||||
blk.Block.Slot = 9
|
||||
blk.Block.ParentRoot = roots[8]
|
||||
|
||||
wsb, err = wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: roots[1]}, [32]byte{})
|
||||
err = service.fillInForkChoiceMissingBlocks(
|
||||
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
|
||||
require.ErrorIs(t, errNotDescendantOfFinalized, err)
|
||||
}
|
||||
|
||||
// blockTree1 constructs the following tree:
|
||||
// /- B1
|
||||
// B0 /- B5 - B7
|
||||
// \- B3 - B4 - B6 - B8
|
||||
// (B1, and B3 are all from the same slots)
|
||||
func blockTree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][]byte, error) {
|
||||
genesisRoot = bytesutil.PadTo(genesisRoot, 32)
|
||||
b0 := util.NewBeaconBlock()
|
||||
@@ -1144,7 +1272,7 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -1219,7 +1347,9 @@ func TestAncestor_CanUseForkchoice(t *testing.T) {
|
||||
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), params.BeaconConfig().ZeroHash, 0, 0)) // Saves blocks to fork choice store.
|
||||
state, blkRoot, err := prepareForkchoiceState(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
}
|
||||
|
||||
r, err := service.ancestor(context.Background(), r200[:], 150)
|
||||
@@ -1233,7 +1363,7 @@ func TestAncestor_CanUseDB(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -1266,7 +1396,9 @@ func TestAncestor_CanUseDB(t *testing.T) {
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb)) // Saves blocks to DB.
|
||||
}
|
||||
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(context.Background(), 200, r200, r200, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
state, blkRoot, err := prepareForkchoiceState(context.Background(), 200, r200, r200, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
r, err := service.ancestor(context.Background(), r200[:], 150)
|
||||
require.NoError(t, err)
|
||||
@@ -1293,7 +1425,7 @@ func TestVerifyBlkDescendant(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -1438,7 +1570,7 @@ func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
|
||||
func TestOnBlock_CanFinalize(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
opts := []Option{
|
||||
@@ -1447,6 +1579,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
|
||||
WithForkChoiceStore(fcs),
|
||||
WithDepositCache(depositCache),
|
||||
WithStateNotifier(&mock.MockStateNotifier{}),
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
@@ -1494,7 +1627,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
|
||||
func TestOnBlock_NilBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
opts := []Option{
|
||||
@@ -1513,7 +1646,7 @@ func TestOnBlock_NilBlock(t *testing.T) {
|
||||
func TestOnBlock_InvalidSignature(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
opts := []Option{
|
||||
@@ -1554,7 +1687,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
|
||||
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
opts := []Option{
|
||||
@@ -1563,6 +1696,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
|
||||
WithForkChoiceStore(fcs),
|
||||
WithDepositCache(depositCache),
|
||||
WithStateNotifier(&mock.MockStateNotifier{}),
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
@@ -1685,11 +1819,6 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRemoveBlockAttestationsInPool_Canonical(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
CorrectlyPruneCanonicalAtts: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
genesis, keys := util.DeterministicGenesisState(t, 64)
|
||||
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
|
||||
assert.NoError(t, err)
|
||||
@@ -1711,11 +1840,6 @@ func TestRemoveBlockAttestationsInPool_Canonical(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRemoveBlockAttestationsInPool_NonCanonical(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
CorrectlyPruneCanonicalAtts: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
genesis, keys := util.DeterministicGenesisState(t, 64)
|
||||
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
|
||||
assert.NoError(t, err)
|
||||
@@ -1792,12 +1916,13 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
|
||||
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
@@ -1920,7 +2045,7 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
|
||||
func TestService_insertSlashingsToForkChoiceStore(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
@@ -1965,13 +2090,13 @@ func TestService_insertSlashingsToForkChoiceStore(t *testing.T) {
|
||||
b.Block.Body.AttesterSlashings = slashings
|
||||
wb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
service.insertSlashingsToForkChoiceStore(ctx, wb.Block().Body().AttesterSlashings())
|
||||
service.InsertSlashingsToForkChoiceStore(ctx, wb.Block().Body().AttesterSlashings())
|
||||
}
|
||||
|
||||
func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
opts := []Option{
|
||||
@@ -1980,6 +2105,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
WithForkChoiceStore(fcs),
|
||||
WithDepositCache(depositCache),
|
||||
WithStateNotifier(&mock.MockStateNotifier{}),
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
@@ -2043,7 +2169,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r2))
|
||||
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r3))
|
||||
require.NoError(t, service.cfg.BeaconDB.DeleteBlock(ctx, r4))
|
||||
service.cfg.ForkChoiceStore = protoarray.New(0, 0)
|
||||
service.cfg.ForkChoiceStore = protoarray.New()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2051,7 +2177,7 @@ func Test_verifyBlkFinalizedSlot_invalidBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
fcs := protoarray.New(0, 0)
|
||||
fcs := protoarray.New()
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
|
||||
@@ -164,21 +164,26 @@ func (s *Service) UpdateHead(ctx context.Context) error {
if err != nil {
log.WithError(err).Warn("Resolving fork due to new attestation")
}
s.headLock.RLock()
if s.headRoot() != newHeadRoot {
log.WithFields(logrus.Fields{
"oldHeadRoot": fmt.Sprintf("%#x", s.headRoot()),
"newHeadRoot": fmt.Sprintf("%#x", newHeadRoot),
}).Debug("Head changed due to attestations")
}
s.headLock.RUnlock()
s.notifyEngineIfChangedHead(ctx, newHeadRoot)
return nil
}

// This calls notify Forkchoice Update in the event that the head has changed
func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32]byte) {
s.headLock.RLock()
if newHeadRoot == [32]byte{} || s.headRoot() == newHeadRoot {
s.headLock.RUnlock()
return
}
s.headLock.RUnlock()

if !s.hasBlockInInitSyncOrDB(ctx, newHeadRoot) {
log.Debug("New head does not exist in DB. Do nothing")

@@ -120,7 +120,9 @@ func TestProcessAttestations_Ok(t *testing.T) {
|
||||
copied, err = transition.ProcessSlots(ctx, copied, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
|
||||
service.processAttestations(ctx)
|
||||
require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations()))
|
||||
@@ -225,7 +227,9 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: tRoot[:]}))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
|
||||
b := util.NewBeaconBlock()
|
||||
wb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
@@ -233,6 +237,9 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wb))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, wb.Block().Slot(), r, bytesutil.ToBytes32(wb.Block().ParentRoot()), [32]byte{}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
service.head.root = r // Old head
|
||||
require.Equal(t, 1, len(service.cfg.AttPool.ForkchoiceAttestations()))
|
||||
require.NoError(t, err, service.UpdateHead(ctx))
|
||||
|
||||
@@ -71,11 +71,15 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.SignedBeaco

// Log block sync status.
if err := logBlockSyncStatus(blockCopy.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix())); err != nil {
return err
log.WithError(err).Error("Unable to log block sync status")
}
// Log payload data
if err := logPayload(blockCopy.Block()); err != nil {
log.WithError(err).Error("Unable to log debug block payload data")
}
// Log state transition data.
if err := logStateTransitionData(blockCopy.Block()); err != nil {
return err
log.WithError(err).Error("Unable to log state transition data")
}

return nil
@@ -89,8 +93,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Sig
defer span.End()

// Apply state transition on the incoming newly received block batches, one by one.
_, _, err := s.onBlockBatch(ctx, blocks, blkRoots)
if err != nil {
if err := s.onBlockBatch(ctx, blocks, blkRoots); err != nil {
err := errors.Wrap(err, "could not process block in batch")
tracing.AnnotateError(span, err)
return err
@@ -144,7 +147,7 @@ func (s *Service) HasBlock(ctx context.Context, root [32]byte) bool {

// ReceiveAttesterSlashing receives an attester slashing and inserts it to forkchoice
func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing *ethpb.AttesterSlashing) {
s.insertSlashingsToForkChoiceStore(ctx, []*ethpb.AttesterSlashing{slashing})
s.InsertSlashingsToForkChoiceStore(ctx, []*ethpb.AttesterSlashing{slashing})
}

func (s *Service) handlePostBlockOperations(b interfaces.BeaconBlock) error {

@@ -127,7 +127,7 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithForkChoiceStore(protoarray.New(0, 0)),
|
||||
WithForkChoiceStore(protoarray.New()),
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
WithExitPool(voluntaryexits.NewPool()),
|
||||
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
|
||||
@@ -168,7 +168,7 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
|
||||
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithForkChoiceStore(protoarray.New(0, 0)),
|
||||
WithForkChoiceStore(protoarray.New()),
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
WithExitPool(voluntaryexits.NewPool()),
|
||||
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
|
||||
@@ -248,7 +248,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithForkChoiceStore(protoarray.New(0, 0)),
|
||||
WithForkChoiceStore(protoarray.New()),
|
||||
WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
|
||||
@@ -23,6 +23,7 @@ import (
f "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
@@ -40,7 +41,6 @@ import (
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/time"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)

@@ -138,19 +138,25 @@ func (s *Service) Start() {
}
}
s.spawnProcessAttestationsRoutine(s.cfg.StateNotifier.StateFeed())
s.fillMissingPayloadIDRoutine(s.ctx, s.cfg.StateNotifier.StateFeed())
}

// Stop the blockchain service's main event loop and associated goroutines.
func (s *Service) Stop() error {
defer s.cancel()

// lock before accessing s.head, s.head.state, s.head.state.FinalizedCheckpoint().Root
s.headLock.RLock()
if s.cfg.StateGen != nil && s.head != nil && s.head.state != nil {
r := s.head.state.FinalizedCheckpoint().Root
s.headLock.RUnlock()
// Save the last finalized state so that starting up in the following run will be much faster.
if err := s.cfg.StateGen.ForceCheckpoint(s.ctx, s.head.state.FinalizedCheckpoint().Root); err != nil {
if err := s.cfg.StateGen.ForceCheckpoint(s.ctx, r); err != nil {
return err
}
} else {
s.headLock.RUnlock()
}

// Save initial sync cached blocks to the DB before stop.
return s.cfg.BeaconDB.SaveBlocks(s.ctx, s.getInitSyncBlocks())
}
@@ -188,31 +194,40 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
if err != nil {
return errors.Wrap(err, "could not get justified checkpoint")
}
if justified == nil {
return errNilJustifiedCheckpoint
}
finalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(s.ctx)
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint")
}
if finalized == nil {
return errNilFinalizedCheckpoint
}
s.store = store.New(justified, finalized)

var forkChoicer f.ForkChoicer
fRoot := bytesutil.ToBytes32(finalized.Root)
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
if features.Get().EnableForkChoiceDoublyLinkedTree {
forkChoicer = doublylinkedtree.New(justified.Epoch, finalized.Epoch)
forkChoicer = doublylinkedtree.New()
} else {
forkChoicer = protoarray.New(justified.Epoch, finalized.Epoch)
forkChoicer = protoarray.New()
}
s.cfg.ForkChoiceStore = forkChoicer
fb, err := s.getBlock(s.ctx, s.ensureRootNotZeros(fRoot))
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint block")
if err := forkChoicer.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
}
payloadHash, err := getBlockPayloadHash(fb.Block())
if err != nil {
return errors.Wrap(err, "could not get execution payload hash")
if err := forkChoicer.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
}
fSlot := fb.Block().Slot()
if err := forkChoicer.InsertOptimisticBlock(s.ctx, fSlot, fRoot, params.BeaconConfig().ZeroHash,
payloadHash, justified.Epoch, finalized.Epoch); err != nil {

st, err := s.cfg.StateGen.StateByRoot(s.ctx, fRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint state")
}
if err := forkChoicer.InsertNode(s.ctx, st, fRoot); err != nil {
return errors.Wrap(err, "could not insert finalized block to forkchoice")
}

@@ -225,18 +240,6 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
return errors.Wrap(err, "could not set finalized block as validated")
}
}

h := s.headBlock().Block()
if h.Slot() > fSlot {
log.WithFields(logrus.Fields{
"startSlot": fSlot,
"endSlot": h.Slot(),
}).Info("Loading blocks to fork choice store, this may take a while.")
if err := s.fillInForkChoiceMissingBlocks(s.ctx, h, finalized, justified); err != nil {
return errors.Wrap(err, "could not fill in fork choice store missing blocks")
}
}

// not attempting to save initial sync blocks here, because there shouldn't be any until
|
||||
// after the statefeed.Initialized event is fired (below)
|
||||
if err := s.wsVerifier.VerifyWeakSubjectivity(s.ctx, finalized.Epoch); err != nil {
|
||||
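
The StartFromSavedState hunks above change how fork choice is seeded at startup: the constructors no longer take the justified and finalized epochs, the two checkpoints are pushed explicitly, and the finalized checkpoint state (not a synthetic optimistic block) is inserted as the root node. A minimal sketch of that sequence follows, reusing identifiers and imports already present in this file; the helper name and the narrow interface are hypothetical, and the error wraps mirror those used above:

// bootstrapForkChoice mirrors the reworked startup flow shown in the diff above.
// stateByRooter is a hypothetical narrow interface standing in for StateGen.
type stateByRooter interface {
	StateByRoot(ctx context.Context, root [32]byte) (state.BeaconState, error)
}

func bootstrapForkChoice(ctx context.Context, sg stateByRooter, justified, finalized *ethpb.Checkpoint, fRoot [32]byte) (f.ForkChoicer, error) {
	fc := protoarray.New() // doublylinkedtree.New() when the doubly-linked-tree feature flag is enabled
	jcp := &forkchoicetypes.Checkpoint{Epoch: justified.Epoch, Root: bytesutil.ToBytes32(justified.Root)}
	if err := fc.UpdateJustifiedCheckpoint(jcp); err != nil {
		return nil, errors.Wrap(err, "could not update forkchoice's justified checkpoint")
	}
	fcp := &forkchoicetypes.Checkpoint{Epoch: finalized.Epoch, Root: bytesutil.ToBytes32(finalized.Root)}
	if err := fc.UpdateFinalizedCheckpoint(fcp); err != nil {
		return nil, errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
	}
	st, err := sg.StateByRoot(ctx, fRoot) // finalized checkpoint state
	if err != nil {
		return nil, errors.Wrap(err, "could not get finalized checkpoint state")
	}
	// InsertNode derives slot, parent root, and payload hash from the state,
	// replacing the old InsertOptimisticBlock call.
	return fc, fc.InsertNode(ctx, st, fRoot)
}
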
@@ -439,7 +442,7 @@ func (s *Service) initializeBeaconChain(
|
||||
s.cfg.ChainStartFetcher.ClearPreGenesisData()
|
||||
|
||||
// Update committee shuffled indices for genesis epoch.
|
||||
if err := helpers.UpdateCommitteeCache(genesisState, 0 /* genesis epoch */); err != nil {
|
||||
if err := helpers.UpdateCommitteeCache(ctx, genesisState, 0); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := helpers.UpdateProposerIndicesInCache(ctx, genesisState); err != nil {
|
||||
@@ -472,17 +475,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
|
||||
genesisCheckpoint := genesisState.FinalizedCheckpoint()
|
||||
s.store = store.New(genesisCheckpoint, genesisCheckpoint)
|
||||
|
||||
payloadHash, err := getBlockPayloadHash(genesisBlk.Block())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx,
|
||||
genesisBlk.Block().Slot(),
|
||||
genesisBlkRoot,
|
||||
params.BeaconConfig().ZeroHash,
|
||||
payloadHash,
|
||||
genesisCheckpoint.Epoch,
|
||||
genesisCheckpoint.Epoch); err != nil {
|
||||
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, genesisState, genesisBlkRoot); err != nil {
|
||||
log.Fatalf("Could not process genesis block for fork choice: %v", err)
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, genesisBlkRoot); err != nil {
|
||||
|
||||
@@ -130,7 +130,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
WithP2PBroadcaster(&mockBroadcaster{}),
|
||||
WithStateNotifier(&mockBeaconNode{}),
|
||||
WithForkChoiceStore(protoarray.New(0, 0)),
|
||||
WithForkChoiceStore(protoarray.New()),
|
||||
WithAttestationService(attService),
|
||||
WithStateGen(stateGen),
|
||||
}
|
||||
@@ -505,7 +505,7 @@ func TestHasBlock_ForkChoiceAndDB_ProtoArray(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
cfg: &config{ForkChoiceStore: protoarray.New(0, 0), BeaconDB: beaconDB},
|
||||
cfg: &config{ForkChoiceStore: protoarray.New(), BeaconDB: beaconDB},
|
||||
store: &store.Store{},
|
||||
}
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
|
||||
@@ -526,7 +526,7 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0), BeaconDB: beaconDB},
|
||||
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
|
||||
store: &store.Store{},
|
||||
}
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
|
||||
@@ -599,7 +599,7 @@ func BenchmarkHasBlockForkChoiceStore_ProtoArray(b *testing.B) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(b)
|
||||
s := &Service{
|
||||
cfg: &config{ForkChoiceStore: protoarray.New(0, 0), BeaconDB: beaconDB},
|
||||
cfg: &config{ForkChoiceStore: protoarray.New(), BeaconDB: beaconDB},
|
||||
store: &store.Store{},
|
||||
}
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
|
||||
@@ -622,7 +622,7 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(b)
|
||||
s := &Service{
|
||||
cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0), BeaconDB: beaconDB},
|
||||
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
|
||||
store: &store.Store{},
|
||||
}
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}, [32]byte{})
|
||||
|
||||
@@ -62,6 +62,7 @@ type ChainService struct {
|
||||
Genesis time.Time
|
||||
ForkChoiceStore forkchoice.ForkChoicer
|
||||
ReceiveBlockMockErr error
|
||||
OptimisticCheckRootReceived [32]byte
|
||||
}
|
||||
|
||||
// ForkChoicer mocks the same method in the chain service
|
||||
@@ -447,7 +448,8 @@ func (s *ChainService) IsOptimistic(_ context.Context) (bool, error) {
|
||||
}
|
||||
|
||||
// IsOptimisticForRoot mocks the same method in the chain service.
|
||||
func (s *ChainService) IsOptimisticForRoot(_ context.Context, _ [32]byte) (bool, error) {
|
||||
func (s *ChainService) IsOptimisticForRoot(_ context.Context, root [32]byte) (bool, error) {
|
||||
s.OptimisticCheckRootReceived = root
|
||||
return s.Optimistic, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -30,8 +30,8 @@ type WeakSubjectivityVerifier struct {
|
||||
// NewWeakSubjectivityVerifier validates a checkpoint, and if valid, uses it to initialize a weak subjectivity verifier.
|
||||
func NewWeakSubjectivityVerifier(wsc *ethpb.Checkpoint, db weakSubjectivityDB) (*WeakSubjectivityVerifier, error) {
|
||||
if wsc == nil || len(wsc.Root) == 0 || wsc.Epoch == 0 {
|
||||
log.Info("No checkpoint for syncing provided, node will begin syncing from genesis. Checkpoint Sync is an optional feature that allows your node to sync from a more recent checkpoint, " +
|
||||
"which enhances the security of your local beacon node and the broader network. See https://docs.prylabs.network/docs/next/prysm-usage/checkpoint-sync/ to learn how to configure Checkpoint Sync.")
|
||||
log.Info("--weak-subjectivity-checkpoint not provided. Prysm recommends providing a weak subjectivity checkpoint" +
|
||||
"for nodes synced from genesis, or manual verification of block and state roots for checkpoint sync nodes.")
|
||||
return &WeakSubjectivityVerifier{
|
||||
enabled: false,
|
||||
}, nil
|
||||
|
||||
31
beacon-chain/builder/BUILD.bazel
Normal file
@@ -0,0 +1,31 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"error.go",
|
||||
"metric.go",
|
||||
"option.go",
|
||||
"service.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/builder",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api/client/builder:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//network:go_default_library",
|
||||
"//network/authorization:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
)
|
||||
7
beacon-chain/builder/error.go
Normal file
@@ -0,0 +1,7 @@
|
||||
package builder
|
||||
|
||||
import "github.com/pkg/errors"
|
||||
|
||||
var (
|
||||
ErrNotRunning = errors.New("builder is not running")
|
||||
)
|
||||
37
beacon-chain/builder/metric.go
Normal file
@@ -0,0 +1,37 @@
|
||||
package builder
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
var (
|
||||
submitBlindedBlockLatency = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "submit_blinded_block_latency_milliseconds",
|
||||
Help: "Captures RPC latency for submitting blinded block in milliseconds",
|
||||
Buckets: []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000},
|
||||
},
|
||||
)
|
||||
getHeaderLatency = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "get_header_latency_milliseconds",
|
||||
Help: "Captures RPC latency for get header in milliseconds",
|
||||
Buckets: []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000},
|
||||
},
|
||||
)
|
||||
getStatusLatency = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "get_status_latency_milliseconds",
|
||||
Help: "Captures RPC latency for get status in milliseconds",
|
||||
Buckets: []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000},
|
||||
},
|
||||
)
|
||||
registerValidatorLatency = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "register_validator_latency_milliseconds",
|
||||
Help: "Captures RPC latency for register validator in milliseconds",
|
||||
Buckets: []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000},
|
||||
},
|
||||
)
|
||||
)
|
||||
45
beacon-chain/builder/option.go
Normal file
@@ -0,0 +1,45 @@
|
||||
package builder
|
||||
|
||||
import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/network"
|
||||
"github.com/prysmaticlabs/prysm/network/authorization"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
type Option func(s *Service) error
|
||||
|
||||
// FlagOptions for builder service flag configurations.
|
||||
func FlagOptions(c *cli.Context) ([]Option, error) {
|
||||
endpoint := c.String(flags.MevRelayEndpoint.Name)
|
||||
opts := []Option{
|
||||
WithBuilderEndpoints(endpoint),
|
||||
}
|
||||
return opts, nil
|
||||
}
|
||||
|
||||
// WithBuilderEndpoints sets the endpoint for the beacon chain builder service.
|
||||
func WithBuilderEndpoints(endpoint string) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.builderEndpoint = covertEndPoint(endpoint)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithDatabase sets the database for the beacon chain builder service.
|
||||
func WithDatabase(database db.HeadAccessDatabase) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.beaconDB = database
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func covertEndPoint(ep string) network.Endpoint {
|
||||
return network.Endpoint{
|
||||
Url: ep,
|
||||
Auth: network.AuthorizationData{ // Auth is not used for builder.
|
||||
Method: authorization.None,
|
||||
Value: "",
|
||||
}}
|
||||
}
|
||||
134
beacon-chain/builder/service.go
Normal file
@@ -0,0 +1,134 @@
|
||||
package builder
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/api/client/builder"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/network"
|
||||
v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// BlockBuilder defines the interface for interacting with the block builder
|
||||
type BlockBuilder interface {
|
||||
SubmitBlindedBlock(ctx context.Context, block *ethpb.SignedBlindedBeaconBlockBellatrix) (*v1.ExecutionPayload, error)
|
||||
GetHeader(ctx context.Context, slot types.Slot, parentHash [32]byte, pubKey [48]byte) (*ethpb.SignedBuilderBid, error)
|
||||
Status(ctx context.Context) error
|
||||
RegisterValidator(ctx context.Context, reg *ethpb.SignedValidatorRegistrationV1) error
|
||||
}
|
||||
|
||||
// config defines a config struct for dependencies into the service.
|
||||
type config struct {
|
||||
builderEndpoint network.Endpoint
|
||||
beaconDB db.HeadAccessDatabase
|
||||
headFetcher blockchain.HeadFetcher
|
||||
}
|
||||
|
||||
// Service defines a service that provides a client for interacting with the beacon chain and MEV relay network.
|
||||
type Service struct {
|
||||
cfg *config
|
||||
c *builder.Client
|
||||
}
|
||||
|
||||
// NewService instantiates a new service.
|
||||
func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
s := &Service{}
|
||||
for _, opt := range opts {
|
||||
if err := opt(s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if s.cfg.builderEndpoint.Url != "" {
|
||||
c, err := builder.NewClient(s.cfg.builderEndpoint.Url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.c = c
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// Start initializes the service.
|
||||
func (*Service) Start() {}
|
||||
|
||||
// Stop halts the service.
|
||||
func (*Service) Stop() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SubmitBlindedBlock submits a blinded block to the builder relay network.
|
||||
func (s *Service) SubmitBlindedBlock(ctx context.Context, b *ethpb.SignedBlindedBeaconBlockBellatrix) (*v1.ExecutionPayload, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "builder.SubmitBlindedBlock")
|
||||
defer span.End()
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
submitBlindedBlockLatency.Observe(float64(time.Since(start).Milliseconds()))
|
||||
}()
|
||||
|
||||
return s.c.SubmitBlindedBlock(ctx, b)
|
||||
}
|
||||
|
||||
// GetHeader retrieves the header for a given slot and parent hash from the builder relay network.
|
||||
func (s *Service) GetHeader(ctx context.Context, slot types.Slot, parentHash [32]byte, pubKey [48]byte) (*ethpb.SignedBuilderBid, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "builder.GetHeader")
|
||||
defer span.End()
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
getHeaderLatency.Observe(float64(time.Since(start).Milliseconds()))
|
||||
}()
|
||||
|
||||
return s.c.GetHeader(ctx, slot, parentHash, pubKey)
|
||||
}
|
||||
|
||||
// Status retrieves the status of the builder relay network.
|
||||
func (s *Service) Status(ctx context.Context) error {
|
||||
ctx, span := trace.StartSpan(ctx, "builder.Status")
|
||||
defer span.End()
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
getStatusLatency.Observe(float64(time.Since(start).Milliseconds()))
|
||||
}()
|
||||
|
||||
return s.c.Status(ctx)
|
||||
}
|
||||
|
||||
// RegisterValidator registers a validator with the builder relay network.
|
||||
// It also saves the registration object to the DB.
|
||||
func (s *Service) RegisterValidator(ctx context.Context, reg []*ethpb.SignedValidatorRegistrationV1) error {
|
||||
ctx, span := trace.StartSpan(ctx, "builder.RegisterValidator")
|
||||
defer span.End()
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
registerValidatorLatency.Observe(float64(time.Since(start).Milliseconds()))
|
||||
}()
|
||||
|
||||
idxs := make([]types.ValidatorIndex, 0)
|
||||
msgs := make([]*ethpb.ValidatorRegistrationV1, 0)
|
||||
valid := make([]*ethpb.SignedValidatorRegistrationV1, 0)
|
||||
for i := 0; i < len(reg); i++ {
|
||||
r := reg[i]
|
||||
nx, exists := s.cfg.headFetcher.HeadPublicKeyToValidatorIndex(bytesutil.ToBytes48(r.Message.Pubkey))
|
||||
if !exists {
|
||||
// we want to allow validators to set up keys that haven't been added to the beaconstate validator list yet,
|
||||
// so we should tolerate keys that do not seem to be valid by skipping past them.
|
||||
log.Warnf("Skipping validator registration for pubkey=%#x - not in current validator set.", r.Message.Pubkey)
|
||||
continue
|
||||
}
|
||||
idxs = append(idxs, nx)
|
||||
msgs = append(msgs, r.Message)
|
||||
valid = append(valid, r)
|
||||
}
|
||||
if err := s.c.RegisterValidator(ctx, valid); err != nil {
|
||||
return errors.Wrap(err, "could not register validator(s)")
|
||||
}
|
||||
|
||||
return s.cfg.beaconDB.SaveRegistrationsByValidatorIDs(ctx, idxs, msgs)
|
||||
}
|
||||
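
For readers wiring the new builder service in, a hedged usage sketch (not part of the diff; the relay URL is a placeholder and error handling is trimmed). It only exercises options and methods that appear in the files above:

// Illustrative construction of the builder service and a status probe.
svc, err := builder.NewService(ctx,
	builder.WithBuilderEndpoints("http://localhost:18550"), // hypothetical MEV relay endpoint
	builder.WithDatabase(beaconDB),                         // a db.HeadAccessDatabase
)
if err != nil {
	return err
}
if err := svc.Status(ctx); err != nil {
	log.WithError(err).Warn("Builder relay did not respond to status check")
}
// bid is an *ethpb.SignedBuilderBid for the requested slot, parent hash, and proposer key.
bid, err := svc.GetHeader(ctx, slot, parentHash, proposerPubkey)
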
1
beacon-chain/cache/active_balance.go
vendored
@@ -1,5 +1,4 @@
|
||||
//go:build !fuzz
|
||||
// +build !fuzz
|
||||
|
||||
package cache
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
//go:build fuzz
|
||||
// +build fuzz
|
||||
|
||||
package cache
|
||||
|
||||
|
||||
1
beacon-chain/cache/active_balance_test.go
vendored
@@ -1,5 +1,4 @@
|
||||
//go:build !fuzz
|
||||
// +build !fuzz
|
||||
|
||||
package cache
|
||||
|
||||
|
||||
6
beacon-chain/cache/committee.go
vendored
@@ -1,5 +1,4 @@
|
||||
//go:build !fuzz
|
||||
// +build !fuzz
|
||||
|
||||
package cache
|
||||
|
||||
@@ -103,9 +102,12 @@ func (c *CommitteeCache) Committee(ctx context.Context, slot types.Slot, seed [3
|
||||
|
||||
// AddCommitteeShuffledList adds Committee shuffled list object to the cache. T
|
||||
// his method also trims the least recently list if the cache size has ready the max cache size limit.
|
||||
func (c *CommitteeCache) AddCommitteeShuffledList(committees *Committees) error {
|
||||
func (c *CommitteeCache) AddCommitteeShuffledList(ctx context.Context, committees *Committees) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if err := ctx.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
key, err := committeeKeyFn(committees)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
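
The signature change above threads a context into cache insertion so that a cancelled request stops before mutating the cache. A small caller-side sketch, mirroring the test added in committee_test.go further down and using only identifiers from this package:

c := cache.NewCommitteesCache()
ctx, cancel := context.WithCancel(context.Background())
cancel() // simulate an aborted request
err := c.AddCommitteeShuffledList(ctx, &cache.Committees{Seed: [32]byte{'A'}})
// err is context.Canceled and the cache stays empty; with a live context the
// entry is inserted and the oldest entry is trimmed once the size limit is hit.
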
3
beacon-chain/cache/committee_disabled.go
vendored
@@ -1,5 +1,4 @@
|
||||
//go:build fuzz
|
||||
// +build fuzz
|
||||
|
||||
// This file is used in fuzzer builds to bypass global committee caches.
|
||||
package cache
|
||||
@@ -27,7 +26,7 @@ func (c *FakeCommitteeCache) Committee(ctx context.Context, slot types.Slot, see
|
||||
|
||||
// AddCommitteeShuffledList adds Committee shuffled list object to the cache. T
|
||||
// his method also trims the least recently list if the cache size has ready the max cache size limit.
|
||||
func (c *FakeCommitteeCache) AddCommitteeShuffledList(committees *Committees) error {
|
||||
func (c *FakeCommitteeCache) AddCommitteeShuffledList(ctx context.Context, committees *Committees) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
5
beacon-chain/cache/committee_fuzz_test.go
vendored
@@ -1,5 +1,4 @@
|
||||
//go:build !fuzz
|
||||
// +build !fuzz
|
||||
|
||||
package cache
|
||||
|
||||
@@ -31,7 +30,7 @@ func TestCommitteeCache_FuzzCommitteesByEpoch(t *testing.T) {
|
||||
|
||||
for i := 0; i < 100000; i++ {
|
||||
fuzzer.Fuzz(c)
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(c))
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), c))
|
||||
_, err := cache.Committee(context.Background(), 0, c.Seed, 0)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -46,7 +45,7 @@ func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
|
||||
|
||||
for i := 0; i < 100000; i++ {
|
||||
fuzzer.Fuzz(c)
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(c))
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), c))
|
||||
|
||||
indices, err := cache.ActiveIndices(context.Background(), c.Seed)
|
||||
require.NoError(t, err)
|
||||
|
||||
26
beacon-chain/cache/committee_test.go
vendored
@@ -1,5 +1,4 @@
|
||||
//go:build !fuzz
|
||||
// +build !fuzz
|
||||
|
||||
package cache
|
||||
|
||||
@@ -50,7 +49,7 @@ func TestCommitteeCache_CommitteesByEpoch(t *testing.T) {
|
||||
if indices != nil {
|
||||
t.Error("Expected committee not to exist in empty cache")
|
||||
}
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(item))
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), item))
|
||||
|
||||
wantedIndex := types.CommitteeIndex(0)
|
||||
indices, err = cache.Committee(context.Background(), slot, item.Seed, wantedIndex)
|
||||
@@ -70,7 +69,7 @@ func TestCommitteeCache_ActiveIndices(t *testing.T) {
|
||||
t.Error("Expected committee not to exist in empty cache")
|
||||
}
|
||||
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(item))
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), item))
|
||||
|
||||
indices, err = cache.ActiveIndices(context.Background(), item.Seed)
|
||||
require.NoError(t, err)
|
||||
@@ -85,7 +84,7 @@ func TestCommitteeCache_ActiveCount(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, count, "Expected active count not to exist in empty cache")
|
||||
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(item))
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), item))
|
||||
|
||||
count, err = cache.ActiveIndicesCount(context.Background(), item.Seed)
|
||||
require.NoError(t, err)
|
||||
@@ -101,7 +100,7 @@ func TestCommitteeCache_CanRotate(t *testing.T) {
|
||||
for i := start; i < end; i++ {
|
||||
s := []byte(strconv.Itoa(i))
|
||||
item := &Committees{Seed: bytesutil.ToBytes32(s)}
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(item))
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(context.Background(), item))
|
||||
}
|
||||
|
||||
k := cache.CommitteeCache.Keys()
|
||||
@@ -134,3 +133,20 @@ func TestCommitteeCacheOutOfRange(t *testing.T) {
|
||||
_, err = cache.Committee(context.Background(), 0, seed, math.MaxUint64) // Overflow!
|
||||
require.NotNil(t, err, "Did not fail as expected")
|
||||
}
|
||||
|
||||
func TestCommitteeCache_DoesNothingWhenCancelledContext(t *testing.T) {
|
||||
cache := NewCommitteesCache()
|
||||
|
||||
item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []types.ValidatorIndex{1, 2, 3, 4, 5, 6}}
|
||||
count, err := cache.ActiveIndicesCount(context.Background(), item.Seed)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, count, "Expected active count not to exist in empty cache")
|
||||
|
||||
cancelled, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
require.ErrorIs(t, cache.AddCommitteeShuffledList(cancelled, item), context.Canceled)
|
||||
|
||||
count, err = cache.ActiveIndicesCount(context.Background(), item.Seed)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, count)
|
||||
}
|
||||
|
||||
1
beacon-chain/cache/proposer_indices.go
vendored
@@ -1,5 +1,4 @@
|
||||
//go:build !fuzz
|
||||
// +build !fuzz
|
||||
|
||||
package cache
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
//go:build fuzz
|
||||
// +build fuzz
|
||||
|
||||
// This file is used in fuzzer builds to bypass proposer indices caches.
|
||||
package cache
|
||||
|
||||
1
beacon-chain/cache/proposer_indices_test.go
vendored
@@ -1,5 +1,4 @@
|
||||
//go:build !fuzz
|
||||
// +build !fuzz
|
||||
|
||||
package cache
|
||||
|
||||
|
||||
1
beacon-chain/cache/sync_committee.go
vendored
@@ -1,5 +1,4 @@
|
||||
//go:build !fuzz
|
||||
// +build !fuzz
|
||||
|
||||
package cache
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
//go:build fuzz
|
||||
// +build fuzz
|
||||
|
||||
package cache
|
||||
|
||||
|
||||
@@ -47,7 +47,7 @@ func InitializePrecomputeValidators(ctx context.Context, beaconState state.Beaco
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Set validator's active status for preivous epoch.
|
||||
// Set validator's active status for previous epoch.
|
||||
if helpers.IsActiveValidatorUsingTrie(val, prevEpoch) {
|
||||
v.IsActivePrevEpoch = true
|
||||
bal.ActivePrevEpoch, err = math.Add64(bal.ActivePrevEpoch, val.EffectiveBalance())
|
||||
|
||||
@@ -31,6 +31,7 @@ go_library(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/forks/bellatrix:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
@@ -40,7 +41,6 @@ go_library(
|
||||
"//crypto/bls:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
@@ -89,6 +89,7 @@ go_test(
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/forks/bellatrix:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
|
||||
@@ -7,11 +7,10 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/forks/bellatrix"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/encoding/ssz"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
@@ -41,7 +40,7 @@ func IsMergeTransitionComplete(st state.BeaconState) (bool, error) {
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return !isEmptyHeader(h), nil
|
||||
return !bellatrix.IsEmptyHeader(h), nil
|
||||
}
|
||||
|
||||
// IsMergeTransitionBlockUsingPreStatePayloadHeader returns true if the input block is the terminal merge block.
|
||||
@@ -51,7 +50,7 @@ func IsMergeTransitionBlockUsingPreStatePayloadHeader(h *ethpb.ExecutionPayloadH
|
||||
if h == nil || body == nil {
|
||||
return false, errors.New("nil header or block body")
|
||||
}
|
||||
if !isEmptyHeader(h) {
|
||||
if !bellatrix.IsEmptyHeader(h) {
|
||||
return false, nil
|
||||
}
|
||||
return IsExecutionBlock(body)
|
||||
@@ -74,7 +73,7 @@ func IsExecutionBlock(body interfaces.BeaconBlockBody) (bool, error) {
|
||||
return false, err
|
||||
default:
|
||||
}
|
||||
return !IsEmptyPayload(payload), nil
|
||||
return !bellatrix.IsEmptyPayload(payload), nil
|
||||
}
|
||||
|
||||
// IsExecutionEnabled returns true if the beacon chain can begin executing.
|
||||
@@ -100,7 +99,7 @@ func IsExecutionEnabled(st state.BeaconState, body interfaces.BeaconBlockBody) (
|
||||
// IsExecutionEnabledUsingHeader returns true if the execution is enabled using post processed payload header and block body.
|
||||
// This is an optimized version of IsExecutionEnabled where beacon state is not required as an argument.
|
||||
func IsExecutionEnabledUsingHeader(header *ethpb.ExecutionPayloadHeader, body interfaces.BeaconBlockBody) (bool, error) {
|
||||
if !isEmptyHeader(header) {
|
||||
if !bellatrix.IsEmptyHeader(header) {
|
||||
return true, nil
|
||||
}
|
||||
return IsExecutionBlock(body)
|
||||
@@ -205,7 +204,7 @@ func ProcessPayload(st state.BeaconState, payload *enginev1.ExecutionPayload) (s
|
||||
return nil, err
|
||||
}
|
||||
|
||||
header, err := PayloadToHeader(payload)
|
||||
header, err := bellatrix.PayloadToHeader(payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -215,126 +214,6 @@ func ProcessPayload(st state.BeaconState, payload *enginev1.ExecutionPayload) (s
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// PayloadToHeader converts `payload` into execution payload header format.
|
||||
func PayloadToHeader(payload *enginev1.ExecutionPayload) (*ethpb.ExecutionPayloadHeader, error) {
|
||||
txRoot, err := ssz.TransactionsRoot(payload.Transactions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ðpb.ExecutionPayloadHeader{
|
||||
ParentHash: bytesutil.SafeCopyBytes(payload.ParentHash),
|
||||
FeeRecipient: bytesutil.SafeCopyBytes(payload.FeeRecipient),
|
||||
StateRoot: bytesutil.SafeCopyBytes(payload.StateRoot),
|
||||
ReceiptsRoot: bytesutil.SafeCopyBytes(payload.ReceiptsRoot),
|
||||
LogsBloom: bytesutil.SafeCopyBytes(payload.LogsBloom),
|
||||
PrevRandao: bytesutil.SafeCopyBytes(payload.PrevRandao),
|
||||
BlockNumber: payload.BlockNumber,
|
||||
GasLimit: payload.GasLimit,
|
||||
GasUsed: payload.GasUsed,
|
||||
Timestamp: payload.Timestamp,
|
||||
ExtraData: bytesutil.SafeCopyBytes(payload.ExtraData),
|
||||
BaseFeePerGas: bytesutil.SafeCopyBytes(payload.BaseFeePerGas),
|
||||
BlockHash: bytesutil.SafeCopyBytes(payload.BlockHash),
|
||||
TransactionsRoot: txRoot[:],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func IsEmptyPayload(p *enginev1.ExecutionPayload) bool {
|
||||
if p == nil {
|
||||
return true
|
||||
}
|
||||
if !bytes.Equal(p.ParentHash, make([]byte, fieldparams.RootLength)) {
|
||||
return false
|
||||
}
|
||||
if !bytes.Equal(p.FeeRecipient, make([]byte, fieldparams.FeeRecipientLength)) {
|
||||
return false
|
||||
}
|
||||
if !bytes.Equal(p.StateRoot, make([]byte, fieldparams.RootLength)) {
|
||||
return false
|
||||
}
|
||||
if !bytes.Equal(p.ReceiptsRoot, make([]byte, fieldparams.RootLength)) {
|
||||
return false
|
||||
}
|
||||
if !bytes.Equal(p.LogsBloom, make([]byte, fieldparams.LogsBloomLength)) {
|
||||
return false
|
||||
}
|
||||
if !bytes.Equal(p.PrevRandao, make([]byte, fieldparams.RootLength)) {
|
||||
return false
|
||||
}
|
||||
if !bytes.Equal(p.BaseFeePerGas, make([]byte, fieldparams.RootLength)) {
|
||||
return false
|
||||
}
|
||||
if !bytes.Equal(p.BlockHash, make([]byte, fieldparams.RootLength)) {
|
||||
return false
|
||||
}
|
||||
if len(p.Transactions) != 0 {
|
||||
return false
|
||||
}
|
||||
if len(p.ExtraData) != 0 {
|
||||
return false
|
||||
}
|
||||
if p.BlockNumber != 0 {
|
||||
return false
|
||||
}
|
||||
if p.GasLimit != 0 {
|
||||
return false
|
||||
}
|
||||
if p.GasUsed != 0 {
|
||||
return false
|
||||
}
|
||||
if p.Timestamp != 0 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func isEmptyHeader(h *ethpb.ExecutionPayloadHeader) bool {
|
||||
if !bytes.Equal(h.ParentHash, make([]byte, fieldparams.RootLength)) {
|
||||
return false
|
||||
}
|
||||
if !bytes.Equal(h.FeeRecipient, make([]byte, fieldparams.FeeRecipientLength)) {
|
||||
return false
|
||||
}
|
||||
if !bytes.Equal(h.StateRoot, make([]byte, fieldparams.RootLength)) {
|
||||
return false
|
||||
}
|
||||
if !bytes.Equal(h.ReceiptsRoot, make([]byte, fieldparams.RootLength)) {
|
||||
return false
|
||||
}
|
||||
if !bytes.Equal(h.LogsBloom, make([]byte, fieldparams.LogsBloomLength)) {
|
||||
return false
|
||||
}
|
||||
if !bytes.Equal(h.PrevRandao, make([]byte, fieldparams.RootLength)) {
|
||||
return false
|
||||
}
|
||||
if !bytes.Equal(h.BaseFeePerGas, make([]byte, fieldparams.RootLength)) {
|
||||
return false
|
||||
}
|
||||
if !bytes.Equal(h.BlockHash, make([]byte, fieldparams.RootLength)) {
|
||||
return false
|
||||
}
|
||||
if !bytes.Equal(h.TransactionsRoot, make([]byte, fieldparams.RootLength)) {
|
||||
return false
|
||||
}
|
||||
if len(h.ExtraData) != 0 {
|
||||
return false
|
||||
}
|
||||
if h.BlockNumber != 0 {
|
||||
return false
|
||||
}
|
||||
if h.GasLimit != 0 {
|
||||
return false
|
||||
}
|
||||
if h.GasUsed != 0 {
|
||||
return false
|
||||
}
|
||||
if h.Timestamp != 0 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// ValidatePayloadHeaderWhenMergeCompletes validates the payload header when the merge completes.
|
||||
func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header *ethpb.ExecutionPayloadHeader) error {
|
||||
// Skip validation if the state is not merge compatible.
|
||||
@@ -393,3 +272,16 @@ func ProcessPayloadHeader(st state.BeaconState, header *ethpb.ExecutionPayloadHe
|
||||
}
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// GetBlockPayloadHash returns the hash of the execution payload of the block
|
||||
func GetBlockPayloadHash(blk interfaces.BeaconBlock) ([32]byte, error) {
|
||||
payloadHash := [32]byte{}
|
||||
if IsPreBellatrixVersion(blk.Version()) {
|
||||
return payloadHash, nil
|
||||
}
|
||||
payload, err := blk.Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return payloadHash, err
|
||||
}
|
||||
return bytesutil.ToBytes32(payload.BlockHash), nil
|
||||
}
|
||||
|
||||
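
GetBlockPayloadHash above centralizes a small but recurring check. A hedged usage sketch, assuming the caller already holds an interfaces.BeaconBlock:

h, err := blocks.GetBlockPayloadHash(blk)
if err != nil {
	return err
}
if h == params.BeaconConfig().ZeroHash {
	// no execution payload hash to act on (pre-Bellatrix block, or an empty payload)
}
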
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/forks/bellatrix"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/encoding/ssz"
|
||||
@@ -669,7 +670,7 @@ func Test_ProcessPayload(t *testing.T) {
|
||||
require.Equal(t, tt.err.Error(), err.Error())
|
||||
} else {
|
||||
require.Equal(t, tt.err, err)
|
||||
want, err := blocks.PayloadToHeader(tt.payload)
|
||||
want, err := bellatrix.PayloadToHeader(tt.payload)
|
||||
require.Equal(t, tt.err, err)
|
||||
got, err := st.LatestExecutionPayloadHeader()
|
||||
require.NoError(t, err)
|
||||
@@ -824,7 +825,7 @@ func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
|
||||
|
||||
func Test_PayloadToHeader(t *testing.T) {
|
||||
p := emptyPayload()
|
||||
h, err := blocks.PayloadToHeader(p)
|
||||
h, err := bellatrix.PayloadToHeader(p)
|
||||
require.NoError(t, err)
|
||||
txRoot, err := ssz.TransactionsRoot(p.Transactions)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -28,6 +28,7 @@ go_library(
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -38,16 +39,19 @@ go_test(
|
||||
"attestation_test.go",
|
||||
"justification_finalization_test.go",
|
||||
"new_test.go",
|
||||
"precompute_test.go",
|
||||
"reward_penalty_test.go",
|
||||
"slashing_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/epoch:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//beacon-chain/state/v2:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
|
||||
@@ -2,6 +2,7 @@ package precompute
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
@@ -9,6 +10,24 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
|
||||
var errNilState = errors.New("nil state")
|
||||
|
||||
// UnrealizedCheckpoints returns the justification and finalization checkpoints of the
|
||||
// given state as if it was progressed with empty slots until the next epoch.
|
||||
func UnrealizedCheckpoints(st state.BeaconState) (*ethpb.Checkpoint, *ethpb.Checkpoint, error) {
|
||||
if st == nil || st.IsNil() {
|
||||
return nil, nil, errNilState
|
||||
}
|
||||
|
||||
activeBalance, prevTarget, currentTarget, err := st.UnrealizedCheckpointBalances()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
justification := processJustificationBits(st, activeBalance, prevTarget, currentTarget)
|
||||
return computeCheckpoints(st, justification)
|
||||
}
|
||||
|
||||
// ProcessJustificationAndFinalizationPreCompute processes justification and finalization during
|
||||
// epoch processing. This is where a beacon node can justify and finalize a new epoch.
|
||||
// Note: this is an optimized version by passing in precomputed total and attesting balances.
|
||||
@@ -34,12 +53,55 @@ func ProcessJustificationAndFinalizationPreCompute(state state.BeaconState, pBal
|
||||
return state, nil
|
||||
}
|
||||
|
||||
return weighJustificationAndFinalization(state, pBal.ActiveCurrentEpoch, pBal.PrevEpochTargetAttested, pBal.CurrentEpochTargetAttested)
|
||||
newBits := processJustificationBits(state, pBal.ActiveCurrentEpoch, pBal.PrevEpochTargetAttested, pBal.CurrentEpochTargetAttested)
|
||||
|
||||
return weighJustificationAndFinalization(state, newBits)
|
||||
}
|
||||
|
||||
// weighJustificationAndFinalization processes justification and finalization during
|
||||
// processJustificationBits processes the justification bits during epoch processing.
|
||||
func processJustificationBits(state state.BeaconState, totalActiveBalance, prevEpochTargetBalance, currEpochTargetBalance uint64) bitfield.Bitvector4 {
|
||||
newBits := state.JustificationBits()
|
||||
newBits.Shift(1)
|
||||
// If 2/3 or more of total balance attested in the previous epoch.
|
||||
if 3*prevEpochTargetBalance >= 2*totalActiveBalance {
|
||||
newBits.SetBitAt(1, true)
|
||||
}
|
||||
|
||||
if 3*currEpochTargetBalance >= 2*totalActiveBalance {
|
||||
newBits.SetBitAt(0, true)
|
||||
}
|
||||
|
||||
return newBits
|
||||
}
|
||||
|
||||
// updateJustificationAndFinalization processes justification and finalization during
|
||||
// epoch processing. This is where a beacon node can justify and finalize a new epoch.
|
||||
//
|
||||
func weighJustificationAndFinalization(state state.BeaconState, newBits bitfield.Bitvector4) (state.BeaconState, error) {
|
||||
jc, fc, err := computeCheckpoints(state, newBits)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := state.SetPreviousJustifiedCheckpoint(state.CurrentJustifiedCheckpoint()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := state.SetCurrentJustifiedCheckpoint(jc); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := state.SetJustificationBits(newBits); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := state.SetFinalizedCheckpoint(fc); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return state, nil
|
||||
}
|
||||
|
||||
// computeCheckpoints computes the new Justification and Finalization
|
||||
// checkpoints at epoch transition
|
||||
// Spec pseudocode definition:
|
||||
// def weigh_justification_and_finalization(state: BeaconState,
|
||||
// total_active_balance: Gwei,
|
||||
@@ -77,88 +139,57 @@ func ProcessJustificationAndFinalizationPreCompute(state state.BeaconState, pBal
|
||||
// # The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source
|
||||
// if all(bits[0:2]) and old_current_justified_checkpoint.epoch + 1 == current_epoch:
|
||||
// state.finalized_checkpoint = old_current_justified_checkpoint
|
||||
func weighJustificationAndFinalization(state state.BeaconState,
|
||||
totalActiveBalance, prevEpochTargetBalance, currEpochTargetBalance uint64) (state.BeaconState, error) {
|
||||
func computeCheckpoints(state state.BeaconState, newBits bitfield.Bitvector4) (*ethpb.Checkpoint, *ethpb.Checkpoint, error) {
|
||||
prevEpoch := time.PrevEpoch(state)
|
||||
currentEpoch := time.CurrentEpoch(state)
|
||||
oldPrevJustifiedCheckpoint := state.PreviousJustifiedCheckpoint()
|
||||
oldCurrJustifiedCheckpoint := state.CurrentJustifiedCheckpoint()
|
||||
|
||||
// Process justifications
|
||||
if err := state.SetPreviousJustifiedCheckpoint(state.CurrentJustifiedCheckpoint()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
newBits := state.JustificationBits()
|
||||
newBits.Shift(1)
|
||||
if err := state.SetJustificationBits(newBits); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Note: the spec refers to the bit index position starting at 1 instead of starting at zero.
|
||||
// We will use that paradigm here for consistency with the godoc spec definition.
|
||||
|
||||
// If 2/3 or more of total balance attested in the previous epoch.
|
||||
if 3*prevEpochTargetBalance >= 2*totalActiveBalance {
|
||||
blockRoot, err := helpers.BlockRoot(state, prevEpoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root for previous epoch %d", prevEpoch)
|
||||
}
|
||||
if err := state.SetCurrentJustifiedCheckpoint(ðpb.Checkpoint{Epoch: prevEpoch, Root: blockRoot}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
newBits = state.JustificationBits()
|
||||
newBits.SetBitAt(1, true)
|
||||
if err := state.SetJustificationBits(newBits); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
justifiedCheckpoint := state.CurrentJustifiedCheckpoint()
|
||||
finalizedCheckpoint := state.FinalizedCheckpoint()
|
||||
|
||||
// If 2/3 or more of the total balance attested in the current epoch.
|
||||
if 3*currEpochTargetBalance >= 2*totalActiveBalance {
|
||||
if newBits.BitAt(0) && currentEpoch >= justifiedCheckpoint.Epoch {
|
||||
blockRoot, err := helpers.BlockRoot(state, currentEpoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root for current epoch %d", prevEpoch)
|
||||
return nil, nil, errors.Wrapf(err, "could not get block root for current epoch %d", currentEpoch)
|
||||
}
|
||||
if err := state.SetCurrentJustifiedCheckpoint(ðpb.Checkpoint{Epoch: currentEpoch, Root: blockRoot}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
newBits = state.JustificationBits()
|
||||
newBits.SetBitAt(0, true)
|
||||
if err := state.SetJustificationBits(newBits); err != nil {
|
||||
return nil, err
|
||||
justifiedCheckpoint.Epoch = currentEpoch
|
||||
justifiedCheckpoint.Root = blockRoot
|
||||
} else if newBits.BitAt(1) && prevEpoch >= justifiedCheckpoint.Epoch {
|
||||
// If 2/3 or more of total balance attested in the previous epoch.
|
||||
blockRoot, err := helpers.BlockRoot(state, prevEpoch)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not get block root for previous epoch %d", prevEpoch)
|
||||
}
|
||||
justifiedCheckpoint.Epoch = prevEpoch
|
||||
justifiedCheckpoint.Root = blockRoot
|
||||
}
|
||||
|
||||
// Process finalization according to Ethereum Beacon Chain specification.
|
||||
justification := state.JustificationBits().Bytes()[0]
|
||||
if len(newBits) == 0 {
|
||||
return nil, nil, errors.New("empty justification bits")
|
||||
}
|
||||
justification := newBits.Bytes()[0]
|
||||
|
||||
// 2nd/3rd/4th (0b1110) most recent epochs are justified, the 2nd using the 4th as source.
|
||||
if justification&0x0E == 0x0E && (oldPrevJustifiedCheckpoint.Epoch+3) == currentEpoch {
|
||||
if err := state.SetFinalizedCheckpoint(oldPrevJustifiedCheckpoint); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
finalizedCheckpoint = oldPrevJustifiedCheckpoint
|
||||
}
|
||||
|
||||
// 2nd/3rd (0b0110) most recent epochs are justified, the 2nd using the 3rd as source.
|
||||
if justification&0x06 == 0x06 && (oldPrevJustifiedCheckpoint.Epoch+2) == currentEpoch {
|
||||
if err := state.SetFinalizedCheckpoint(oldPrevJustifiedCheckpoint); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
finalizedCheckpoint = oldPrevJustifiedCheckpoint
|
||||
}
|
||||
|
||||
// 1st/2nd/3rd (0b0111) most recent epochs are justified, the 1st using the 3rd as source.
|
||||
if justification&0x07 == 0x07 && (oldCurrJustifiedCheckpoint.Epoch+2) == currentEpoch {
|
||||
if err := state.SetFinalizedCheckpoint(oldCurrJustifiedCheckpoint); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
finalizedCheckpoint = oldCurrJustifiedCheckpoint
|
||||
}
|
||||
|
||||
// The 1st/2nd (0b0011) most recent epochs are justified, the 1st using the 2nd as source
|
||||
if justification&0x03 == 0x03 && (oldCurrJustifiedCheckpoint.Epoch+1) == currentEpoch {
|
||||
if err := state.SetFinalizedCheckpoint(oldCurrJustifiedCheckpoint); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
finalizedCheckpoint = oldCurrJustifiedCheckpoint
|
||||
}
|
||||
|
||||
return state, nil
|
||||
return justifiedCheckpoint, finalizedCheckpoint, nil
|
||||
}
|
||||
|
||||
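
To summarize the refactor above: processJustificationBits shifts the four-bit window and sets bit 0 when the current epoch reaches the 2/3 target balance and bit 1 when the previous epoch does, and computeCheckpoints turns those bits plus the stored checkpoints into the new justified and finalized checkpoints; the exported UnrealizedCheckpoints helper exposes exactly that result. A hedged usage sketch of the new helper:

// jc and fc are what justification and finalization would become if the current
// epoch were closed out with empty slots from here on (st is a state.BeaconState).
jc, fc, err := precompute.UnrealizedCheckpoints(st)
if err != nil {
	return err
}
log.WithFields(logrus.Fields{
	"unrealizedJustifiedEpoch": jc.Epoch,
	"unrealizedFinalizedEpoch": fc.Epoch,
}).Debug("Computed unrealized checkpoints")
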
@@ -1,11 +1,14 @@
|
||||
package precompute_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
@@ -123,3 +126,142 @@ func TestProcessJustificationAndFinalizationPreCompute_JustifyPrevEpoch(t *testi
|
||||
assert.DeepEqual(t, params.BeaconConfig().ZeroHash[:], newState.FinalizedCheckpoint().Root)
|
||||
assert.Equal(t, types.Epoch(0), newState.FinalizedCheckpointEpoch(), "Unexpected finalized epoch")
|
||||
}
|
||||
|
||||
func TestUnrealizedCheckpoints(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
balances := make([]uint64, len(validators))
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
}
|
||||
balances[i] = params.BeaconConfig().MaxEffectiveBalance
|
||||
}
|
||||
pjr := [32]byte{'p'}
|
||||
cjr := [32]byte{'c'}
|
||||
je := types.Epoch(3)
|
||||
fe := types.Epoch(2)
|
||||
pjcp := ðpb.Checkpoint{Root: pjr[:], Epoch: fe}
|
||||
cjcp := ðpb.Checkpoint{Root: cjr[:], Epoch: je}
|
||||
fcp := ðpb.Checkpoint{Root: pjr[:], Epoch: fe}
|
||||
tests := []struct {
|
||||
name string
|
||||
slot types.Slot
|
||||
prevVals, currVals int
|
||||
expectedJustified, expectedFinalized types.Epoch // The expected unrealized checkpoint epochs
|
||||
}{
|
||||
{
|
||||
"Not enough votes, keep previous justification",
|
||||
129,
|
||||
len(validators) / 3,
|
||||
len(validators) / 3,
|
||||
je,
|
||||
fe,
|
||||
},
|
||||
{
|
||||
"Not enough votes, keep previous justification, N+2",
|
||||
161,
|
||||
len(validators) / 3,
|
||||
len(validators) / 3,
|
||||
je,
|
||||
fe,
|
||||
},
|
||||
{
|
||||
"Enough to justify previous epoch but not current",
|
||||
129,
|
||||
2*len(validators)/3 + 3,
|
||||
len(validators) / 3,
|
||||
je,
|
||||
fe,
|
||||
},
|
||||
{
|
||||
"Enough to justify previous epoch but not current, N+2",
|
||||
161,
|
||||
2*len(validators)/3 + 3,
|
||||
len(validators) / 3,
|
||||
je + 1,
|
||||
fe,
|
||||
},
|
||||
{
|
||||
"Enough to justify current epoch",
|
||||
129,
|
||||
len(validators) / 3,
|
||||
2*len(validators)/3 + 3,
|
||||
je + 1,
|
||||
fe,
|
||||
},
|
||||
{
|
||||
"Enough to justify current epoch, but not previous",
|
||||
161,
|
||||
len(validators) / 3,
|
||||
2*len(validators)/3 + 3,
|
||||
je + 2,
|
||||
fe,
|
||||
},
|
||||
{
|
||||
"Enough to justify current and previous",
|
||||
161,
|
||||
2*len(validators)/3 + 3,
|
||||
2*len(validators)/3 + 3,
|
||||
je + 2,
|
||||
fe,
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
base := ðpb.BeaconStateAltair{
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
|
||||
Validators: validators,
|
||||
Slot: test.slot,
|
||||
CurrentEpochParticipation: make([]byte, params.BeaconConfig().MinGenesisActiveValidatorCount),
|
||||
PreviousEpochParticipation: make([]byte, params.BeaconConfig().MinGenesisActiveValidatorCount),
|
||||
Balances: balances,
|
||||
PreviousJustifiedCheckpoint: pjcp,
|
||||
CurrentJustifiedCheckpoint: cjcp,
|
||||
FinalizedCheckpoint: fcp,
|
||||
InactivityScores: make([]uint64, len(validators)),
|
||||
JustificationBits: make(bitfield.Bitvector4, 1),
|
||||
}
|
||||
for i := 0; i < test.prevVals; i++ {
|
||||
base.PreviousEpochParticipation[i] = 0xFF
|
||||
}
|
||||
for i := 0; i < test.currVals; i++ {
|
||||
base.CurrentEpochParticipation[i] = 0xFF
|
||||
}
|
||||
if test.slot > 130 {
|
||||
base.JustificationBits.SetBitAt(2, true)
|
||||
base.JustificationBits.SetBitAt(3, true)
|
||||
} else {
|
||||
base.JustificationBits.SetBitAt(1, true)
|
||||
base.JustificationBits.SetBitAt(2, true)
|
||||
}
|
||||
|
||||
state, err := v2.InitializeFromProto(base)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, err = altair.InitializePrecomputeValidators(context.Background(), state)
|
||||
require.NoError(t, err)
|
||||
|
||||
jc, fc, err := precompute.UnrealizedCheckpoints(state)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, test.expectedJustified, jc.Epoch)
|
||||
require.DeepEqual(t, test.expectedFinalized, fc.Epoch)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_ComputeCheckpoints_CantUpdateToLower(t *testing.T) {
|
||||
st, err := v2.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch * 2,
|
||||
CurrentJustifiedCheckpoint: ðpb.Checkpoint{
|
||||
Epoch: 2,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
jb := make(bitfield.Bitvector4, 1)
|
||||
jb.SetBitAt(1, true)
|
||||
cp, _, err := precompute.ComputeCheckpoints(st, jb)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, types.Epoch(2), cp.Epoch)
|
||||
}
|
||||
|
||||
3
beacon-chain/core/epoch/precompute/precompute_test.go
Normal file
@@ -0,0 +1,3 @@
|
||||
package precompute
|
||||
|
||||
var ComputeCheckpoints = computeCheckpoints
|
||||
@@ -285,7 +285,7 @@ func ShuffledIndices(s state.ReadOnlyBeaconState, epoch types.Epoch) ([]types.Va
|
||||
|
||||
// UpdateCommitteeCache gets called at the beginning of every epoch to cache the committee shuffled indices
|
||||
// list with committee index and epoch number. It caches the shuffled indices for current epoch and next epoch.
|
||||
func UpdateCommitteeCache(state state.ReadOnlyBeaconState, epoch types.Epoch) error {
|
||||
func UpdateCommitteeCache(ctx context.Context, state state.ReadOnlyBeaconState, epoch types.Epoch) error {
|
||||
for _, e := range []types.Epoch{epoch, epoch + 1} {
|
||||
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
|
||||
if err != nil {
|
||||
@@ -311,7 +311,7 @@ func UpdateCommitteeCache(state state.ReadOnlyBeaconState, epoch types.Epoch) er
|
||||
return sortedIndices[i] < sortedIndices[j]
|
||||
})
|
||||
|
||||
if err := committeeCache.AddCommitteeShuffledList(&cache.Committees{
|
||||
if err := committeeCache.AddCommitteeShuffledList(ctx, &cache.Committees{
|
||||
ShuffledIndices: shuffledIndices,
|
||||
CommitteeCount: uint64(params.BeaconConfig().SlotsPerEpoch.Mul(count)),
|
||||
Seed: seed,
|
||||
|
||||
@@ -386,7 +386,7 @@ func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, UpdateCommitteeCache(state, time.CurrentEpoch(state)))
|
||||
require.NoError(t, UpdateCommitteeCache(context.Background(), state, time.CurrentEpoch(state)))
|
||||
|
||||
epoch := types.Epoch(1)
|
||||
idx := types.CommitteeIndex(1)
|
||||
|
||||
@@ -76,6 +76,8 @@ func TotalActiveBalance(s state.ReadOnlyBeaconState) (uint64, error) {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Spec defines `EffectiveBalanceIncrement` as min to avoid divisions by zero.
|
||||
total = mathutil.Max(params.BeaconConfig().EffectiveBalanceIncrement, total)
|
||||
if err := balanceCache.AddTotalEffectiveBalance(s, total); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
@@ -74,6 +74,27 @@ func TestTotalActiveBalance(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestTotalActiveBal_ReturnMin(t *testing.T) {
|
||||
tests := []struct {
|
||||
vCount int
|
||||
}{
|
||||
{1},
|
||||
{10},
|
||||
{10000},
|
||||
}
|
||||
for _, test := range tests {
|
||||
validators := make([]*ethpb.Validator, 0)
|
||||
for i := 0; i < test.vCount; i++ {
|
||||
validators = append(validators, ðpb.Validator{EffectiveBalance: 1, ExitEpoch: 1})
|
||||
}
|
||||
state, err := v1.InitializeFromProto(ðpb.BeaconState{Validators: validators})
|
||||
require.NoError(t, err)
|
||||
bal, err := TotalActiveBalance(state)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().EffectiveBalanceIncrement, bal)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTotalActiveBalance_WithCache(t *testing.T) {
|
||||
tests := []struct {
|
||||
vCount int
|
||||
|
||||
@@ -126,7 +126,7 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := UpdateCommitteeCache(s, epoch); err != nil {
|
||||
if err := UpdateCommitteeCache(ctx, s, epoch); err != nil {
|
||||
return nil, errors.Wrap(err, "could not update committee cache")
|
||||
}
|
||||
|
||||
@@ -175,7 +175,7 @@ func ActiveValidatorCount(ctx context.Context, s state.ReadOnlyBeaconState, epoc
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if err := UpdateCommitteeCache(s, epoch); err != nil {
|
||||
if err := UpdateCommitteeCache(ctx, s, epoch); err != nil {
|
||||
return 0, errors.Wrap(err, "could not update committee cache")
|
||||
}
|
||||
|
||||
|
||||
@@ -301,7 +301,7 @@ func TestActiveValidatorCount_Genesis(t *testing.T) {
|
||||
// Preset cache to a bad count.
|
||||
seed, err := Seed(beaconState, 0, params.BeaconConfig().DomainBeaconAttester)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, committeeCache.AddCommitteeShuffledList(&cache.Committees{Seed: seed, ShuffledIndices: []types.ValidatorIndex{1, 2, 3}}))
|
||||
require.NoError(t, committeeCache.AddCommitteeShuffledList(context.Background(), &cache.Committees{Seed: seed, ShuffledIndices: []types.ValidatorIndex{1, 2, 3}}))
|
||||
validatorCount, err := ActiveValidatorCount(context.Background(), beaconState, time.CurrentEpoch(beaconState))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, uint64(c), validatorCount, "Did not get the correct validator count")
|
||||
|
||||
@@ -4,6 +4,7 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"domain.go",
|
||||
"signature.go",
|
||||
"signing_root.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/core/signing",
|
||||
@@ -24,6 +25,7 @@ go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"domain_test.go",
|
||||
"signature_test.go",
|
||||
"signing_root_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
@@ -40,6 +42,7 @@ go_test(
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_google_gofuzz//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
33
beacon-chain/core/signing/signature.go
Normal file
@@ -0,0 +1,33 @@
|
||||
package signing
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
var ErrNilRegistration = errors.New("nil signed registration")
|
||||
|
||||
// VerifyRegistrationSignature verifies the signature of a validator's registration.
|
||||
func VerifyRegistrationSignature(
|
||||
e types.Epoch,
|
||||
f *ethpb.Fork,
|
||||
sr *ethpb.SignedValidatorRegistrationV1,
|
||||
genesisRoot []byte,
|
||||
) error {
|
||||
if sr == nil || sr.Message == nil {
|
||||
return ErrNilRegistration
|
||||
}
|
||||
|
||||
d := params.BeaconConfig().DomainApplicationBuilder
|
||||
sd, err := Domain(f, e, d, genesisRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := VerifySigningRoot(sr.Message, sr.Message.Pubkey, sr.Signature, sd); err != nil {
|
||||
return ErrSigFailedToVerify
|
||||
}
|
||||
return nil
|
||||
}
|
||||
44
beacon-chain/core/signing/signature_test.go
Normal file
@@ -0,0 +1,44 @@
|
||||
package signing_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
|
||||
func TestVerifyRegistrationSignature(t *testing.T) {
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
reg := ðpb.ValidatorRegistrationV1{
|
||||
FeeRecipient: bytesutil.PadTo([]byte("fee"), 20),
|
||||
GasLimit: 123456,
|
||||
Timestamp: uint64(time.Now().Unix()),
|
||||
Pubkey: sk.PublicKey().Marshal(),
|
||||
}
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
d := params.BeaconConfig().DomainApplicationBuilder
|
||||
e := slots.ToEpoch(st.Slot())
|
||||
sig, err := signing.ComputeDomainAndSign(st, e, reg, d, sk)
|
||||
require.NoError(t, err)
|
||||
sReg := ðpb.SignedValidatorRegistrationV1{
|
||||
Message: reg,
|
||||
Signature: sig,
|
||||
}
|
||||
f := st.Fork()
|
||||
g := st.GenesisValidatorsRoot()
|
||||
require.NoError(t, signing.VerifyRegistrationSignature(e, f, sReg, g))
|
||||
|
||||
sReg.Signature = []byte("bad")
|
||||
require.ErrorIs(t, signing.VerifyRegistrationSignature(e, f, sReg, g), signing.ErrSigFailedToVerify)
|
||||
|
||||
sReg.Message = nil
|
||||
require.ErrorIs(t, signing.VerifyRegistrationSignature(e, f, sReg, g), signing.ErrNilRegistration)
|
||||
}
|
||||
@@ -53,7 +53,7 @@ func BenchmarkExecuteStateTransition_WithCache(b *testing.B) {
|
||||
// some attestations in block are from previous epoch
|
||||
currentSlot := beaconState.Slot()
|
||||
require.NoError(b, beaconState.SetSlot(beaconState.Slot()-params.BeaconConfig().SlotsPerEpoch))
|
||||
require.NoError(b, helpers.UpdateCommitteeCache(beaconState, time.CurrentEpoch(beaconState)))
|
||||
require.NoError(b, helpers.UpdateCommitteeCache(context.Background(), beaconState, time.CurrentEpoch(beaconState)))
|
||||
require.NoError(b, beaconState.SetSlot(currentSlot))
|
||||
// Run the state transition once to populate the cache.
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
|
||||
@@ -81,7 +81,7 @@ func BenchmarkProcessEpoch_2FullEpochs(b *testing.B) {
|
||||
// some attestations in block are from previous epoch
|
||||
currentSlot := beaconState.Slot()
|
||||
require.NoError(b, beaconState.SetSlot(beaconState.Slot()-params.BeaconConfig().SlotsPerEpoch))
|
||||
require.NoError(b, helpers.UpdateCommitteeCache(beaconState, time.CurrentEpoch(beaconState)))
|
||||
require.NoError(b, helpers.UpdateCommitteeCache(context.Background(), beaconState, time.CurrentEpoch(beaconState)))
|
||||
require.NoError(b, beaconState.SetSlot(currentSlot))
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
@@ -14,6 +14,7 @@ go_library(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
mathutil "github.com/prysmaticlabs/prysm/math"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
@@ -72,7 +73,11 @@ func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx types.V
|
||||
exitQueueChurn := uint64(0)
|
||||
err = s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
|
||||
if val.ExitEpoch() == exitQueueEpoch {
|
||||
exitQueueChurn++
|
||||
var mErr error
|
||||
exitQueueChurn, mErr = mathutil.Add64(exitQueueChurn, 1)
|
||||
if mErr != nil {
|
||||
return mErr
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
@@ -89,10 +94,16 @@ func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx types.V
|
||||
}
|
||||
|
||||
if exitQueueChurn >= churn {
|
||||
exitQueueEpoch++
|
||||
exitQueueEpoch, err = exitQueueEpoch.SafeAdd(1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
validator.ExitEpoch = exitQueueEpoch
|
||||
validator.WithdrawableEpoch = exitQueueEpoch + params.BeaconConfig().MinValidatorWithdrawabilityDelay
|
||||
validator.WithdrawableEpoch, err = exitQueueEpoch.SafeAddEpoch(params.BeaconConfig().MinValidatorWithdrawabilityDelay)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := s.UpdateValidatorAtIndex(idx, validator); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
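The hunk above replaces unchecked increments with checked arithmetic. A condensed sketch of that pattern, with signatures inferred from the usage shown in the diff; on overflow both helpers surface an error (the test below expects "addition overflows") instead of silently wrapping around.

package example

import (
	types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
	mathutil "github.com/prysmaticlabs/prysm/math"
)

// safeBump bumps a churn counter and an epoch without silent wraparound.
func safeBump(churn uint64, epoch types.Epoch) (uint64, types.Epoch, error) {
	c, err := mathutil.Add64(churn, 1)
	if err != nil {
		return 0, 0, err
	}
	e, err := epoch.SafeAdd(1)
	if err != nil {
		return 0, 0, err
	}
	return c, e, nil
}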
@@ -99,6 +99,17 @@ func TestInitiateValidatorExit_ChurnOverflow(t *testing.T) {
|
||||
assert.Equal(t, wantedEpoch, v.ExitEpoch, "Exit epoch did not cover overflow case")
|
||||
}
|
||||
|
||||
func TestInitiateValidatorExit_WithdrawalOverflows(t *testing.T) {
|
||||
base := ðpb.BeaconState{Validators: []*ethpb.Validator{
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch - 1},
|
||||
{EffectiveBalance: params.BeaconConfig().EjectionBalance, ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
}}
|
||||
state, err := v1.InitializeFromProto(base)
|
||||
require.NoError(t, err)
|
||||
_, err = InitiateValidatorExit(context.Background(), state, 1)
|
||||
require.ErrorContains(t, "addition overflows", err)
|
||||
}
|
||||
|
||||
func TestSlashValidator_OK(t *testing.T) {
|
||||
validatorCount := 100
|
||||
registry := make([]*ethpb.Validator, 0, validatorCount)
|
||||
|
||||
@@ -23,14 +23,14 @@ type ReadOnlyDatabase interface {
|
||||
Block(ctx context.Context, blockRoot [32]byte) (interfaces.SignedBeaconBlock, error)
|
||||
Blocks(ctx context.Context, f *filters.QueryFilter) ([]interfaces.SignedBeaconBlock, [][32]byte, error)
|
||||
BlockRoots(ctx context.Context, f *filters.QueryFilter) ([][32]byte, error)
|
||||
BlocksBySlot(ctx context.Context, slot types.Slot) (bool, []interfaces.SignedBeaconBlock, error)
|
||||
BlocksBySlot(ctx context.Context, slot types.Slot) ([]interfaces.SignedBeaconBlock, error)
|
||||
BlockRootsBySlot(ctx context.Context, slot types.Slot) (bool, [][32]byte, error)
|
||||
HasBlock(ctx context.Context, blockRoot [32]byte) bool
|
||||
GenesisBlock(ctx context.Context) (interfaces.SignedBeaconBlock, error)
|
||||
GenesisBlockRoot(ctx context.Context) ([32]byte, error)
|
||||
IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool
|
||||
FinalizedChildBlock(ctx context.Context, blockRoot [32]byte) (interfaces.SignedBeaconBlock, error)
|
||||
HighestSlotBlocksBelow(ctx context.Context, slot types.Slot) ([]interfaces.SignedBeaconBlock, error)
|
||||
HighestRootsBelowSlot(ctx context.Context, slot types.Slot) (types.Slot, [][32]byte, error)
|
||||
// State related methods.
|
||||
State(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
|
||||
StateOrError(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
|
||||
@@ -86,6 +86,7 @@ type NoHeadAccessDatabase interface {
|
||||
RunMigrations(ctx context.Context) error
|
||||
// Fee recipient operations.
|
||||
SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []types.ValidatorIndex, addrs []common.Address) error
|
||||
SaveRegistrationsByValidatorIDs(ctx context.Context, ids []types.ValidatorIndex, regs []*ethpb.ValidatorRegistrationV1) error
|
||||
|
||||
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint types.Slot) error
|
||||
}
|
||||
|
||||
@@ -66,6 +66,7 @@ go_library(
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_prombbolt//:go_default_library",
|
||||
"@com_github_schollz_progressbar_v3//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_etcd_go_bbolt//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
|
||||
@@ -185,17 +185,19 @@ func (s *Store) HasBlock(ctx context.Context, blockRoot [32]byte) bool {
|
||||
}
|
||||
|
||||
// BlocksBySlot retrieves a list of beacon blocks by slot.
|
||||
func (s *Store) BlocksBySlot(ctx context.Context, slot types.Slot) (bool, []interfaces.SignedBeaconBlock, error) {
|
||||
func (s *Store) BlocksBySlot(ctx context.Context, slot types.Slot) ([]interfaces.SignedBeaconBlock, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlocksBySlot")
|
||||
defer span.End()
|
||||
blocks := make([]interfaces.SignedBeaconBlock, 0)
|
||||
|
||||
blocks := make([]interfaces.SignedBeaconBlock, 0)
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(blocksBucket)
|
||||
|
||||
keys := blockRootsBySlot(ctx, tx, slot)
|
||||
for i := 0; i < len(keys); i++ {
|
||||
encoded := bkt.Get(keys[i])
|
||||
roots, err := blockRootsBySlot(ctx, tx, slot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not retrieve blocks by slot")
|
||||
}
|
||||
for _, r := range roots {
|
||||
encoded := bkt.Get(r[:])
|
||||
blk, err := unmarshalBlock(ctx, encoded)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -204,7 +206,7 @@ func (s *Store) BlocksBySlot(ctx context.Context, slot types.Slot) (bool, []inte
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return len(blocks) > 0, blocks, err
|
||||
return blocks, err
|
||||
}
|
||||
|
||||
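With the boolean dropped from the return, callers now infer presence from the slice length. A hypothetical call site follows; only BlocksBySlot itself comes from the diff, and the db.ReadOnlyDatabase alias is my assumption.

package example

import (
	"context"

	"github.com/prysmaticlabs/prysm/beacon-chain/db"
	types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
)

// hasBlocksAt reports whether any block exists at the given slot under the new signature.
func hasBlocksAt(ctx context.Context, d db.ReadOnlyDatabase, slot types.Slot) (bool, error) {
	blks, err := d.BlocksBySlot(ctx, slot)
	if err != nil {
		return false, err
	}
	return len(blks) > 0, nil // the removed bool return is now just a length check
}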
// BlockRootsBySlot retrieves a list of beacon block roots by slot
|
||||
@@ -213,11 +215,9 @@ func (s *Store) BlockRootsBySlot(ctx context.Context, slot types.Slot) (bool, []
|
||||
defer span.End()
|
||||
blockRoots := make([][32]byte, 0)
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
keys := blockRootsBySlot(ctx, tx, slot)
|
||||
for i := 0; i < len(keys); i++ {
|
||||
blockRoots = append(blockRoots, bytesutil.ToBytes32(keys[i]))
|
||||
}
|
||||
return nil
|
||||
var err error
|
||||
blockRoots, err = blockRootsBySlot(ctx, tx, slot)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return false, nil, errors.Wrap(err, "could not retrieve block roots by slot")
|
||||
@@ -398,50 +398,63 @@ func (s *Store) SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) e
|
||||
})
|
||||
}
|
||||
|
||||
// HighestSlotBlocksBelow returns the block with the highest slot below the input slot from the db.
|
||||
func (s *Store) HighestSlotBlocksBelow(ctx context.Context, slot types.Slot) ([]interfaces.SignedBeaconBlock, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.HighestSlotBlocksBelow")
|
||||
// HighestRootsBelowSlot returns roots from the database slot index at the highest slot below the input slot.
// The slot value returned alongside the roots is the slot at which the roots were found. This is helpful so that
// calling code can make decisions based on the slot without resolving the blocks to discover their slot (for instance,
// checking which root is canonical in fork choice, which operates purely on roots,
// then, if no canonical block is found, continuing to search through lower slots).
|
||||
func (s *Store) HighestRootsBelowSlot(ctx context.Context, slot types.Slot) (fs types.Slot, roots [][32]byte, err error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.HighestRootsBelowSlot")
|
||||
defer span.End()
|
||||
|
||||
var best []byte
|
||||
if err := s.db.View(func(tx *bolt.Tx) error {
|
||||
sk := bytesutil.Uint64ToBytesBigEndian(uint64(slot))
|
||||
err = s.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(blockSlotIndicesBucket)
|
||||
// Iterate through the index, which is in byte sorted order.
|
||||
c := bkt.Cursor()
|
||||
for s, root := c.First(); s != nil; s, root = c.Next() {
|
||||
// The documentation for Seek says:
|
||||
// "If the key does not exist then the next key is used. If no keys follow, a nil key is returned."
|
||||
seekPast := func(ic *bolt.Cursor, k []byte) ([]byte, []byte) {
|
||||
ik, iv := ic.Seek(k)
|
||||
// So if there are slots in the index higher than the requested slot, sl will be equal to the key that is
|
||||
// one higher than the value we want. If the slot argument is higher than the highest value in the index,
|
||||
// we'll get a nil value for `sl`. In that case we'll go backwards from Cursor.Last().
|
||||
if ik == nil {
|
||||
return ic.Last()
|
||||
}
|
||||
return ik, iv
|
||||
}
|
||||
// Re the loop condition: when .Prev() rewinds past the beginning of the collection, the loop will terminate,
|
||||
// because `sl` will be nil. If we don't find a value for `root` before iteration ends,
|
||||
// `root` will be the zero value, in which case this function will return the genesis block.
|
||||
for sl, r := seekPast(c, sk); sl != nil; sl, r = c.Prev() {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
key := bytesutil.BytesToSlotBigEndian(s)
|
||||
if root == nil {
|
||||
if r == nil {
|
||||
continue
|
||||
}
|
||||
if key >= slot {
|
||||
break
|
||||
fs = bytesutil.BytesToSlotBigEndian(sl)
|
||||
// Iterating through the index using .Prev will move from higher to lower, so the first key we find behind
|
||||
// the requested slot must be the highest block below that slot.
|
||||
if slot > fs {
|
||||
roots, err = splitRoots(r)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error parsing packed roots %#x", r)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
best = root
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
})
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
if len(roots) == 0 || (len(roots) == 1 && roots[0] == params.BeaconConfig().ZeroHash) {
|
||||
gr, err := s.GenesisBlockRoot(ctx)
|
||||
return 0, [][32]byte{gr}, err
|
||||
}
|
||||
|
||||
var blk interfaces.SignedBeaconBlock
|
||||
var err error
|
||||
if best != nil {
|
||||
blk, err = s.Block(ctx, bytesutil.ToBytes32(best))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if blk == nil || blk.IsNil() {
|
||||
blk, err = s.GenesisBlock(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return []interfaces.SignedBeaconBlock{blk}, nil
|
||||
return fs, roots, nil
|
||||
}
|
||||
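To make that comment concrete, here is a sketch of the consumption pattern it describes: check the returned roots against fork choice first, and only drop to lower slots when none is canonical. Only HighestRootsBelowSlot comes from this diff; the interface and the isCanonical callback are stand-ins.

package example

import (
	"context"

	types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
)

// rootStore stands in for the database; isCanonical stands in for a fork-choice lookup.
type rootStore interface {
	HighestRootsBelowSlot(ctx context.Context, slot types.Slot) (types.Slot, [][32]byte, error)
}

func canonicalRootBelow(ctx context.Context, s rootStore, slot types.Slot, isCanonical func([32]byte) bool) ([32]byte, error) {
	for slot > 0 {
		foundSlot, roots, err := s.HighestRootsBelowSlot(ctx, slot)
		if err != nil {
			return [32]byte{}, err
		}
		for _, r := range roots {
			if isCanonical(r) { // fork choice operates on roots, so no block load is needed
				return r, nil
			}
		}
		slot = foundSlot // nothing canonical here; keep searching through lower slots
	}
	return [32]byte{}, nil
}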
|
||||
// FeeRecipientByValidatorID returns the fee recipient for a validator id.
|
||||
@@ -453,8 +466,20 @@ func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id types.Validato
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(feeRecipientBucket)
|
||||
addr = bkt.Get(bytesutil.Uint64ToBytesBigEndian(uint64(id)))
|
||||
// If the fee recipient is not found in the standard fee recipient bucket, then
// check the registration bucket. The fee recipient may be there.
// This is to resolve incompatibility until we fully migrate to the registration bucket.
|
||||
if addr == nil {
|
||||
return errors.Wrapf(ErrNotFoundFeeRecipient, "validator id %d", id)
|
||||
bkt = tx.Bucket(registrationBucket)
|
||||
enc := bkt.Get(bytesutil.Uint64ToBytesBigEndian(uint64(id)))
|
||||
if enc == nil {
|
||||
return errors.Wrapf(ErrNotFoundFeeRecipient, "validator id %d", id)
|
||||
}
|
||||
reg := ðpb.ValidatorRegistrationV1{}
|
||||
if err := decode(ctx, enc, reg); err != nil {
|
||||
return err
|
||||
}
|
||||
addr = reg.FeeRecipient
|
||||
}
|
||||
return nil
|
||||
})
|
||||
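A hedged caller-side sketch of the fallback behavior added above. Only FeeRecipientByValidatorID and ErrNotFoundFeeRecipient come from the diff; the default-address handling and the use of errors.Is are assumptions.

package example

import (
	"context"
	"errors"

	"github.com/ethereum/go-ethereum/common"
	"github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
	types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
)

// feeRecipientOrDefault prefers whatever the db knows (the fee-recipient bucket, then
// the registration bucket via the fallback above) and otherwise uses a configured default.
func feeRecipientOrDefault(ctx context.Context, store *kv.Store, idx types.ValidatorIndex, def common.Address) (common.Address, error) {
	addr, err := store.FeeRecipientByValidatorID(ctx, idx)
	switch {
	case err == nil:
		return addr, nil
	case errors.Is(err, kv.ErrNotFoundFeeRecipient):
		return def, nil // neither bucket has an entry for this validator
	default:
		return common.Address{}, err
	}
}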
@@ -482,6 +507,48 @@ func (s *Store) SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []types
|
||||
})
|
||||
}
|
||||
|
||||
// RegistrationByValidatorID returns the validator registration object for a validator id.
|
||||
// `ErrNotFoundFeeRecipient` is returned if the validator id is not found.
|
||||
func (s *Store) RegistrationByValidatorID(ctx context.Context, id types.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.RegistrationByValidatorID")
|
||||
defer span.End()
|
||||
reg := ðpb.ValidatorRegistrationV1{}
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(registrationBucket)
|
||||
enc := bkt.Get(bytesutil.Uint64ToBytesBigEndian(uint64(id)))
|
||||
if enc == nil {
|
||||
return errors.Wrapf(ErrNotFoundFeeRecipient, "validator id %d", id)
|
||||
}
|
||||
return decode(ctx, enc, reg)
|
||||
})
|
||||
return reg, err
|
||||
}
|
||||
|
||||
// SaveRegistrationsByValidatorIDs saves the validator registrations for validator ids.
|
||||
// Error is returned if `ids` and `registrations` are not the same length.
|
||||
func (s *Store) SaveRegistrationsByValidatorIDs(ctx context.Context, ids []types.ValidatorIndex, regs []*ethpb.ValidatorRegistrationV1) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.SaveRegistrationsByValidatorIDs")
|
||||
defer span.End()
|
||||
|
||||
if len(ids) != len(regs) {
|
||||
return errors.New("ids and registrations must be the same length")
|
||||
}
|
||||
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(registrationBucket)
|
||||
for i, id := range ids {
|
||||
enc, err := encode(ctx, regs[i])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := bkt.Put(bytesutil.Uint64ToBytesBigEndian(uint64(id)), enc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// blockRootsByFilter retrieves the block roots given the filter criteria.
|
||||
func blockRootsByFilter(ctx context.Context, tx *bolt.Tx, f *filters.QueryFilter) ([][]byte, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.blockRootsByFilter")
|
||||
@@ -611,21 +678,22 @@ func blockRootsBySlotRange(
|
||||
}
|
||||
|
||||
// blockRootsBySlot retrieves the block roots by slot
|
||||
func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot types.Slot) [][]byte {
|
||||
func blockRootsBySlot(ctx context.Context, tx *bolt.Tx, slot types.Slot) ([][32]byte, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlot")
|
||||
defer span.End()
|
||||
|
||||
roots := make([][]byte, 0)
|
||||
bkt := tx.Bucket(blockSlotIndicesBucket)
|
||||
key := bytesutil.SlotToBytesBigEndian(slot)
|
||||
c := bkt.Cursor()
|
||||
k, v := c.Seek(key)
|
||||
if k != nil && bytes.Equal(k, key) {
|
||||
for i := 0; i < len(v); i += 32 {
|
||||
roots = append(roots, v[i:i+32])
|
||||
r, err := splitRoots(v)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "corrupt value in block slot index for slot=%d", slot)
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
return roots
|
||||
return [][32]byte{}, nil
|
||||
}
|
||||
|
||||
// createBlockIndicesFromBlock takes in a beacon block and returns
|
||||
|
||||
@@ -517,18 +517,32 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
|
||||
require.NoError(t, db.SaveBlock(ctx, block2))
|
||||
require.NoError(t, db.SaveBlock(ctx, block3))
|
||||
|
||||
highestAt, err := db.HighestSlotBlocksBelow(ctx, 2)
|
||||
_, roots, err := db.HighestRootsBelowSlot(ctx, 2)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, len(highestAt) <= 0, "Got empty highest at slice")
|
||||
assert.Equal(t, true, proto.Equal(block1.Proto(), highestAt[0].Proto()), "Wanted: %v, received: %v", block1, highestAt[0])
|
||||
highestAt, err = db.HighestSlotBlocksBelow(ctx, 11)
|
||||
assert.Equal(t, false, len(roots) <= 0, "Got empty highest at slice")
|
||||
require.Equal(t, 1, len(roots))
|
||||
root := roots[0]
|
||||
b, err := db.Block(ctx, root)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, len(highestAt) <= 0, "Got empty highest at slice")
|
||||
assert.Equal(t, true, proto.Equal(block2.Proto(), highestAt[0].Proto()), "Wanted: %v, received: %v", block2, highestAt[0])
|
||||
highestAt, err = db.HighestSlotBlocksBelow(ctx, 101)
|
||||
assert.Equal(t, true, proto.Equal(block1.Proto(), b.Proto()), "Wanted: %v, received: %v", block1, b)
|
||||
|
||||
_, roots, err = db.HighestRootsBelowSlot(ctx, 11)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, len(highestAt) <= 0, "Got empty highest at slice")
|
||||
assert.Equal(t, true, proto.Equal(block3.Proto(), highestAt[0].Proto()), "Wanted: %v, received: %v", block3, highestAt[0])
|
||||
assert.Equal(t, false, len(roots) <= 0, "Got empty highest at slice")
|
||||
require.Equal(t, 1, len(roots))
|
||||
root = roots[0]
|
||||
b, err = db.Block(ctx, root)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(block2.Proto(), b.Proto()), "Wanted: %v, received: %v", block2, b)
|
||||
|
||||
_, roots, err = db.HighestRootsBelowSlot(ctx, 101)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, len(roots) <= 0, "Got empty highest at slice")
|
||||
require.Equal(t, 1, len(roots))
|
||||
root = roots[0]
|
||||
b, err = db.Block(ctx, root)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(block3.Proto(), b.Proto()), "Wanted: %v, received: %v", block3, b)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -549,15 +563,29 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, block1))
|
||||
|
||||
highestAt, err := db.HighestSlotBlocksBelow(ctx, 2)
|
||||
_, roots, err := db.HighestRootsBelowSlot(ctx, 2)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(block1.Proto(), highestAt[0].Proto()), "Wanted: %v, received: %v", block1, highestAt[0])
|
||||
highestAt, err = db.HighestSlotBlocksBelow(ctx, 1)
|
||||
require.Equal(t, 1, len(roots))
|
||||
root := roots[0]
|
||||
b, err := db.Block(ctx, root)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(genesisBlock.Proto(), highestAt[0].Proto()), "Wanted: %v, received: %v", genesisBlock, highestAt[0])
|
||||
highestAt, err = db.HighestSlotBlocksBelow(ctx, 0)
|
||||
assert.Equal(t, true, proto.Equal(block1.Proto(), b.Proto()), "Wanted: %v, received: %v", block1, b)
|
||||
|
||||
_, roots, err = db.HighestRootsBelowSlot(ctx, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(genesisBlock.Proto(), highestAt[0].Proto()), "Wanted: %v, received: %v", genesisBlock, highestAt[0])
|
||||
require.Equal(t, 1, len(roots))
|
||||
root = roots[0]
|
||||
b, err = db.Block(ctx, root)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(genesisBlock.Proto(), b.Proto()), "Wanted: %v, received: %v", genesisBlock, b)
|
||||
|
||||
_, roots, err = db.HighestRootsBelowSlot(ctx, 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(roots))
|
||||
root = roots[0]
|
||||
b, err = db.Block(ctx, root)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(genesisBlock.Proto(), b.Proto()), "Wanted: %v, received: %v", genesisBlock, b)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -638,22 +666,21 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
|
||||
r3, err := b3.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
hasBlocks, retrievedBlocks, err := db.BlocksBySlot(ctx, 1)
|
||||
retrievedBlocks, err := db.BlocksBySlot(ctx, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(retrievedBlocks), "Unexpected number of blocks received, expected none")
|
||||
assert.Equal(t, false, hasBlocks, "Expected no blocks")
|
||||
hasBlocks, retrievedBlocks, err = db.BlocksBySlot(ctx, 20)
|
||||
retrievedBlocks, err = db.BlocksBySlot(ctx, 20)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(b1.Proto(), retrievedBlocks[0].Proto()), "Wanted: %v, received: %v", b1, retrievedBlocks[0])
|
||||
assert.Equal(t, true, hasBlocks, "Expected to have blocks")
|
||||
hasBlocks, retrievedBlocks, err = db.BlocksBySlot(ctx, 100)
|
||||
assert.Equal(t, true, len(retrievedBlocks) > 0, "Expected to have blocks")
|
||||
retrievedBlocks, err = db.BlocksBySlot(ctx, 100)
|
||||
require.NoError(t, err)
|
||||
if len(retrievedBlocks) != 2 {
|
||||
t.Fatalf("Expected 2 blocks, received %d blocks", len(retrievedBlocks))
|
||||
}
|
||||
assert.Equal(t, true, proto.Equal(b2.Proto(), retrievedBlocks[0].Proto()), "Wanted: %v, received: %v", b2, retrievedBlocks[0])
|
||||
assert.Equal(t, true, proto.Equal(b3.Proto(), retrievedBlocks[1].Proto()), "Wanted: %v, received: %v", b3, retrievedBlocks[1])
|
||||
assert.Equal(t, true, hasBlocks, "Expected to have blocks")
|
||||
assert.Equal(t, true, len(retrievedBlocks) > 0, "Expected to have blocks")
|
||||
|
||||
hasBlockRoots, retrievedBlockRoots, err := db.BlockRootsBySlot(ctx, 1)
|
||||
require.NoError(t, err)
|
||||
@@ -693,4 +720,78 @@ func TestStore_FeeRecipientByValidatorID(t *testing.T) {
|
||||
_, err = db.FeeRecipientByValidatorID(ctx, 3)
|
||||
want := errors.Wrap(ErrNotFoundFeeRecipient, "validator id 3")
|
||||
require.Equal(t, want.Error(), err.Error())
|
||||
|
||||
regs := []*ethpb.ValidatorRegistrationV1{
|
||||
{
|
||||
FeeRecipient: bytesutil.PadTo([]byte("a"), 20),
|
||||
GasLimit: 1,
|
||||
Timestamp: 2,
|
||||
Pubkey: bytesutil.PadTo([]byte("b"), 48),
|
||||
}}
|
||||
require.NoError(t, db.SaveRegistrationsByValidatorIDs(ctx, []types.ValidatorIndex{3}, regs))
|
||||
f, err = db.FeeRecipientByValidatorID(ctx, 3)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, common.Address{'a'}, f)
|
||||
|
||||
_, err = db.FeeRecipientByValidatorID(ctx, 4)
|
||||
want = errors.Wrap(ErrNotFoundFeeRecipient, "validator id 4")
|
||||
require.Equal(t, want.Error(), err.Error())
|
||||
}
|
||||
|
||||
func TestStore_RegistrationsByValidatorID(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
ctx := context.Background()
|
||||
ids := []types.ValidatorIndex{0, 0, 0}
|
||||
regs := []*ethpb.ValidatorRegistrationV1{{}, {}, {}, {}}
|
||||
require.ErrorContains(t, "ids and registrations must be the same length", db.SaveRegistrationsByValidatorIDs(ctx, ids, regs))
|
||||
|
||||
ids = []types.ValidatorIndex{0, 1, 2}
|
||||
regs = []*ethpb.ValidatorRegistrationV1{
|
||||
{
|
||||
FeeRecipient: bytesutil.PadTo([]byte("a"), 20),
|
||||
GasLimit: 1,
|
||||
Timestamp: 2,
|
||||
Pubkey: bytesutil.PadTo([]byte("b"), 48),
|
||||
},
|
||||
{
|
||||
FeeRecipient: bytesutil.PadTo([]byte("c"), 20),
|
||||
GasLimit: 3,
|
||||
Timestamp: 4,
|
||||
Pubkey: bytesutil.PadTo([]byte("d"), 48),
|
||||
},
|
||||
{
|
||||
FeeRecipient: bytesutil.PadTo([]byte("e"), 20),
|
||||
GasLimit: 5,
|
||||
Timestamp: 6,
|
||||
Pubkey: bytesutil.PadTo([]byte("f"), 48),
|
||||
},
|
||||
}
|
||||
require.NoError(t, db.SaveRegistrationsByValidatorIDs(ctx, ids, regs))
|
||||
f, err := db.RegistrationByValidatorID(ctx, 0)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, ðpb.ValidatorRegistrationV1{
|
||||
FeeRecipient: bytesutil.PadTo([]byte("a"), 20),
|
||||
GasLimit: 1,
|
||||
Timestamp: 2,
|
||||
Pubkey: bytesutil.PadTo([]byte("b"), 48),
|
||||
}, f)
|
||||
f, err = db.RegistrationByValidatorID(ctx, 1)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, ðpb.ValidatorRegistrationV1{
|
||||
FeeRecipient: bytesutil.PadTo([]byte("c"), 20),
|
||||
GasLimit: 3,
|
||||
Timestamp: 4,
|
||||
Pubkey: bytesutil.PadTo([]byte("d"), 48),
|
||||
}, f)
|
||||
f, err = db.RegistrationByValidatorID(ctx, 2)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, ðpb.ValidatorRegistrationV1{
|
||||
FeeRecipient: bytesutil.PadTo([]byte("e"), 20),
|
||||
GasLimit: 5,
|
||||
Timestamp: 6,
|
||||
Pubkey: bytesutil.PadTo([]byte("f"), 48),
|
||||
}, f)
|
||||
_, err = db.RegistrationByValidatorID(ctx, 3)
|
||||
want := errors.Wrap(ErrNotFoundFeeRecipient, "validator id 3")
|
||||
require.Equal(t, want.Error(), err.Error())
|
||||
}
|
||||
|
||||
@@ -70,6 +70,8 @@ func isSSZStorageFormat(obj interface{}) bool {
|
||||
return true
|
||||
case *ethpb.VoluntaryExit:
|
||||
return true
|
||||
case *ethpb.ValidatorRegistrationV1:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -105,7 +105,6 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
|
||||
}
|
||||
}
|
||||
datafile := KVStoreDatafilePath(dirPath)
|
||||
start := time.Now()
|
||||
log.Infof("Opening Bolt DB at %s", datafile)
|
||||
boltDB, err := bolt.Open(
|
||||
datafile,
|
||||
@@ -116,40 +115,29 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
log.WithField("elapsed", time.Since(start)).Error("Failed to open Bolt DB")
|
||||
if errors.Is(err, bolt.ErrTimeout) {
|
||||
return nil, errors.New("cannot obtain database lock, database may be in use by another process")
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
log.WithField("elapsed", time.Since(start)).Info("Opened Bolt DB")
|
||||
|
||||
boltDB.AllocSize = boltAllocSize
|
||||
start = time.Now()
|
||||
log.Infof("Creating block cache...")
|
||||
blockCache, err := ristretto.NewCache(&ristretto.Config{
|
||||
NumCounters: 1000, // number of keys to track frequency of (1000).
|
||||
MaxCost: BlockCacheSize, // maximum cost of cache (1000 Blocks).
|
||||
BufferItems: 64, // number of keys per Get buffer.
|
||||
})
|
||||
if err != nil {
|
||||
log.WithField("elapsed", time.Since(start)).Error("Failed to create block cache")
|
||||
return nil, err
|
||||
}
|
||||
log.WithField("elapsed", time.Since(start)).Info("Created block cache")
|
||||
|
||||
start = time.Now()
|
||||
log.Infof("Creating validator cache...")
|
||||
validatorCache, err := ristretto.NewCache(&ristretto.Config{
|
||||
NumCounters: NumOfValidatorEntries, // number of entries in cache (2 Million).
|
||||
MaxCost: ValidatorEntryMaxCost, // maximum size of the cache (64Mb)
|
||||
BufferItems: 64, // number of keys per Get buffer.
|
||||
})
|
||||
if err != nil {
|
||||
log.WithField("elapsed", time.Since(start)).Error("Failed to to create validator cache")
|
||||
return nil, err
|
||||
}
|
||||
log.WithField("elapsed", time.Since(start)).Info("Created validator cache")
|
||||
|
||||
kv := &Store{
|
||||
db: boltDB,
|
||||
@@ -159,8 +147,6 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
|
||||
stateSummaryCache: newStateSummaryCache(),
|
||||
ctx: ctx,
|
||||
}
|
||||
start = time.Now()
|
||||
log.Infof("Updating DB and creating buckets...")
|
||||
if err := kv.db.Update(func(tx *bolt.Tx) error {
|
||||
return createBuckets(
|
||||
tx,
|
||||
@@ -192,15 +178,12 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
|
||||
migrationsBucket,
|
||||
|
||||
feeRecipientBucket,
|
||||
registrationBucket,
|
||||
)
|
||||
}); err != nil {
|
||||
log.WithField("elapsed", time.Since(start)).Error("Failed to update db and create buckets")
|
||||
return nil, err
|
||||
}
|
||||
log.WithField("elapsed", time.Since(start)).Info("Updated db and created buckets")
|
||||
|
||||
err = prometheus.Register(createBoltCollector(kv.db))
|
||||
|
||||
return kv, err
|
||||
}
|
||||
|
||||
|
||||
@@ -7,10 +7,11 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/golang/snappy"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/encoding/ssz/detect"
|
||||
"github.com/prysmaticlabs/prysm/monitoring/progress"
|
||||
v1alpha1 "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/schollz/progressbar/v3"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
@@ -83,104 +84,10 @@ func migrateStateValidators(ctx context.Context, db *bolt.DB) error {
|
||||
// prepare the progress bar with the total count of the keys to migrate
|
||||
bar := progress.InitializeProgressBar(len(keys), "Migrating state validators to new schema.")
|
||||
|
||||
batchNo := 0
|
||||
for batchIndex := 0; batchIndex < len(keys); batchIndex += batchSize {
|
||||
if err := db.Update(func(tx *bolt.Tx) error {
|
||||
//create the source and destination buckets
|
||||
stateBkt := tx.Bucket(stateBucket)
|
||||
if stateBkt == nil {
|
||||
return nil
|
||||
}
|
||||
valBkt := tx.Bucket(stateValidatorsBucket)
|
||||
if valBkt == nil {
|
||||
return nil
|
||||
}
|
||||
indexBkt := tx.Bucket(blockRootValidatorHashesBucket)
|
||||
if indexBkt == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// migrate the key values for this batch
|
||||
cursor := stateBkt.Cursor()
|
||||
count := 0
|
||||
index := batchIndex
|
||||
for _, v := cursor.Seek(keys[index]); count < batchSize && index < len(keys); _, v = cursor.Next() {
|
||||
enc, err := snappy.Decode(nil, v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch {
|
||||
case hasAltairKey(enc):
|
||||
protoState := &v1alpha1.BeaconStateAltair{}
|
||||
if err := protoState.UnmarshalSSZ(enc[len(altairKey):]); err != nil {
|
||||
return errors.Wrap(err, "failed to unmarshal encoding for altair")
|
||||
}
|
||||
// no validators in state to migrate
|
||||
if len(protoState.Validators) == 0 {
|
||||
return fmt.Errorf("no validator entries in state key 0x%s", hexutil.Encode(keys[index]))
|
||||
}
|
||||
validatorKeys, insertErr := insertValidatorHashes(ctx, protoState.Validators, valBkt)
|
||||
if insertErr != nil {
|
||||
return insertErr
|
||||
}
|
||||
// add the validator entry keys for a given block root.
|
||||
compValidatorKeys := snappy.Encode(nil, validatorKeys)
|
||||
idxErr := indexBkt.Put(keys[index], compValidatorKeys)
|
||||
if idxErr != nil {
|
||||
return idxErr
|
||||
}
|
||||
// zero the validator entries in BeaconState object .
|
||||
protoState.Validators = make([]*v1alpha1.Validator, 0)
|
||||
rawObj, err := protoState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stateBytes := snappy.Encode(nil, append(altairKey, rawObj...))
|
||||
if stateErr := stateBkt.Put(keys[index], stateBytes); stateErr != nil {
|
||||
return stateErr
|
||||
}
|
||||
default:
|
||||
protoState := &v1alpha1.BeaconState{}
|
||||
if err := protoState.UnmarshalSSZ(enc); err != nil {
|
||||
return errors.Wrap(err, "failed to unmarshal encoding for phase0")
|
||||
}
|
||||
// no validators in state to migrate
|
||||
if len(protoState.Validators) == 0 {
|
||||
return fmt.Errorf("no validator entries in state key 0x%s", hexutil.Encode(keys[index]))
|
||||
}
|
||||
validatorKeys, insertErr := insertValidatorHashes(ctx, protoState.Validators, valBkt)
|
||||
if insertErr != nil {
|
||||
return insertErr
|
||||
}
|
||||
// add the validator entry keys for a given block root.
|
||||
compValidatorKeys := snappy.Encode(nil, validatorKeys)
|
||||
idxErr := indexBkt.Put(keys[index], compValidatorKeys)
|
||||
if idxErr != nil {
|
||||
return idxErr
|
||||
}
|
||||
// zero the validator entries in BeaconState object .
|
||||
protoState.Validators = make([]*v1alpha1.Validator, 0)
|
||||
stateBytes, err := encode(ctx, protoState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if stateErr := stateBkt.Put(keys[index], stateBytes); stateErr != nil {
|
||||
return stateErr
|
||||
}
|
||||
}
|
||||
count++
|
||||
index++
|
||||
|
||||
if barErr := bar.Add(1); barErr != nil {
|
||||
return barErr
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
if err := db.Update(performValidatorStateMigration(ctx, bar, batchIndex, keys)); err != nil {
|
||||
return err
|
||||
}
|
||||
batchNo++
|
||||
}
|
||||
|
||||
// set the migration entry to done
|
||||
@@ -197,6 +104,87 @@ func migrateStateValidators(ctx context.Context, db *bolt.DB) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func performValidatorStateMigration(ctx context.Context, bar *progressbar.ProgressBar, batchIndex int, keys [][]byte) func(tx *bolt.Tx) error {
|
||||
return func(tx *bolt.Tx) error {
|
||||
// create the source and destination buckets
|
||||
stateBkt := tx.Bucket(stateBucket)
|
||||
if stateBkt == nil {
|
||||
return nil
|
||||
}
|
||||
valBkt := tx.Bucket(stateValidatorsBucket)
|
||||
if valBkt == nil {
|
||||
return nil
|
||||
}
|
||||
indexBkt := tx.Bucket(blockRootValidatorHashesBucket)
|
||||
if indexBkt == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// migrate the key values for this batch
|
||||
cursor := stateBkt.Cursor()
|
||||
count := 0
|
||||
index := batchIndex
|
||||
for _, v := cursor.Seek(keys[index]); count < batchSize && index < len(keys); _, v = cursor.Next() {
|
||||
enc, err := snappy.Decode(nil, v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
item := enc
|
||||
if hasAltairKey(item) {
|
||||
item = item[len(altairKey):]
|
||||
}
|
||||
detector, err := detect.FromState(item)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
beaconState, err := detector.UnmarshalBeaconState(item)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// no validators in state to migrate
|
||||
if beaconState.NumValidators() == 0 {
|
||||
return fmt.Errorf("no validator entries in state key 0x%s", hexutil.Encode(keys[index]))
|
||||
}
|
||||
vals := beaconState.Validators()
|
||||
validatorKeys, insertErr := insertValidatorHashes(ctx, vals, valBkt)
|
||||
if insertErr != nil {
|
||||
return insertErr
|
||||
}
|
||||
// add the validator entry keys for a given block root.
|
||||
compValidatorKeys := snappy.Encode(nil, validatorKeys)
|
||||
idxErr := indexBkt.Put(keys[index], compValidatorKeys)
|
||||
if idxErr != nil {
|
||||
return idxErr
|
||||
}
|
||||
// zero the validator entries in the BeaconState object.
|
||||
if err := beaconState.SetValidators(make([]*v1alpha1.Validator, 0)); err != nil {
|
||||
return err
|
||||
}
|
||||
rawObj, err := beaconState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var stateBytes []byte
|
||||
if hasAltairKey(enc) {
|
||||
stateBytes = snappy.Encode(nil, append(altairKey, rawObj...))
|
||||
} else {
|
||||
stateBytes = snappy.Encode(nil, rawObj)
|
||||
}
|
||||
if stateErr := stateBkt.Put(keys[index], stateBytes); stateErr != nil {
|
||||
return stateErr
|
||||
}
|
||||
count++
|
||||
index++
|
||||
|
||||
if barErr := bar.Add(1); barErr != nil {
|
||||
return barErr
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
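The refactor above drops the per-fork switch in favor of the detect package. A condensed sketch of that version-agnostic path, using only calls that appear in the diff; as in the loop above, any altairKey prefix is assumed to be stripped before the bytes reach this helper.

package example

import (
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/encoding/ssz/detect"
)

// unmarshalAnyFork sniffs the fork version from raw SSZ bytes and returns a
// fork-appropriate BeaconState, mirroring performValidatorStateMigration above.
func unmarshalAnyFork(enc []byte) (state.BeaconState, error) {
	cf, err := detect.FromState(enc) // enc must not carry the altairKey prefix
	if err != nil {
		return nil, err
	}
	return cf.UnmarshalBeaconState(enc)
}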
|
||||
func stateBucketKeys(stateBucket *bolt.Bucket) ([][]byte, error) {
|
||||
var keys [][]byte
|
||||
if err := stateBucket.ForEach(func(pubKey, v []byte) error {
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
v1alpha1 "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
@@ -291,6 +292,12 @@ func Test_migrateAltairStateValidators(t *testing.T) {
|
||||
vals := validators(10)
|
||||
blockRoot := [32]byte{'A'}
|
||||
st, _ := util.DeterministicGenesisStateAltair(t, 20)
|
||||
err := st.SetFork(&v1alpha1.Fork{
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().AltairForkVersion,
|
||||
Epoch: 0,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, st.SetSlot(100))
|
||||
assert.NoError(t, st.SetValidators(vals))
|
||||
assert.NoError(t, dbStore.SaveState(context.Background(), st, blockRoot))
|
||||
|
||||
@@ -19,6 +19,7 @@ var (
|
||||
powchainBucket = []byte("powchain")
|
||||
stateValidatorsBucket = []byte("state-validators")
|
||||
feeRecipientBucket = []byte("fee-recipient")
|
||||
registrationBucket = []byte("registration")
|
||||
|
||||
// Deprecated: This bucket was migrated in PR 6461. Do not use, except for migrations.
|
||||
slotsHasObjectBucket = []byte("slots-has-objects")
|
||||
|
||||
@@ -162,12 +162,27 @@ func (s *Store) SaveStatesEfficient(ctx context.Context, states []state.ReadOnly
|
||||
if states == nil {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
validatorKeys, validatorsEntries, err := getValidators(states)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.db.Update(func(tx *bolt.Tx) error {
|
||||
return s.saveStatesEfficientInternal(ctx, tx, blockRoots, states, validatorKeys, validatorsEntries)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getValidators(states []state.ReadOnlyBeaconState) ([][]byte, map[string]*ethpb.Validator, error) {
|
||||
validatorsEntries := make(map[string]*ethpb.Validator) // It's a map to make sure that you store only new validator entries.
|
||||
validatorKeys := make([][]byte, len(states)) // For every state, this stores a compressed list of validator keys.
|
||||
for i, st := range states {
|
||||
pb, ok := st.InnerStateUnsafe().(withValidators)
|
||||
if !ok {
|
||||
return errors.New("could not cast state to interface with GetValidators()")
|
||||
return nil, nil, errors.New("could not cast state to interface with GetValidators()")
|
||||
}
|
||||
validators := pb.GetValidators()
|
||||
|
||||
@@ -177,7 +192,7 @@ func (s *Store) SaveStatesEfficient(ctx context.Context, states []state.ReadOnly
|
||||
// create the unique hash for that validator entry.
|
||||
hash, hashErr := val.HashTreeRoot()
|
||||
if hashErr != nil {
|
||||
return hashErr
|
||||
return nil, nil, hashErr
|
||||
}
|
||||
hashes = append(hashes, hash[:]...)
|
||||
|
||||
@@ -187,117 +202,113 @@ func (s *Store) SaveStatesEfficient(ctx context.Context, states []state.ReadOnly
|
||||
}
|
||||
validatorKeys[i] = snappy.Encode(nil, hashes)
|
||||
}
|
||||
return validatorKeys, validatorsEntries, nil
|
||||
}
|
||||
|
||||
if err := s.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(stateBucket)
|
||||
valIdxBkt := tx.Bucket(blockRootValidatorHashesBucket)
|
||||
for i, rt := range blockRoots {
|
||||
indicesByBucket := createStateIndicesFromStateSlot(ctx, states[i].Slot())
|
||||
if err := updateValueForIndices(ctx, indicesByBucket, rt[:], tx); err != nil {
|
||||
return errors.Wrap(err, "could not update DB indices")
|
||||
}
|
||||
|
||||
// There is a gap when the states that are passed are used outside this
|
||||
// thread. But while storing the state object, we should not store the
|
||||
// validator entries.To bring the gap closer, we empty the validators
|
||||
// just before Put() and repopulate that state with original validators.
|
||||
// look at issue https://github.com/prysmaticlabs/prysm/issues/9262.
|
||||
switch rawType := states[i].InnerStateUnsafe().(type) {
|
||||
case *ethpb.BeaconState:
|
||||
var pbState *ethpb.BeaconState
|
||||
var err error
|
||||
if features.Get().EnableNativeState {
|
||||
pbState, err = state_native.ProtobufBeaconStatePhase0(rawType)
|
||||
} else {
|
||||
pbState, err = v1.ProtobufBeaconState(rawType)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pbState == nil {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
encodedState, err := encode(ctx, pbState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := bucket.Put(rt[:], encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
case *ethpb.BeaconStateAltair:
|
||||
var pbState *ethpb.BeaconStateAltair
|
||||
var err error
|
||||
if features.Get().EnableNativeState {
|
||||
pbState, err = state_native.ProtobufBeaconStateAltair(rawType)
|
||||
} else {
|
||||
pbState, err = v2.ProtobufBeaconState(rawType)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pbState == nil {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
rawObj, err := pbState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
encodedState := snappy.Encode(nil, append(altairKey, rawObj...))
|
||||
if err := bucket.Put(rt[:], encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
case *ethpb.BeaconStateBellatrix:
|
||||
var pbState *ethpb.BeaconStateBellatrix
|
||||
var err error
|
||||
if features.Get().EnableNativeState {
|
||||
pbState, err = state_native.ProtobufBeaconStateBellatrix(rawType)
|
||||
} else {
|
||||
pbState, err = v3.ProtobufBeaconState(rawType)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pbState == nil {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
rawObj, err := pbState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
encodedState := snappy.Encode(nil, append(bellatrixKey, rawObj...))
|
||||
if err := bucket.Put(rt[:], encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return errors.New("invalid state type")
|
||||
}
|
||||
func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, blockRoots [][32]byte, states []state.ReadOnlyBeaconState, validatorKeys [][]byte, validatorsEntries map[string]*ethpb.Validator) error {
|
||||
bucket := tx.Bucket(stateBucket)
|
||||
valIdxBkt := tx.Bucket(blockRootValidatorHashesBucket)
|
||||
for i, rt := range blockRoots {
|
||||
indicesByBucket := createStateIndicesFromStateSlot(ctx, states[i].Slot())
|
||||
if err := updateValueForIndices(ctx, indicesByBucket, rt[:], tx); err != nil {
|
||||
return errors.Wrap(err, "could not update DB indices")
|
||||
}
|
||||
|
||||
// store the validator entries separately to save space.
|
||||
return s.storeValidatorEntriesSeparately(ctx, tx, validatorsEntries)
|
||||
}); err != nil {
|
||||
return err
|
||||
// There is a gap when the states that are passed are used outside this
|
||||
// thread. But while storing the state object, we should not store the
|
||||
// validator entries. To bring the gap closer, we empty the validators
|
||||
// just before Put() and repopulate that state with original validators.
|
||||
// look at issue https://github.com/prysmaticlabs/prysm/issues/9262.
|
||||
switch rawType := states[i].InnerStateUnsafe().(type) {
|
||||
case *ethpb.BeaconState:
|
||||
var pbState *ethpb.BeaconState
|
||||
var err error
|
||||
if features.Get().EnableNativeState {
|
||||
pbState, err = state_native.ProtobufBeaconStatePhase0(rawType)
|
||||
} else {
|
||||
pbState, err = v1.ProtobufBeaconState(rawType)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pbState == nil {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
encodedState, err := encode(ctx, pbState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := bucket.Put(rt[:], encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
case *ethpb.BeaconStateAltair:
|
||||
var pbState *ethpb.BeaconStateAltair
|
||||
var err error
|
||||
if features.Get().EnableNativeState {
|
||||
pbState, err = state_native.ProtobufBeaconStateAltair(rawType)
|
||||
} else {
|
||||
pbState, err = v2.ProtobufBeaconState(rawType)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pbState == nil {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
rawObj, err := pbState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
encodedState := snappy.Encode(nil, append(altairKey, rawObj...))
|
||||
if err := bucket.Put(rt[:], encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
case *ethpb.BeaconStateBellatrix:
|
||||
var pbState *ethpb.BeaconStateBellatrix
|
||||
var err error
|
||||
if features.Get().EnableNativeState {
|
||||
pbState, err = state_native.ProtobufBeaconStateBellatrix(rawType)
|
||||
} else {
|
||||
pbState, err = v3.ProtobufBeaconState(rawType)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pbState == nil {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
rawObj, err := pbState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
encodedState := snappy.Encode(nil, append(bellatrixKey, rawObj...))
|
||||
if err := bucket.Put(rt[:], encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return errors.New("invalid state type")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
// store the validator entries separately to save space.
|
||||
return s.storeValidatorEntriesSeparately(ctx, tx, validatorsEntries)
|
||||
}
|
||||
|
||||
func (s *Store) storeValidatorEntriesSeparately(ctx context.Context, tx *bolt.Tx, validatorsEntries map[string]*ethpb.Validator) error {
|
||||
|
||||
@@ -4,6 +4,8 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
@@ -99,3 +101,16 @@ func deleteValueForIndices(ctx context.Context, indicesByBucket map[string][]byt
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var errMisalignedRootList = errors.New("incorrectly packed root list, length is not a multiple of 32")
|
||||
|
||||
func splitRoots(b []byte) ([][32]byte, error) {
|
||||
rl := make([][32]byte, 0)
|
||||
if len(b)%32 != 0 {
|
||||
return nil, errors.Wrapf(errMisalignedRootList, "root list len=%d", len(b))
|
||||
}
|
||||
for s, f := 0, 32; f <= len(b); s, f = f, f+32 {
|
||||
rl = append(rl, bytesutil.ToBytes32(b[s:f]))
|
||||
}
|
||||
return rl, nil
|
||||
}
|
||||
|
||||
@@ -138,3 +138,60 @@ func Test_deleteValueForIndices(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testPack(bs [][32]byte) []byte {
|
||||
r := make([]byte, 0)
|
||||
for _, b := range bs {
|
||||
r = append(r, b[:]...)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func TestSplitRoots(t *testing.T) {
|
||||
bt := make([][32]byte, 0)
|
||||
for _, x := range []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} {
|
||||
var b [32]byte
|
||||
for i := 0; i < 32; i++ {
|
||||
b[i] = x
|
||||
}
|
||||
bt = append(bt, b)
|
||||
}
|
||||
cases := []struct {
|
||||
name string
|
||||
b []byte
|
||||
expect [][32]byte
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "misaligned",
|
||||
b: make([]byte, 61),
|
||||
err: errMisalignedRootList,
|
||||
},
|
||||
{
|
||||
name: "happy",
|
||||
b: testPack(bt[0:5]),
|
||||
expect: bt[0:5],
|
||||
},
|
||||
{
|
||||
name: "single",
|
||||
b: testPack([][32]byte{bt[0]}),
|
||||
expect: [][32]byte{bt[0]},
|
||||
},
|
||||
{
|
||||
name: "empty",
|
||||
b: []byte{},
|
||||
expect: [][32]byte{},
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
r, err := splitRoots(c.b)
|
||||
if c.err != nil {
|
||||
require.ErrorIs(t, err, c.err)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, c.expect, r)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"doc.go",
|
||||
"error.go",
|
||||
"interfaces.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice",
|
||||
@@ -13,8 +14,10 @@ go_library(
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -20,12 +20,16 @@ go_library(
|
||||
"//testing/spectest:__subpackages__",
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/forkchoice:go_default_library",
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
@@ -49,13 +53,18 @@ go_test(
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/forkchoice:go_default_library",
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/v3:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
],
|
||||
)
|
||||