Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-11 06:18:05 -05:00
Compare commits: testStuff1...rm-archive (103 commits)
| Author | SHA1 | Date |
|---|---|---|
| | cfbc5e93e9 | |
| | e8143682a8 | |
| | 8153338dbe | |
| | 4a00b295ed | |
| | d2b39e9697 | |
| | 97dc86e742 | |
| | cff3b99918 | |
| | a7e64d699a | |
| | be9847f23c | |
| | 4796827d22 | |
| | 57b7e0b572 | |
| | b5039e9bd9 | |
| | f5d792299f | |
| | 9ce922304f | |
| | 3cbb4aace4 | |
| | c94095b609 | |
| | 4e7720ef8b | |
| | 186dd753d9 | |
| | 980188f260 | |
| | aa7d571d40 | |
| | ae858bbd0a | |
| | 30cd158ae5 | |
| | dfb9a1e575 | |
| | 2db22adfe0 | |
| | dbd44dd42a | |
| | 161a14d256 | |
| | 9dee22f7ab | |
| | 841be3369e | |
| | 80bd557afb | |
| | 52271cf0ba | |
| | e1f56d403c | |
| | a2193ee014 | |
| | 762b3df491 | |
| | d2b329f33e | |
| | c2f40e2ed5 | |
| | 01d28016b1 | |
| | 2b3025828f | |
| | 436792fe38 | |
| | 1d07bffe11 | |
| | f086535c8a | |
| | 3a4c599a96 | |
| | 1c6cbc574e | |
| | 2317375983 | |
| | fecc081537 | |
| | fe98b8b8fd | |
| | dd2ad28474 | |
| | 4b26679224 | |
| | 03f10d5a89 | |
| | 7922043cbc | |
| | b0d5ecec0b | |
| | d2c950e15c | |
| | ac2f238a60 | |
| | 6354748b12 | |
| | e910471784 | |
| | ab7e97ba63 | |
| | e99de7726d | |
| | 606fdd2299 | |
| | 1eb6025aaa | |
| | d431ceee25 | |
| | 4597599196 | |
| | 0c32eb5c03 | |
| | 4b1cb6fa80 | |
| | 9cfb823cc6 | |
| | cb502ceb8c | |
| | 8da4d572d9 | |
| | 1c6fa65f7b | |
| | eaa2566e90 | |
| | 6957f0637f | |
| | 01b1f15bdf | |
| | b787fd877a | |
| | 2c89ce810d | |
| | e687fff922 | |
| | 5e2498be7e | |
| | 76f958710f | |
| | 1775cf89c6 | |
| | 8fecfaee48 | |
| | f089405d2f | |
| | 029c81a2e4 | |
| | 56c48b4971 | |
| | 20ed47a107 | |
| | e30471f1a0 | |
| | 3b38765a2d | |
| | b60e508c89 | |
| | a65c670f5e | |
| | 4af7d8230a | |
| | 27733969f7 | |
| | e70fe1c9fd | |
| | 9b3a834437 | |
| | d815fa8f21 | |
| | ac3079f8cd | |
| | cb8f6423e0 | |
| | 515e7c959f | |
| | 82bbfce524 | |
| | 95430ddb57 | |
| | 21b7861d37 | |
| | c1e7afa201 | |
| | dfa400d4a1 | |
| | b04c28b30c | |
| | ed07359573 | |
| | 25d87dd27b | |
| | a9ccabf6c9 | |
| | 2377d6d6ea | |
| | 100ca0ebaf | |
.github/actions/gofmt/Dockerfile (vendored, 5 changes)

```diff
@@ -1,5 +0,0 @@
-FROM cytopia/gofmt
-
-COPY entrypoint.sh /entrypoint.sh
-
-ENTRYPOINT ["/entrypoint.sh"]
```
.github/actions/gofmt/action.yml (vendored, 12 changes)

```diff
@@ -1,12 +0,0 @@
-name: 'Gofmt checker'
-description: 'Checks that all project files have been properly formatted.'
-inputs:
-  path:
-    description: 'Path to check'
-    required: true
-    default: './'
-runs:
-  using: 'docker'
-  image: 'Dockerfile'
-  args:
-    - ${{ inputs.path }}
```
.github/actions/gofmt/entrypoint.sh (vendored, 15 changes)

```diff
@@ -1,15 +0,0 @@
-#!/bin/sh -l
-set -e
-
-cd $GITHUB_WORKSPACE
-
-# Check if any files are not formatted.
-nonformatted="$(gofmt -l $1 2>&1)"
-
-# Return if `go fmt` passes.
-[ -z "$nonformatted" ] && exit 0
-
-# Notify of issues with formatting.
-echo "Following files need to be properly formatted:"
-echo "$nonformatted"
-exit 1
```
.github/workflows/go.yml (vendored, 15 changes)

```diff
@@ -18,18 +18,6 @@ jobs:
         id: gomodtidy
         uses: ./.github/actions/gomodtidy

-      - name: Gofmt checker
-        id: gofmt
-        uses: ./.github/actions/gofmt
-        with:
-          path: ./
-
-      - name: GoImports checker
-        id: goimports
-        uses: Jerome1337/goimports-action@v1.0.2
-        with:
-          goimports-path: ./
-
  gosec:
    name: Gosec scan
    runs-on: ubuntu-latest
@@ -45,7 +33,7 @@ jobs:
      - name: Run Gosec Security Scanner
        run: | # https://github.com/securego/gosec/issues/469
          export PATH=$PATH:$(go env GOPATH)/bin
-          go install github.com/securego/gosec/v2/cmd/gosec@latest
+          go install github.com/securego/gosec/v2/cmd/gosec@v2.12.0
          gosec -exclude=G307 -exclude-dir=crypto/bls/herumi ./...

  lint:
@@ -65,6 +53,7 @@ jobs:
        uses: golangci/golangci-lint-action@v3
        with:
+          version: v1.47.2
          args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number

  build:
    name: Build
```
```diff
@@ -11,7 +11,9 @@ run:
 linters:
   disable-all: true
   enable:
     - deadcode
+    - gofmt
+    - goimports
     - unused
     - errcheck
     - gosimple
     - gocognit
```
```diff
@@ -2,8 +2,8 @@
 [](https://buildkite.com/prysmatic-labs/prysm)
 [](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
-[](https://github.com/ethereum/consensus-specs/tree/v1.2.0-rc.1)
-[](https://github.com/ethereum/execution-apis/tree/v1.0.0-alpha.9/src/engine)
+[](https://github.com/ethereum/consensus-specs/tree/v1.2.0-rc.3)
+[](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.1/src/engine)
 [](https://discord.gg/CTYGPUJ)
 [](https://www.gitpoap.io/gh/prysmaticlabs/prysm)
```
WORKSPACE (22 changes)

```diff
@@ -28,7 +28,7 @@ load("@com_grail_bazel_toolchain//toolchain:rules.bzl", "llvm_toolchain")

 llvm_toolchain(
     name = "llvm_toolchain",
-    llvm_version = "10.0.0",
+    llvm_version = "13.0.1",
 )

 load("@llvm_toolchain//:toolchains.bzl", "llvm_register_toolchains")
@@ -215,7 +215,7 @@ filegroup(
     url = "https://github.com/eth-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
 )

-consensus_spec_version = "v1.2.0-rc.1"
+consensus_spec_version = "v1.2.0-rc.3"

 bls_test_version = "v0.1.1"

@@ -231,7 +231,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "9c93f87378aaa6d6fe1c67b396eac2aacc9594af2a83f028cb99c95dea5b81df",
+    sha256 = "18ca21497f41042cdbe60e2333b100d218b2994fb514964b9deb23daf615a12f",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
 )

@@ -247,7 +247,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "52f2c52415228cee8a4de5a09abff785f439a77dfef8f03e834e4e16857673c1",
+    sha256 = "47b8f6fabe39b4a69f13054ba74e26ab51581ddbd359c18cf0f03317474e299c",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
 )

@@ -263,7 +263,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "022dcc0d6de7dd27b337a0d1b945077eaf5ee47000700395a693fc25e12f96df",
+    sha256 = "a061efc05429b169393c32dc2633a948269461b0fe681f11d41e170a880dcc71",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
 )

@@ -278,7 +278,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "0a9c110305cbd6ebbe0d942f0f33e6ce22dd484ce4ceed277bf185a091941cde",
+    sha256 = "753d51c6a6cc6df101c897e4bea77f73b271f50aeda74440f412514d4bd88a86",
     strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
     url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
 )
@@ -309,9 +309,9 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "126b615e3853e29b61f082f6c89c8bc1c38cd92fb84b0004396fc49e7acc8d9f",
-    strip_prefix = "eth2-networks-f3ccbe0cf5798d5cd23e4e6e7119aefa043c0935",
-    url = "https://github.com/eth-clients/eth2-networks/archive/f3ccbe0cf5798d5cd23e4e6e7119aefa043c0935.tar.gz",
+    sha256 = "82b01a48b143fe0f2fb7fb5f5dd385c1f934335a12d7954f08b1d45d77427b5e",
+    strip_prefix = "eth2-networks-674f7a1d01d9c18345456eab76e3871b3df2126b",
+    url = "https://github.com/eth-clients/eth2-networks/archive/674f7a1d01d9c18345456eab76e3871b3df2126b.tar.gz",
 )

 http_archive(
@@ -342,9 +342,9 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "98013b40922e54a64996da49b939e0a88fe2456f68eedc5aee4ceba0f8623f71",
+    sha256 = "e0c0b5dc609b3a221e74c720f483c595441f2ad5e38bb8aa3522636039945a6f",
     urls = [
-        "https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v2.0.0/prysm-web-ui.tar.gz",
+        "https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v2.0.1/prysm-web-ui.tar.gz",
     ],
 )
```
```diff
@@ -21,14 +21,13 @@ import (
 // OriginData represents the BeaconState and SignedBeaconBlock necessary to start an empty Beacon Node
 // using Checkpoint Sync.
 type OriginData struct {
-	wsd *WeakSubjectivityData
 	sb  []byte
 	bb  []byte
 	st  state.BeaconState
 	b   interfaces.SignedBeaconBlock
 	vu  *detect.VersionedUnmarshaler
 	br  [32]byte
 	sr  [32]byte
 }

 // SaveBlock saves the downloaded block to a unique file in the given path.
```
```diff
@@ -95,8 +95,6 @@ func WithTimeout(timeout time.Duration) ClientOpt {
 // Client provides a collection of helper methods for calling the Eth Beacon Node API endpoints.
 type Client struct {
 	hc      *http.Client
-	host    string
-	scheme  string
 	baseURL *url.URL
 }
```
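Folding the separate `host` and `scheme` fields into one parsed `*url.URL` works because the URL type already carries both parts and can resolve endpoint paths. A minimal sketch of that idea, using only the standard library (the constructor and endpoint path here are illustrative, not Prysm's actual API):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// One parsed base URL replaces separate scheme and host fields.
	base, err := url.Parse("http://localhost:3500")
	if err != nil {
		panic(err)
	}
	fmt.Println(base.Scheme, base.Host) // http localhost:3500

	// Endpoint paths can be resolved against the base URL directly.
	endpoint := base.ResolveReference(&url.URL{Path: "/eth/v1/node/health"})
	fmt.Println(endpoint) // http://localhost:3500/eth/v1/node/health
}
```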
```diff
@@ -25,10 +25,8 @@ import (
 type ChainInfoFetcher interface {
 	HeadFetcher
 	FinalizationFetcher
-	GenesisFetcher
 	CanonicalFetcher
 	ForkFetcher
-	TimeFetcher
 	HeadDomainFetcher
 }

@@ -70,6 +68,8 @@ type HeadFetcher interface {
 type ForkFetcher interface {
 	ForkChoicer() forkchoice.ForkChoicer
 	CurrentFork() *ethpb.Fork
+	GenesisFetcher
+	TimeFetcher
 }

 // CanonicalFetcher retrieves the current chain's canonical information.
```
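Moving `GenesisFetcher` and `TimeFetcher` into `ForkFetcher` relies on Go interface embedding: any type satisfying the outer interface automatically satisfies the embedded ones, so callers that need fork, genesis, and time data can now accept a single interface. A toy sketch with assumed method shapes (stand-ins, not the real Prysm signatures):

```go
package main

import (
	"fmt"
	"time"
)

// Stand-ins for the Prysm fetcher interfaces, for illustration only.
type TimeFetcher interface {
	GenesisTime() time.Time
}

type GenesisFetcher interface {
	GenesisValidatorsRoot() [32]byte
}

// Embedding: every ForkFetcher is also a TimeFetcher and GenesisFetcher.
type ForkFetcher interface {
	CurrentForkVersion() [4]byte
	GenesisFetcher
	TimeFetcher
}

type chain struct{}

func (chain) GenesisTime() time.Time          { return time.Unix(1606824023, 0) }
func (chain) GenesisValidatorsRoot() [32]byte { return [32]byte{} }
func (chain) CurrentForkVersion() [4]byte     { return [4]byte{0, 0, 0, 2} }

func main() {
	var f ForkFetcher = chain{}
	// One value now serves all three roles.
	fmt.Println(f.CurrentForkVersion(), f.GenesisTime().UTC())
}
```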
```diff
@@ -17,6 +17,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
 	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
+	"github.com/prysmaticlabs/prysm/v3/math"
 	ethpbv1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
 	"github.com/prysmaticlabs/prysm/v3/time/slots"
 	"github.com/sirupsen/logrus"
@@ -87,7 +88,6 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
 		return nil
 	}

-	// A chain re-org occurred, so we fire an event notifying the rest of the services.
 	s.headLock.RLock()
 	oldHeadBlock, err := s.headBlock()
 	if err != nil {
@@ -98,12 +98,23 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
 	headSlot := s.HeadSlot()
 	newHeadSlot := headBlock.Block().Slot()
 	newStateRoot := headBlock.Block().StateRoot()

+	// A chain re-org occurred, so we fire an event notifying the rest of the services.
 	if bytesutil.ToBytes32(headBlock.Block().ParentRoot()) != oldHeadRoot {
+		commonRoot, forkSlot, err := s.ForkChoicer().CommonAncestor(ctx, oldHeadRoot, newHeadRoot)
+		if err != nil {
+			log.WithError(err).Error("Could not find common ancestor root")
+			commonRoot = params.BeaconConfig().ZeroHash
+		}
 		log.WithFields(logrus.Fields{
-			"newSlot": fmt.Sprintf("%d", newHeadSlot),
-			"oldSlot": fmt.Sprintf("%d", headSlot),
-		}).Debug("Chain reorg occurred")
-		absoluteSlotDifference := slots.AbsoluteValueSlotDifference(newHeadSlot, headSlot)
+			"newSlot":            fmt.Sprintf("%d", newHeadSlot),
+			"newRoot":            fmt.Sprintf("%#x", newHeadRoot),
+			"oldSlot":            fmt.Sprintf("%d", headSlot),
+			"oldRoot":            fmt.Sprintf("%#x", oldHeadRoot),
+			"commonAncestorRoot": fmt.Sprintf("%#x", commonRoot),
+			"distance":           headSlot + newHeadSlot - 2*forkSlot,
+			"depth":              math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot)),
+		}).Info("Chain reorg occurred")
 		isOptimistic, err := s.IsOptimistic(ctx)
 		if err != nil {
 			return errors.Wrap(err, "could not check if node is optimistically synced")
@@ -112,7 +123,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
 			Type: statefeed.Reorg,
 			Data: &ethpbv1.EventChainReorg{
 				Slot:         newHeadSlot,
-				Depth:        absoluteSlotDifference,
+				Depth:        math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot)),
 				OldHeadBlock: oldHeadRoot[:],
 				NewHeadBlock: newHeadRoot[:],
 				OldHeadState: oldStateRoot,
@@ -333,7 +344,7 @@ func (s *Service) notifyNewHeadEvent(
 // This saves the attestations between `orphanedRoot` and the common ancestor root that is derived using `newHeadRoot`.
 // It also filters out the attestations that is one epoch older as a defense so invalid attestations don't flow into the attestation pool.
 func (s *Service) saveOrphanedAtts(ctx context.Context, orphanedRoot [32]byte, newHeadRoot [32]byte) error {
-	commonAncestorRoot, err := s.ForkChoicer().CommonAncestorRoot(ctx, newHeadRoot, orphanedRoot)
+	commonAncestorRoot, _, err := s.ForkChoicer().CommonAncestor(ctx, newHeadRoot, orphanedRoot)
 	switch {
 	// Exit early if there's no common ancestor and root doesn't exist, there would be nothing to save.
 	case errors.Is(err, forkchoice.ErrUnknownCommonAncestor):
```

```diff
@@ -150,6 +150,8 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
 	assert.DeepEqual(t, newHeadSignedBlock, pb, "Head did not change")
 	assert.DeepSSZEqual(t, headState.CloneInnerState(), service.headState(ctx).CloneInnerState(), "Head did not change")
 	require.LogsContain(t, hook, "Chain reorg occurred")
+	require.LogsContain(t, hook, "distance=1")
+	require.LogsContain(t, hook, "depth=1")
 }

 func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
```
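The two new log fields follow directly from the common-ancestor slot: with both branches splitting at fork slot F, `distance` is the total number of slots across both branches between the heads (O + N - 2F), and `depth` is the length of the longer branch (max(O-F, N-F)). A tiny worked example with made-up slot numbers (the local `max` helper stands in for the diff's `math.Max`):

```go
package main

import "fmt"

// max stands in for the prysm math.Max used in the diff.
func max(a, b uint64) uint64 {
	if a > b {
		return a
	}
	return b
}

func main() {
	// Hypothetical reorg: both branches split from a common ancestor at slot 96.
	forkSlot := uint64(96)
	oldHeadSlot := uint64(99)  // old branch extends 3 slots past the fork
	newHeadSlot := uint64(101) // new branch extends 5 slots past the fork

	// "distance": total slots walked across both branches between the two heads.
	distance := oldHeadSlot + newHeadSlot - 2*forkSlot
	// "depth": length of the longer branch since the common ancestor.
	depth := max(oldHeadSlot-forkSlot, newHeadSlot-forkSlot)

	fmt.Println(distance, depth) // 8 5
}
```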
```diff
@@ -139,6 +139,9 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlock,
 	if err := s.insertBlockToForkchoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
 		return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
 	}
+	if err := s.handleBlockAttestations(ctx, signed.Block(), postState); err != nil {
+		return errors.Wrap(err, "could not handle block's attestations")
+	}
 	s.InsertSlashingsToForkChoiceStore(ctx, signed.Block().Body().AttesterSlashings())
 	if isValidPayload {
 		if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil {
@@ -516,6 +519,29 @@ func (s *Service) insertBlockToForkchoiceStore(ctx context.Context, blk interfaces.BeaconBlock,
 	return nil
 }

+// This feeds in the attestations included in the block to fork choice store. It's allows fork choice store
+// to gain information on the most current chain.
+func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.BeaconBlock, st state.BeaconState) error {
+	// Feed in block's attestations to fork choice store.
+	for _, a := range blk.Body().Attestations() {
+		committee, err := helpers.BeaconCommitteeFromState(ctx, st, a.Data.Slot, a.Data.CommitteeIndex)
+		if err != nil {
+			return err
+		}
+		indices, err := attestation.AttestingIndices(a.AggregationBits, committee)
+		if err != nil {
+			return err
+		}
+		r := bytesutil.ToBytes32(a.Data.BeaconBlockRoot)
+		if s.cfg.ForkChoiceStore.HasNode(r) {
+			s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, r, a.Data.Target.Epoch)
+		} else if err := s.cfg.AttPool.SaveBlockAttestation(a); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
 // InsertSlashingsToForkChoiceStore inserts attester slashing indices to fork choice store.
 // To call this function, it's caller's responsibility to ensure the slashing object is valid.
 func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []*ethpb.AttesterSlashing) {
```
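The branch in `handleBlockAttestations` hinges on expanding an attestation's aggregation bits over its committee: bit i set means `committee[i]` attested, and the resulting indices are fed straight into fork choice when the attested-to block is already known. A minimal sketch of that assumed expansion semantics (a simplified stand-in for `attestation.AttestingIndices`, which operates on a real bitfield type):

```go
package main

import "fmt"

// attestingIndices mirrors the assumed semantics of attestation.AttestingIndices:
// bit i of the aggregation bitfield set means committee[i] attested.
func attestingIndices(bits []bool, committee []uint64) []uint64 {
	indices := make([]uint64, 0, len(committee))
	for i, set := range bits {
		if set && i < len(committee) {
			indices = append(indices, committee[i])
		}
	}
	return indices
}

func main() {
	committee := []uint64{14, 92, 7, 33} // validator indices assigned to this slot/committee
	bits := []bool{true, false, true, false}
	fmt.Println(attestingIndices(bits, committee)) // [14 7]
}
```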
```diff
@@ -3006,10 +3006,9 @@ func TestStore_NoViableHead_Reboot_DoublyLinkedTree(t *testing.T) {
 	headRoot, err := service.HeadRoot(ctx)
 	require.NoError(t, err)
 	require.Equal(t, genesisRoot, bytesutil.ToBytes32(headRoot))
-	// The node is optimistic now.
 	optimistic, err := service.IsOptimistic(ctx)
 	require.NoError(t, err)
-	require.Equal(t, true, optimistic)
+	require.Equal(t, false, optimistic)
 	require.Equal(t, false, service.ForkChoicer().AllTipsAreInvalid())

 	// Check that the node's justified checkpoint does not agree with the
@@ -3230,10 +3229,9 @@ func TestStore_NoViableHead_Reboot_Protoarray(t *testing.T) {
 	headRoot, err := service.HeadRoot(ctx)
 	require.NoError(t, err)
 	require.Equal(t, genesisRoot, bytesutil.ToBytes32(headRoot))
-	// The node is optimistic now
 	optimistic, err := service.IsOptimistic(ctx)
 	require.NoError(t, err)
-	require.Equal(t, true, optimistic)
+	require.Equal(t, false, optimistic)
 	require.Equal(t, false, service.ForkChoicer().AllTipsAreInvalid())

 	// Check that the node's justified checkpoint does not agree with the
@@ -3314,6 +3312,75 @@ func TestStore_NoViableHead_Reboot_Protoarray(t *testing.T) {
 	require.Equal(t, false, service.ForkChoicer().AllTipsAreInvalid())
 }

+func TestOnBlock_HandleBlockAttestations(t *testing.T) {
+	ctx := context.Background()
+	beaconDB := testDB.SetupDB(t)
+	opts := []Option{
+		WithDatabase(beaconDB),
+		WithAttestationPool(attestations.NewPool()),
+		WithStateGen(stategen.New(beaconDB)),
+		WithForkChoiceStore(doublylinkedtree.New()),
+		WithStateNotifier(&mock.MockStateNotifier{}),
+	}
+	service, err := NewService(ctx, opts...)
+	require.NoError(t, err)
+
+	st, keys := util.DeterministicGenesisState(t, 64)
+	stateRoot, err := st.HashTreeRoot(ctx)
+	require.NoError(t, err, "Could not hash genesis state")
+
+	require.NoError(t, service.saveGenesisData(ctx, st))
+
+	genesis := blocks.NewGenesisBlock(stateRoot[:])
+	wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
+	require.NoError(t, err)
+	require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
+	parentRoot, err := genesis.Block.HashTreeRoot()
+	require.NoError(t, err, "Could not get signing root")
+	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
+	require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
+
+	st, err = service.HeadState(ctx)
+	require.NoError(t, err)
+	b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
+	require.NoError(t, err)
+	wsb, err = consensusblocks.NewSignedBeaconBlock(b)
+	require.NoError(t, err)
+	root, err := b.Block.HashTreeRoot()
+	require.NoError(t, err)
+	require.NoError(t, service.onBlock(ctx, wsb, root))
+
+	st, err = service.HeadState(ctx)
+	require.NoError(t, err)
+	b, err = util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
+	require.NoError(t, err)
+	wsb, err = consensusblocks.NewSignedBeaconBlock(b)
+	require.NoError(t, err)
+
+	// prepare another block that is not inserted
+	st3, err := transition.ExecuteStateTransition(ctx, st, wsb)
+	require.NoError(t, err)
+	b3, err := util.GenerateFullBlock(st3, keys, util.DefaultBlockGenConfig(), 3)
+	require.NoError(t, err)
+	wsb3, err := consensusblocks.NewSignedBeaconBlock(b3)
+	require.NoError(t, err)
+
+	require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
+	a := wsb.Block().Body().Attestations()[0]
+	r := bytesutil.ToBytes32(a.Data.BeaconBlockRoot)
+	require.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r))
+
+	require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
+	a3 := wsb3.Block().Body().Attestations()[0]
+	r3 := bytesutil.ToBytes32(a3.Data.BeaconBlockRoot)
+	require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))
+
+	require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committe as st
+	require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
+	require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committe as st
+	require.Equal(t, 1, len(service.cfg.AttPool.BlockAttestations()))
+}
+
 // Helper function to simulate the block being on time or delayed for proposer
 // boost. It alters the genesisTime tracked by the store.
 func driftGenesisTime(s *Service, slot int64, delay int64) {
```
```diff
@@ -150,11 +150,6 @@ func (s *Service) handlePostBlockOperations(b interfaces.BeaconBlock) error {
 		return err
 	}

-	// Add block attestations to the fork choice pool to compute head.
-	if err := s.cfg.AttPool.SaveBlockAttestations(b.Body().Attestations()); err != nil {
-		log.WithError(err).Error("Could not save block attestations for fork choice")
-		return nil
-	}
 	// Mark block exits as seen so we don't include same ones in future blocks.
 	for _, e := range b.Body().VoluntaryExits() {
 		s.cfg.ExitPool.MarkIncluded(e)
```
```diff
@@ -76,9 +76,9 @@ func TestService_ReceiveBlock(t *testing.T) {
 			),
 		},
 		check: func(t *testing.T, s *Service) {
-			if baCount := len(s.cfg.AttPool.BlockAttestations()); baCount != 2 {
+			if baCount := len(s.cfg.AttPool.BlockAttestations()); baCount != 0 {
 				t.Errorf("Did not get the correct number of block attestations saved to the pool. "+
-					"Got %d but wanted %d", baCount, 2)
+					"Got %d but wanted %d", baCount, 0)
 			}
 		},
 	},
```
```diff
@@ -231,14 +231,15 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
 	if err := forkChoicer.InsertNode(s.ctx, st, fRoot); err != nil {
 		return errors.Wrap(err, "could not insert finalized block to forkchoice")
 	}
-
-	lastValidatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(s.ctx)
-	if err != nil {
-		return errors.Wrap(err, "could not get last validated checkpoint")
-	}
-	if bytes.Equal(finalized.Root, lastValidatedCheckpoint.Root) {
-		if err := forkChoicer.SetOptimisticToValid(s.ctx, fRoot); err != nil {
-			return errors.Wrap(err, "could not set finalized block as validated")
+	if !features.Get().EnableStartOptimistic {
+		lastValidatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(s.ctx)
+		if err != nil {
+			return errors.Wrap(err, "could not get last validated checkpoint")
+		}
+		if bytes.Equal(finalized.Root, lastValidatedCheckpoint.Root) {
+			if err := forkChoicer.SetOptimisticToValid(s.ctx, fRoot); err != nil {
+				return errors.Wrap(err, "could not set finalized block as validated")
+			}
 		}
 	}
 	// not attempting to save initial sync blocks here, because there shouldn't be any until
```
```diff
@@ -26,6 +26,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
 	v1 "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/v1"
+	"github.com/prysmaticlabs/prysm/v3/config/features"
 	"github.com/prysmaticlabs/prysm/v3/config/params"
 	consensusblocks "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
 	"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
@@ -528,3 +529,45 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
 		require.Equal(b, true, s.cfg.ForkChoiceStore.HasNode(r), "Block is not in fork choice store")
 	}
 }
+
+func TestChainService_EverythingOptimistic(t *testing.T) {
+	resetFn := features.InitWithReset(&features.Flags{
+		EnableStartOptimistic: true,
+	})
+	defer resetFn()
+	beaconDB := testDB.SetupDB(t)
+	ctx := context.Background()
+
+	genesis := util.NewBeaconBlock()
+	genesisRoot, err := genesis.Block.HashTreeRoot()
+	require.NoError(t, err)
+	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
+	util.SaveBlock(t, ctx, beaconDB, genesis)
+
+	finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
+	headBlock := util.NewBeaconBlock()
+	headBlock.Block.Slot = finalizedSlot
+	headBlock.Block.ParentRoot = bytesutil.PadTo(genesisRoot[:], 32)
+	headState, err := util.NewBeaconState()
+	require.NoError(t, err)
+	require.NoError(t, headState.SetSlot(finalizedSlot))
+	require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
+	headRoot, err := headBlock.Block.HashTreeRoot()
+	require.NoError(t, err)
+	require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
+	require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
+	util.SaveBlock(t, ctx, beaconDB, headBlock)
+	require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
+	attSrv, err := attestations.NewService(ctx, &attestations.Config{})
+	require.NoError(t, err)
+	stateGen := stategen.New(beaconDB)
+	c, err := NewService(ctx, WithDatabase(beaconDB), WithStateGen(stateGen), WithAttestationService(attSrv), WithStateNotifier(&mock.MockStateNotifier{}), WithFinalizedStateAtStartUp(headState))
+	require.NoError(t, err)
+	require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))
+	require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
+	require.NoError(t, c.StartFromSavedState(headState))
+	require.Equal(t, true, c.cfg.ForkChoiceStore.HasNode(headRoot))
+	op, err := c.cfg.ForkChoiceStore.IsOptimistic(headRoot)
+	require.NoError(t, err)
+	require.Equal(t, true, op)
+}
```
```diff
@@ -185,7 +185,21 @@ func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*ethpb.DepositContainer {
 	dc.depositsLock.RLock()
 	defer dc.depositsLock.RUnlock()

-	return dc.deposits
+	// Make a shallow copy of the deposits and return that. This way, the
+	// caller can safely iterate over the returned list of deposits without
+	// the possibility of new deposits showing up. If we were to return the
+	// list without a copy, when a new deposit is added to the cache, it
+	// would also be present in the returned value. This could result in a
+	// race condition if the list is being iterated over.
+	//
+	// It's not necessary to make a deep copy of this list because the
+	// deposits in the cache should never be modified. It is still possible
+	// for the caller to modify one of the underlying deposits and modify
+	// the cache, but that's not a race condition. Also, a deep copy would
+	// take too long and use too much memory.
+	deposits := make([]*ethpb.DepositContainer, len(dc.deposits))
+	copy(deposits, dc.deposits)
+	return deposits
 }

 // AllDeposits returns a list of historical deposits until the given block number
```
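The shallow-copy fix in miniature: handing out the internal slice lets a later `append` under the write lock race with a caller iterating outside any lock, while copying the slice header into a fresh backing array gives the caller a stable snapshot. A self-contained sketch of the pattern (toy types, not the Prysm cache):

```go
package main

import (
	"fmt"
	"sync"
)

type item struct{ v int }

type cache struct {
	mu    sync.RWMutex
	items []*item
}

// allUnsafe hands the caller the internal slice: a concurrent add can
// grow or rewrite the same backing array while the caller iterates.
func (c *cache) allUnsafe() []*item {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.items
}

// all returns a shallow copy. The snapshot slice is stable; the
// pointed-to items are shared, which is fine while they stay immutable.
func (c *cache) all() []*item {
	c.mu.RLock()
	defer c.mu.RUnlock()
	out := make([]*item, len(c.items))
	copy(out, c.items)
	return out
}

func (c *cache) add(it *item) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items = append(c.items, it)
}

func main() {
	c := &cache{}
	c.add(&item{1})
	snapshot := c.all()
	c.add(&item{2}) // does not affect the snapshot
	fmt.Println(len(snapshot), len(c.allUnsafe())) // 1 2
}
```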
```diff
@@ -55,7 +55,7 @@ func (dc *DepositCache) PendingDeposits(ctx context.Context, untilBlk *big.Int)

 	depositCntrs := dc.PendingContainers(ctx, untilBlk)

-	var deposits []*ethpb.Deposit
+	deposits := make([]*ethpb.Deposit, 0, len(depositCntrs))
 	for _, dep := range depositCntrs {
 		deposits = append(deposits, dep.Deposit)
 	}
@@ -71,7 +71,7 @@ func (dc *DepositCache) PendingContainers(ctx context.Context, untilBlk *big.Int
 	dc.depositsLock.RLock()
 	defer dc.depositsLock.RUnlock()

-	var depositCntrs []*ethpb.DepositContainer
+	depositCntrs := make([]*ethpb.DepositContainer, 0, len(dc.pendingDeposits))
 	for _, ctnr := range dc.pendingDeposits {
 		if untilBlk == nil || untilBlk.Uint64() >= ctnr.Eth1BlockHeight {
 			depositCntrs = append(depositCntrs, ctnr)
@@ -139,7 +139,7 @@ func (dc *DepositCache) PrunePendingDeposits(ctx context.Context, merkleTreeInde
 	dc.depositsLock.Lock()
 	defer dc.depositsLock.Unlock()

-	var cleanDeposits []*ethpb.DepositContainer
+	cleanDeposits := make([]*ethpb.DepositContainer, 0, len(dc.pendingDeposits))
 	for _, dp := range dc.pendingDeposits {
 		if dp.Index >= merkleTreeIndex {
 			cleanDeposits = append(cleanDeposits, dp)
```

```diff
@@ -16,7 +16,7 @@ func UpdateGenesisEth1Data(state state.BeaconState, deposits []*ethpb.Deposit, e
 		return nil, errors.New("no eth1data provided for genesis state")
 	}

-	var leaves [][]byte
+	leaves := make([][]byte, 0, len(deposits))
 	for _, deposit := range deposits {
 		if deposit == nil || deposit.Data == nil {
 			return nil, fmt.Errorf("nil deposit or deposit with nil data cannot be processed: %v", deposit)
```
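All four hunks above apply the same micro-optimization: replacing a nil slice that grows by repeated `append` with a slice preallocated to the known upper-bound capacity, so the backing array is allocated once instead of being reallocated and copied roughly log2(n) times. A minimal demonstration of the difference:

```go
package main

import "fmt"

func main() {
	src := make([]int, 1000)

	// A nil slice grows on demand: append reallocates the backing
	// array several times as the length crosses each capacity step.
	var grown []int
	for _, v := range src {
		grown = append(grown, v)
	}

	// Preallocating capacity sizes the backing array once up front;
	// append never reallocates, and the length still starts at zero.
	prealloc := make([]int, 0, len(src))
	for _, v := range src {
		prealloc = append(prealloc, v)
	}

	fmt.Println(len(grown), len(prealloc), cap(prealloc)) // 1000 1000 1000
}
```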
```diff
@@ -9,8 +9,8 @@ import (
 )

 // NewDB initializes a new DB.
-func NewDB(ctx context.Context, dirPath string, config *kv.Config) (Database, error) {
-	return kv.NewKVStore(ctx, dirPath, config)
+func NewDB(ctx context.Context, dirPath string) (Database, error) {
+	return kv.NewKVStore(ctx, dirPath)
 }

 // NewDBFilename uses the KVStoreDatafilePath so that if this layer of
```
```diff
@@ -38,14 +38,9 @@ type ReadOnlyDatabase interface {
 	HasState(ctx context.Context, blockRoot [32]byte) bool
 	StateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error)
 	HasStateSummary(ctx context.Context, blockRoot [32]byte) bool
-	HighestSlotStatesBelow(ctx context.Context, slot types.Slot) ([]state.ReadOnlyBeaconState, error)
 	// Checkpoint operations.
 	JustifiedCheckpoint(ctx context.Context) (*ethpb.Checkpoint, error)
 	FinalizedCheckpoint(ctx context.Context) (*ethpb.Checkpoint, error)
-	ArchivedPointRoot(ctx context.Context, slot types.Slot) [32]byte
-	HasArchivedPoint(ctx context.Context, slot types.Slot) bool
-	LastArchivedRoot(ctx context.Context) [32]byte
-	LastArchivedSlot(ctx context.Context) (types.Slot, error)
 	LastValidatedCheckpoint(ctx context.Context) (*ethpb.Checkpoint, error)
 	// Deposit contract related handlers.
 	DepositContractAddress(ctx context.Context) ([]byte, error)
```
```diff
@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
 go_library(
     name = "go_default_library",
     srcs = [
-        "archived_point.go",
         "backup.go",
         "blocks.go",
         "checkpoint.go",
@@ -17,7 +16,6 @@ go_library(
         "kv.go",
         "log.go",
         "migration.go",
-        "migration_archived_index.go",
         "migration_blinded_beacon_blocks.go",
         "migration_block_slot_index.go",
         "migration_state_validators.go",
@@ -78,7 +76,6 @@ go_library(
 go_test(
     name = "go_default_test",
     srcs = [
-        "archived_point_test.go",
         "backup_test.go",
         "blocks_test.go",
         "checkpoint_test.go",
@@ -89,7 +86,6 @@ go_test(
         "genesis_test.go",
         "init_test.go",
         "kv_test.go",
-        "migration_archived_index_test.go",
         "migration_block_slot_index_test.go",
         "migration_state_validators_test.go",
         "state_summary_test.go",
```
```diff
@@ -1,75 +0,0 @@
-package kv
-
-import (
-	"context"
-
-	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
-	"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
-	bolt "go.etcd.io/bbolt"
-	"go.opencensus.io/trace"
-)
-
-// LastArchivedSlot from the db.
-func (s *Store) LastArchivedSlot(ctx context.Context) (types.Slot, error) {
-	ctx, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedSlot")
-	defer span.End()
-	var index types.Slot
-	err := s.db.View(func(tx *bolt.Tx) error {
-		bkt := tx.Bucket(stateSlotIndicesBucket)
-		b, _ := bkt.Cursor().Last()
-		index = bytesutil.BytesToSlotBigEndian(b)
-		return nil
-	})
-
-	return index, err
-}
-
-// LastArchivedRoot from the db.
-func (s *Store) LastArchivedRoot(ctx context.Context) [32]byte {
-	ctx, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedRoot")
-	defer span.End()
-
-	var blockRoot []byte
-	if err := s.db.View(func(tx *bolt.Tx) error {
-		bkt := tx.Bucket(stateSlotIndicesBucket)
-		_, blockRoot = bkt.Cursor().Last()
-		return nil
-	}); err != nil { // This view never returns an error, but we'll handle anyway for sanity.
-		panic(err)
-	}
-
-	return bytesutil.ToBytes32(blockRoot)
-}
-
-// ArchivedPointRoot returns the block root of an archived point from the DB.
-// This is essential for cold state management and to restore a cold state.
-func (s *Store) ArchivedPointRoot(ctx context.Context, slot types.Slot) [32]byte {
-	ctx, span := trace.StartSpan(ctx, "BeaconDB.ArchivedPointRoot")
-	defer span.End()
-
-	var blockRoot []byte
-	if err := s.db.View(func(tx *bolt.Tx) error {
-		bucket := tx.Bucket(stateSlotIndicesBucket)
-		blockRoot = bucket.Get(bytesutil.SlotToBytesBigEndian(slot))
-		return nil
-	}); err != nil { // This view never returns an error, but we'll handle anyway for sanity.
-		panic(err)
-	}
-
-	return bytesutil.ToBytes32(blockRoot)
-}
-
-// HasArchivedPoint returns true if an archived point exists in DB.
-func (s *Store) HasArchivedPoint(ctx context.Context, slot types.Slot) bool {
-	ctx, span := trace.StartSpan(ctx, "BeaconDB.HasArchivedPoint")
-	defer span.End()
-	var exists bool
-	if err := s.db.View(func(tx *bolt.Tx) error {
-		iBucket := tx.Bucket(stateSlotIndicesBucket)
-		exists = iBucket.Get(bytesutil.SlotToBytesBigEndian(slot)) != nil
-		return nil
-	}); err != nil { // This view never returns an error, but we'll handle anyway for sanity.
-		panic(err)
-	}
-	return exists
-}
```
```diff
@@ -1,51 +0,0 @@
-package kv
-
-import (
-	"context"
-	"testing"
-
-	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
-	"github.com/prysmaticlabs/prysm/v3/testing/assert"
-	"github.com/prysmaticlabs/prysm/v3/testing/require"
-	"github.com/prysmaticlabs/prysm/v3/testing/util"
-)
-
-func TestArchivedPointIndexRoot_CanSaveRetrieve(t *testing.T) {
-	db := setupDB(t)
-	ctx := context.Background()
-	i1 := types.Slot(100)
-	r1 := [32]byte{'A'}
-
-	received := db.ArchivedPointRoot(ctx, i1)
-	require.NotEqual(t, r1, received, "Should not have been saved")
-	st, err := util.NewBeaconState()
-	require.NoError(t, err)
-	require.NoError(t, st.SetSlot(i1))
-	require.NoError(t, db.SaveState(ctx, st, r1))
-	received = db.ArchivedPointRoot(ctx, i1)
-	assert.Equal(t, r1, received, "Should have been saved")
-}
-
-func TestLastArchivedPoint_CanRetrieve(t *testing.T) {
-	db := setupDB(t)
-	ctx := context.Background()
-	i, err := db.LastArchivedSlot(ctx)
-	require.NoError(t, err)
-	assert.Equal(t, types.Slot(0), i, "Did not get correct index")
-
-	st, err := util.NewBeaconState()
-	require.NoError(t, err)
-	assert.NoError(t, db.SaveState(ctx, st, [32]byte{'A'}))
-	assert.Equal(t, [32]byte{'A'}, db.LastArchivedRoot(ctx), "Did not get wanted root")
-
-	assert.NoError(t, st.SetSlot(2))
-	assert.NoError(t, db.SaveState(ctx, st, [32]byte{'B'}))
-	assert.Equal(t, [32]byte{'B'}, db.LastArchivedRoot(ctx))
-
-	assert.NoError(t, st.SetSlot(3))
-	assert.NoError(t, db.SaveState(ctx, st, [32]byte{'C'}))
-
-	i, err = db.LastArchivedSlot(ctx)
-	require.NoError(t, err)
-	assert.Equal(t, types.Slot(3), i, "Did not get correct index")
-}
```
```diff
@@ -13,7 +13,7 @@ import (
 )

 func TestStore_Backup(t *testing.T) {
-	db, err := NewKVStore(context.Background(), t.TempDir(), &Config{})
+	db, err := NewKVStore(context.Background(), t.TempDir())
 	require.NoError(t, err, "Failed to instantiate DB")
 	ctx := context.Background()

@@ -44,7 +44,7 @@ func TestStore_Backup(t *testing.T) {
 	// our NewKVStore function expects when opening a database.
 	require.NoError(t, os.Rename(oldFilePath, newFilePath))

-	backedDB, err := NewKVStore(ctx, backupsPath, &Config{})
+	backedDB, err := NewKVStore(ctx, backupsPath)
 	require.NoError(t, err, "Failed to instantiate DB")
 	t.Cleanup(func() {
 		require.NoError(t, backedDB.Close(), "Failed to close database")
@@ -53,7 +53,7 @@ func TestStore_Backup(t *testing.T) {
 }

 func TestStore_BackupMultipleBuckets(t *testing.T) {
-	db, err := NewKVStore(context.Background(), t.TempDir(), &Config{})
+	db, err := NewKVStore(context.Background(), t.TempDir())
 	require.NoError(t, err, "Failed to instantiate DB")
 	ctx := context.Background()

@@ -88,7 +88,7 @@ func TestStore_BackupMultipleBuckets(t *testing.T) {
 	// our NewKVStore function expects when opening a database.
 	require.NoError(t, os.Rename(oldFilePath, newFilePath))

-	backedDB, err := NewKVStore(ctx, backupsPath, &Config{})
+	backedDB, err := NewKVStore(ctx, backupsPath)
 	require.NoError(t, err, "Failed to instantiate DB")
 	t.Cleanup(func() {
 		require.NoError(t, backedDB.Close(), "Failed to close database")
```
```diff
@@ -21,3 +21,5 @@ var ErrNotFoundBackfillBlockRoot = errors.Wrap(ErrNotFound, "BackfillBlockRoot")

 // ErrNotFoundFeeRecipient is a not found error specifically for the fee recipient getter
 var ErrNotFoundFeeRecipient = errors.Wrap(ErrNotFound, "fee recipient")
+
+var errSavedStateMissingBlock = errors.New("could not find block corresponding to saved state")
```
```diff
@@ -37,6 +37,8 @@ const (
 	boltAllocSize = 8 * 1024 * 1024
 	// The size of hash length in bytes
 	hashLength = 32
+	// Specifies the initial mmap size of bolt.
+	mmapSize = 536870912
 )

 var (
@@ -70,11 +72,6 @@ var blockedBuckets = [][]byte{
 	finalizedBlockRootsIndexBucket,
 }

-// Config for the bolt db kv store.
-type Config struct {
-	InitialMMapSize int
-}
-
 // Store defines an implementation of the Prysm Database interface
 // using BoltDB as the underlying persistent kv-store for Ethereum Beacon Nodes.
 type Store struct {
@@ -96,7 +93,7 @@ func KVStoreDatafilePath(dirPath string) string {
 // NewKVStore initializes a new boltDB key-value store at the directory
 // path specified, creates the kv-buckets based on the schema, and stores
 // an open connection db object as a property of the Store struct.
-func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, error) {
+func NewKVStore(ctx context.Context, dirPath string) (*Store, error) {
 	hasDir, err := file.HasDir(dirPath)
 	if err != nil {
 		return nil, err
@@ -113,7 +110,7 @@ func NewKVStore(ctx context.Context, dirPath string) (*Store, error) {
 		params.BeaconIoConfig().ReadWritePermissions,
 		&bolt.Options{
 			Timeout:         1 * time.Second,
-			InitialMmapSize: config.InitialMMapSize,
+			InitialMmapSize: mmapSize,
 		},
 	)
 	if err != nil {
@@ -170,7 +167,6 @@ func NewKVStore(ctx context.Context, dirPath string) (*Store, error) {
 	attestationTargetRootIndicesBucket,
 	attestationTargetEpochIndicesBucket,
 	blockSlotIndicesBucket,
-	stateSlotIndicesBucket,
 	blockParentRootIndicesBucket,
 	finalizedBlockRootsIndexBucket,
 	blockRootValidatorHashesBucket,
```
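The removed `Config` struct carried only the initial mmap size, so the diff replaces it with a fixed constant. The magic number is 512 MiB; a one-line check of the arithmetic:

```go
package main

import "fmt"

func main() {
	// The hard-coded bolt initial mmap size from the diff: 512 MiB.
	const mmapSize = 536870912
	fmt.Println(mmapSize == 512<<20) // true: 512 * 1024 * 1024 bytes
}
```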
```diff
@@ -11,7 +11,7 @@ import (

 // setupDB instantiates and returns a Store instance.
 func setupDB(t testing.TB) *Store {
-	db, err := NewKVStore(context.Background(), t.TempDir(), &Config{})
+	db, err := NewKVStore(context.Background(), t.TempDir())
 	require.NoError(t, err, "Failed to instantiate DB")
 	t.Cleanup(func() {
 		require.NoError(t, db.Close(), "Failed to close database")
```
```diff
@@ -11,7 +11,6 @@ var migrationCompleted = []byte("done")
 type migration func(context.Context, *bolt.DB) error

 var migrations = []migration{
-	migrateArchivedIndex,
 	migrateBlockSlotIndex,
 	migrateStateValidators,
 	migrateBlindedBeaconBlocksEnabled,
```
```diff
@@ -1,72 +0,0 @@
-package kv
-
-import (
-	"bytes"
-	"context"
-
-	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
-	"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
-	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
-	bolt "go.etcd.io/bbolt"
-)
-
-var migrationArchivedIndex0Key = []byte("archive_index_0")
-
-func migrateArchivedIndex(ctx context.Context, db *bolt.DB) error {
-	if updateErr := db.Update(func(tx *bolt.Tx) error {
-		mb := tx.Bucket(migrationsBucket)
-		if b := mb.Get(migrationArchivedIndex0Key); bytes.Equal(b, migrationCompleted) {
-			return nil // Migration already completed.
-		}
-
-		bkt := tx.Bucket(archivedRootBucket)
-		if bkt == nil {
-			return nil
-		}
-		// Remove "last archived index" key before iterating over all keys.
-		if err := bkt.Delete(lastArchivedIndexKey); err != nil {
-			return err
-		}
-
-		var highest types.Slot
-		c := bkt.Cursor()
-		for k, v := c.First(); k != nil; k, v = c.Next() {
-			// Look up actual slot from block
-			b := tx.Bucket(blocksBucket).Get(v)
-			// Skip this key if there is no block for whatever reason.
-			if b == nil {
-				continue
-			}
-			blk := &ethpb.SignedBeaconBlock{}
-			if err := decode(context.TODO(), b, blk); err != nil {
-				return err
-			}
-			if err := tx.Bucket(stateSlotIndicesBucket).Put(bytesutil.SlotToBytesBigEndian(blk.Block.Slot), v); err != nil {
-				return err
-			}
-			if blk.Block.Slot > highest {
-				highest = blk.Block.Slot
-			}
-			// check if context is cancelled in between
-			if ctx.Err() != nil {
-				return ctx.Err()
-			}
-		}
-
-		// Delete deprecated buckets.
-		for _, bkt := range [][]byte{slotsHasObjectBucket, archivedRootBucket} {
-			if tx.Bucket(bkt) != nil {
-				if err := tx.DeleteBucket(bkt); err != nil {
-					return err
-				}
-			}
-		}
-
-		// Mark migration complete.
-		return mb.Put(migrationArchivedIndex0Key, migrationCompleted)
-	}); updateErr != nil {
-		log.WithError(updateErr).Errorf("could not migrate bucket: %s", archivedRootBucket)
-		return updateErr
-	}
-	return nil
-}
```
```diff
@@ -1,102 +0,0 @@
-package kv
-
-import (
-	"context"
-	"testing"
-
-	"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
-	"github.com/prysmaticlabs/prysm/v3/testing/assert"
-	"github.com/prysmaticlabs/prysm/v3/testing/util"
-	"go.etcd.io/bbolt"
-)
-
-func Test_migrateArchivedIndex(t *testing.T) {
-	tests := []struct {
-		name  string
-		setup func(t *testing.T, db *bbolt.DB)
-		eval  func(t *testing.T, db *bbolt.DB)
-	}{
-		{
-			name: "only runs once",
-			setup: func(t *testing.T, db *bbolt.DB) {
-				err := db.Update(func(tx *bbolt.Tx) error {
-					_, err := tx.CreateBucketIfNotExists(archivedRootBucket)
-					assert.NoError(t, err)
-					if err := tx.Bucket(archivedRootBucket).Put(bytesutil.Uint64ToBytesLittleEndian(2048), []byte("foo")); err != nil {
-						return err
-					}
-					return tx.Bucket(migrationsBucket).Put(migrationArchivedIndex0Key, migrationCompleted)
-				})
-				assert.NoError(t, err)
-			},
-			eval: func(t *testing.T, db *bbolt.DB) {
-				err := db.View(func(tx *bbolt.Tx) error {
-					v := tx.Bucket(archivedRootBucket).Get(bytesutil.Uint64ToBytesLittleEndian(2048))
-					assert.DeepEqual(t, []byte("foo"), v, "Did not receive correct data for key 2048")
-					return nil
-				})
-				assert.NoError(t, err)
-			},
-		},
-		{
-			name: "migrates and deletes entries",
-			setup: func(t *testing.T, db *bbolt.DB) {
-				err := db.Update(func(tx *bbolt.Tx) error {
-					_, err := tx.CreateBucketIfNotExists(archivedRootBucket)
-					assert.NoError(t, err)
-					_, err = tx.CreateBucketIfNotExists(slotsHasObjectBucket)
-					assert.NoError(t, err)
-					if err := tx.Bucket(archivedRootBucket).Put(bytesutil.Uint64ToBytesLittleEndian(2048), []byte("foo")); err != nil {
-						return err
-					}
-					sb := util.NewBeaconBlock()
-					sb.Block.Slot = 2048
-					b, err := encode(context.Background(), sb)
-					if err != nil {
-						return err
-					}
-					return tx.Bucket(blocksBucket).Put([]byte("foo"), b)
-				})
-				assert.NoError(t, err)
-			},
-			eval: func(t *testing.T, db *bbolt.DB) {
-				err := db.View(func(tx *bbolt.Tx) error {
-					k := uint64(2048)
-					v := tx.Bucket(stateSlotIndicesBucket).Get(bytesutil.Uint64ToBytesBigEndian(k))
-					assert.DeepEqual(t, []byte("foo"), v, "Did not receive correct data for key %d", k)
-					return nil
-				})
-				assert.NoError(t, err)
-			},
-		},
-		{
-			name: "deletes old buckets",
-			setup: func(t *testing.T, db *bbolt.DB) {
-				err := db.Update(func(tx *bbolt.Tx) error {
-					_, err := tx.CreateBucketIfNotExists(archivedRootBucket)
-					assert.NoError(t, err)
-					_, err = tx.CreateBucketIfNotExists(slotsHasObjectBucket)
-					assert.NoError(t, err)
-					return tx.Bucket(slotsHasObjectBucket).Put(savedStateSlotsKey, []byte("foo"))
-				})
-				assert.NoError(t, err)
-			},
-			eval: func(t *testing.T, db *bbolt.DB) {
-				err := db.View(func(tx *bbolt.Tx) error {
-					assert.Equal(t, (*bbolt.Bucket)(nil), tx.Bucket(slotsHasObjectBucket), "Expected %v to be deleted", savedStateSlotsKey)
-					assert.Equal(t, (*bbolt.Bucket)(nil), tx.Bucket(archivedRootBucket), "Expected %v to be deleted", savedStateSlotsKey)
-					return nil
-				})
-				assert.NoError(t, err)
-			},
-		},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			db := setupDB(t).db
-			tt.setup(t, db)
-			assert.NoError(t, migrateArchivedIndex(context.Background(), db), "migrateArchivedIndex(tx) error")
-			tt.eval(t, db)
-		})
-	}
-}
```
```diff
@@ -202,7 +202,7 @@ func Test_migrateStateValidators(t *testing.T) {
 			defer resetCfg()

 			tt.setup(t, dbStore, st, vals)
-			assert.NoError(t, migrateStateValidators(context.Background(), dbStore.db), "migrateArchivedIndex(tx) error")
+			assert.NoError(t, migrateStateValidators(context.Background(), dbStore.db), "migrateStateValidators(tx) error")
 			tt.eval(t, dbStore, st, vals)
 		})
 	}
@@ -309,7 +309,7 @@ func Test_migrateAltairStateValidators(t *testing.T) {
 			defer resetCfg()

 			tt.setup(t, dbStore, st, vals)
-			assert.NoError(t, migrateStateValidators(context.Background(), dbStore.db), "migrateArchivedIndex(tx) error")
+			assert.NoError(t, migrateStateValidators(context.Background(), dbStore.db), "migrateStateValidators(tx) error")
 			tt.eval(t, dbStore, st, vals)
 		})
 	}
```
```diff
@@ -21,15 +21,9 @@ var (
 	feeRecipientBucket = []byte("fee-recipient")
 	registrationBucket = []byte("registration")

-	// Deprecated: This bucket was migrated in PR 6461. Do not use, except for migrations.
-	slotsHasObjectBucket = []byte("slots-has-objects")
-	// Deprecated: This bucket was migrated in PR 6461. Do not use, except for migrations.
-	archivedRootBucket = []byte("archived-index-root")
-
 	// Key indices buckets.
 	blockParentRootIndicesBucket = []byte("block-parent-root-indices")
 	blockSlotIndicesBucket       = []byte("block-slot-indices")
-	stateSlotIndicesBucket       = []byte("state-slot-indices")
 	attestationHeadBlockRootBucket      = []byte("attestation-head-block-root-indices")
 	attestationSourceRootIndicesBucket  = []byte("attestation-source-root-indices")
 	attestationSourceEpochIndicesBucket = []byte("attestation-source-epoch-indices")
@@ -57,11 +51,6 @@ var (
 	// block root tracking the progress of backfill, or pointing at genesis if backfill has not been initiated
 	backfillBlockRootKey = []byte("backfill-block-root")

-	// Deprecated: This index key was migrated in PR 6461. Do not use, except for migrations.
-	lastArchivedIndexKey = []byte("last-archived")
-	// Deprecated: This index key was migrated in PR 6461. Do not use, except for migrations.
-	savedStateSlotsKey = []byte("saved-state-slots")
-
 	// New state management service compatibility bucket.
 	newStateServiceCompatibleBucket = []byte("new-state-compatible")
```
@@ -5,6 +5,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/golang/snappy"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
|
||||
@@ -15,9 +16,9 @@ import (
|
||||
v3 "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/v3"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/ssz/detect"
|
||||
"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v3/time/slots"
|
||||
@@ -139,10 +140,6 @@ func (s *Store) SaveStates(ctx context.Context, states []state.ReadOnlyBeaconSta
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(stateBucket)
|
||||
for i, rt := range blockRoots {
|
||||
indicesByBucket := createStateIndicesFromStateSlot(ctx, states[i].Slot())
|
||||
if err := updateValueForIndices(ctx, indicesByBucket, rt[:], tx); err != nil {
|
||||
return errors.Wrap(err, "could not update DB indices")
|
||||
}
|
||||
if err := bucket.Put(rt[:], multipleEncs[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -209,11 +206,6 @@ func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, bl
|
||||
bucket := tx.Bucket(stateBucket)
|
||||
valIdxBkt := tx.Bucket(blockRootValidatorHashesBucket)
|
||||
for i, rt := range blockRoots {
|
||||
indicesByBucket := createStateIndicesFromStateSlot(ctx, states[i].Slot())
|
||||
if err := updateValueForIndices(ctx, indicesByBucket, rt[:], tx); err != nil {
|
||||
return errors.Wrap(err, "could not update DB indices")
|
||||
}
|
||||
|
||||
// There is a gap when the states that are passed are used outside this
|
||||
// thread. But while storing the state object, we should not store the
|
||||
// validator entries.To bring the gap closer, we empty the validators
|
||||
@@ -393,15 +385,6 @@ func (s *Store) DeleteState(ctx context.Context, blockRoot [32]byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
slot, err := s.slotByBlockRoot(ctx, tx, blockRoot[:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
indicesByBucket := createStateIndicesFromStateSlot(ctx, slot)
|
||||
if err := deleteValueForIndices(ctx, indicesByBucket, blockRoot[:], tx); err != nil {
|
||||
return errors.Wrap(err, "could not delete root for DB indices")
|
||||
}
|
||||
|
||||
ok, err := s.isStateValidatorMigrationOver()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -635,135 +618,17 @@ func (s *Store) stateBytes(ctx context.Context, blockRoot [32]byte) ([]byte, err
|
||||
return dst, err
|
||||
}
|
||||
|
||||
// slotByBlockRoot retrieves the corresponding slot of the input block root.
|
||||
func (s *Store) slotByBlockRoot(ctx context.Context, tx *bolt.Tx, blockRoot []byte) (types.Slot, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.slotByBlockRoot")
|
||||
defer span.End()
|
||||
|
||||
bkt := tx.Bucket(stateSummaryBucket)
|
||||
enc := bkt.Get(blockRoot)
|
||||
|
||||
if enc == nil {
|
||||
// Fall back to check the block.
|
||||
bkt := tx.Bucket(blocksBucket)
|
||||
enc := bkt.Get(blockRoot)
|
||||
|
||||
if enc == nil {
|
||||
// Fallback and check the state.
|
||||
bkt = tx.Bucket(stateBucket)
|
||||
enc = bkt.Get(blockRoot)
|
||||
if enc == nil {
|
||||
return 0, errors.New("state enc can't be nil")
|
||||
}
|
||||
// no need to construct the validator entries as it is not used here.
|
||||
s, err := s.unmarshalState(ctx, enc, nil)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if s == nil || s.IsNil() {
|
||||
return 0, errors.New("state can't be nil")
|
||||
}
|
||||
return s.Slot(), nil
|
||||
}
|
||||
b := ðpb.SignedBeaconBlock{}
|
||||
err := decode(ctx, enc, b)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
wsb, err := blocks.NewSignedBeaconBlock(b)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if err := blocks.BeaconBlockIsNil(wsb); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return b.Block.Slot, nil
|
||||
}
|
||||
stateSummary := ðpb.StateSummary{}
|
||||
if err := decode(ctx, enc, stateSummary); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return stateSummary.Slot, nil
|
||||
}
|
||||
|
||||
// HighestSlotStatesBelow returns the states with the highest slot below the input slot
|
||||
// from the db. Ideally there should just be one state per slot, but given validator
|
||||
// can double propose, a single slot could have multiple block roots and
|
||||
// results states. This returns a list of states.
|
||||
func (s *Store) HighestSlotStatesBelow(ctx context.Context, slot types.Slot) ([]state.ReadOnlyBeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.HighestSlotStatesBelow")
|
||||
defer span.End()
|
||||
|
||||
var best []byte
|
||||
if err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(stateSlotIndicesBucket)
|
||||
c := bkt.Cursor()
|
||||
for s, root := c.First(); s != nil; s, root = c.Next() {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
key := bytesutil.BytesToSlotBigEndian(s)
|
||||
if root == nil {
|
||||
continue
|
||||
}
|
||||
if key >= slot {
|
||||
break
|
||||
}
|
||||
best = root
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var st state.ReadOnlyBeaconState
|
||||
var err error
|
||||
if best != nil {
|
||||
st, err = s.State(ctx, bytesutil.ToBytes32(best))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if st == nil || st.IsNil() {
|
||||
st, err = s.GenesisState(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return []state.ReadOnlyBeaconState{st}, nil
|
||||
}
|
||||
-
-// createStateIndicesFromStateSlot takes in a state slot and returns
-// a map of bolt DB index buckets corresponding to each particular key for indices for
-// data, such as (shard indices bucket -> shard 5).
-func createStateIndicesFromStateSlot(ctx context.Context, slot types.Slot) map[string][]byte {
-	ctx, span := trace.StartSpan(ctx, "BeaconDB.createStateIndicesFromState")
-	defer span.End()
-	indicesByBucket := make(map[string][]byte)
-	// Every index has a unique bucket for fast, binary-search
-	// range scans for filtering across keys.
-	buckets := [][]byte{
-		stateSlotIndicesBucket,
-	}
-
-	indices := [][]byte{
-		bytesutil.SlotToBytesBigEndian(slot),
-	}
-	for i := 0; i < len(buckets); i++ {
-		indicesByBucket[string(buckets[i])] = indices[i]
-	}
-	return indicesByBucket
-}
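The "binary-search range scans" mentioned in the removed comment work only because big-endian encoding preserves numeric order under byte comparison. A tiny self-contained illustration (`slotToBytesBigEndian` is a local stand-in for `bytesutil.SlotToBytesBigEndian`):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// slotToBytesBigEndian encodes a slot so that bytes.Compare agrees with
// numeric order, which is what makes ordered range scans over the index
// bucket possible.
func slotToBytesBigEndian(slot uint64) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, slot)
	return b
}

func main() {
	a, b := slotToBytesBigEndian(2048), slotToBytesBigEndian(4096)
	fmt.Println(bytes.Compare(a, b)) // -1: byte order matches slot order
}
```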
 
-// CleanUpDirtyStates removes states in DB that fall under archived point interval rules.
-// Only following states would be kept:
-// 1.) state_slot % archived_interval == 0. (e.g. archived_interval=2048, states with slot 2048, 4096... etc)
-// 2.) archived_interval - archived_interval/3 < state_slot % archived_interval
-//     (e.g. archived_interval=2048, states with slots after 1365).
-//     This is to tolerate skip slots. Not every state lays on the boundary.
-// 3.) state with current finalized root
-// 4.) unfinalized states
+// CleanUpDirtyStates attempts to maintain the promise to save approximately <head slot / save state interval> states.
+// To do that, we save about 1 state every eg 2048 slots (default slotsPerArchivedPoint value), calling the slot
+// where the save happened the "save point". Due to skipped slots, there may not be a block at a multiple of 2048,
+// in which case the saved state point will be at the slot where the last block was previously included in the interval.
+// We don't want to delete the most recently finalized state, which is saved to the same database,
+// and in long periods of non-finality, stategen may also write a state every 128 slots to aid in recovery.
+// So we preserve:
+// 1. any state where the slot number is a multiple of 2048 (slot % 2048 == 0)
+// 2. any state with a slot number within 682 slots (2048/3) of such a save point,
+// 3. the most recently finalized state
+// 4. non-finalized states used by stategen
 func (s *Store) CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint types.Slot) error {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.CleanUpDirtyStates")
 	defer span.End()
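To make the retention rules concrete: with the default interval of 2048, the cutoff is 2048 - 2048/3 = 2048 - 682 = 1366, so a state survives when its slot modulo 2048 is 0 or greater than 1366 (i.e. within the last ~682 slots before the next save point). A hedged sketch of the keep/delete predicate, not the exact Prysm implementation, with names mirroring the diff:

```go
package main

import "fmt"

// keepState mirrors the four retention cases described above: archive points,
// the tail third of each interval, the finalized state, and anything not yet
// finalized.
func keepState(slot, interval, finalizedSlot uint64, isFinalizedRoot bool) bool {
	mod := slot % interval
	topThird := interval - interval/3 // 2048 - 682 = 1366 for the default interval
	switch {
	case mod == 0: // case 1: exact archive point
		return true
	case mod > topThird: // case 2: within ~682 slots of the next point
		return true
	case isFinalizedRoot: // case 3: most recently finalized state
		return true
	case slot > finalizedSlot: // case 4: not finalized yet
		return true
	default:
		return false
	}
}

func main() {
	fmt.Println(keepState(4096, 2048, 5000, false)) // true: multiple of 2048
	fmt.Println(keepState(3500, 2048, 5000, false)) // true: 3500%2048 = 1452 > 1366
	fmt.Println(keepState(2500, 2048, 5000, false)) // false: 2500%2048 = 452
}
```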
@@ -776,24 +641,61 @@ func (s *Store) CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint ty
 	if err != nil {
 		return err
 	}
-	deletedRoots := make([][32]byte, 0)
+	finalizedRoot := bytesutil.ToBytes32(f.Root)
+
+	// We usually archive a state every 2048 slots. If a slot with value % 2048 == 0 is skipped,
+	// we will store the last un-skipped state instead. We don't know exactly how far back that state could be
+	// from the skipped one, but a fudge factor of roughly 1/3 of the interval was chosen based on looking
+	// at chain history for guidance. 1/3 of the default interval (2048) comes out to about 682 slots (or ~21 epochs).
+	intervalTopThird := slotsPerArchivedPoint - slotsPerArchivedPoint/3
+
+	seen := 0
+	toDelete := make([][32]byte, 0)
 	err = s.db.View(func(tx *bolt.Tx) error {
-		bkt := tx.Bucket(stateSlotIndicesBucket)
-		return bkt.ForEach(func(k, v []byte) error {
+		bkt := tx.Bucket(stateBucket)
+		bbkt := tx.Bucket(blocksBucket)
+		return bkt.ForEach(func(k, _ []byte) error {
 			if ctx.Err() != nil {
 				return ctx.Err()
 			}
-
-			finalizedChkpt := bytesutil.ToBytes32(f.Root) == bytesutil.ToBytes32(v)
-			slot := bytesutil.BytesToSlotBigEndian(k)
-			mod := slot % slotsPerArchivedPoint
-			nonFinalized := slot > finalizedSlot
-
-			// The following conditions cover 1, 2, 3 and 4 above.
-			if mod != 0 && mod <= slotsPerArchivedPoint-slotsPerArchivedPoint/3 && !finalizedChkpt && !nonFinalized {
-				deletedRoots = append(deletedRoots, bytesutil.ToBytes32(v))
-			}
+			seen += 1
+			// If we could cheaply and easily read the first 50 or so bytes of the state,
+			// we could pull the slot from the ssz-encoded bytes. But the state is very large (> 50MB) and
+			// we need to read the entire thing to snappy.Decode it, so this code is betting that it's cheaper
+			// to grab the corresponding block and decode that instead.
+			enc := bbkt.Get(k[:32])
+			if enc == nil {
+				// the database is in an unexpected state, we should error out to prevent anything destructive.
+				log.WithField("root", hexutil.Encode(k)).Error("Could not find block corresponding to saved state")
+				return errors.Wrapf(errSavedStateMissingBlock, "root=%#x", k)
+			}
+			enc, err = snappy.Decode(nil, enc)
+			if err != nil {
+				return errors.Wrapf(err, "unable to snappy.Decode block with root=%#x", k)
+			}
+			slot, err := detect.SlotFromBlock(enc)
+			if err != nil {
+				return errors.Wrapf(err, "unable to extract slot from block with root=%#x", k)
+			}
+			mod := slot % slotsPerArchivedPoint
+			// state is on an archive point, or within the final 1/3 of the interval (case 1 & 2)
+			if mod == 0 || mod > intervalTopThird {
+				return nil
+			}
+
+			// don't delete the state integrating the latest finalized block (case 3)
+			if bytesutil.ToBytes32(k) == finalizedRoot {
+				return nil
+			}
+
+			// don't delete states that haven't finalized yet - they may be in-use by the hot state cache (case 4)
+			if slot > finalizedSlot {
+				return nil
+			}
+
+			// delete everything else!
+			toDelete = append(toDelete, bytesutil.ToBytes32(k))
+			return nil
 		})
 	})
@@ -801,13 +703,13 @@ func (s *Store) CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint ty
 		return err
 	}
 
 	// Length of to be deleted roots is 0. Nothing to do.
-	if len(deletedRoots) == 0 {
+	if len(toDelete) == 0 {
+		log.WithField("db_total", seen).Info("No dirty states to clean up")
 		return nil
 	}
 
-	log.WithField("count", len(deletedRoots)).Info("Cleaning up dirty states")
-	if err := s.DeleteStates(ctx, deletedRoots); err != nil {
+	log.WithField("db_total", seen).WithField("dirty", len(toDelete)).Info("Cleaning up dirty states")
+	if err := s.DeleteStates(ctx, toDelete); err != nil {
 		return err
 	}

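The loop above trades a cheap block decode for an expensive state decode to learn a slot. A hedged sketch of that pattern using the real `snappy.Decode` API; `getBlock` stands in for the bucket lookup and `decodeSlot` for a helper like the diff's `detect.SlotFromBlock`:

```go
package main

import (
	"fmt"

	"github.com/golang/snappy"
)

// slotOfSavedState sketches the trade-off above: rather than snappy-decoding
// a >50MB state just to read its slot, fetch the corresponding (small) block
// stored under the same root and decode that instead.
func slotOfSavedState(root [32]byte, getBlock func([32]byte) []byte, decodeSlot func([]byte) (uint64, error)) (uint64, error) {
	enc := getBlock(root)
	if enc == nil {
		// Unexpected database shape: refuse to guess rather than delete.
		return 0, fmt.Errorf("no block for root %#x", root)
	}
	raw, err := snappy.Decode(nil, enc) // blocks are a few KB, cheap to decode
	if err != nil {
		return 0, err
	}
	return decodeSlot(raw)
}
```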
@@ -3,6 +3,7 @@ package kv
 import (
 	"context"
 	"encoding/binary"
+	"fmt"
 	"math/rand"
 	"testing"
 	"time"
@@ -468,102 +469,13 @@ func TestStore_DeleteHeadState(t *testing.T) {
 	require.NoError(t, db.DeleteState(ctx, headBlockRoot)) // Ok to delete head state if it's optimistic.
 }
 
-func TestStore_SaveDeleteState_CanGetHighestBelow(t *testing.T) {
-	db := setupDB(t)
-
-	b := util.NewBeaconBlock()
-	b.Block.Slot = 1
-	r, err := b.Block.HashTreeRoot()
-	require.NoError(t, err)
-	wsb, err := blocks.NewSignedBeaconBlock(b)
-	require.NoError(t, err)
-	require.NoError(t, db.SaveBlock(context.Background(), wsb))
-	st, err := util.NewBeaconState()
-	require.NoError(t, err)
-	require.NoError(t, st.SetSlot(1))
-	s0 := st.InnerStateUnsafe()
-	require.NoError(t, db.SaveState(context.Background(), st, r))
-
-	b.Block.Slot = 100
-	r1, err := b.Block.HashTreeRoot()
-	require.NoError(t, err)
-	wsb, err = blocks.NewSignedBeaconBlock(b)
-	require.NoError(t, err)
-	require.NoError(t, db.SaveBlock(context.Background(), wsb))
-	st, err = util.NewBeaconState()
-	require.NoError(t, err)
-	require.NoError(t, st.SetSlot(100))
-	s1 := st.InnerStateUnsafe()
-	require.NoError(t, db.SaveState(context.Background(), st, r1))
-
-	b.Block.Slot = 1000
-	r2, err := b.Block.HashTreeRoot()
-	require.NoError(t, err)
-	wsb, err = blocks.NewSignedBeaconBlock(b)
-	require.NoError(t, err)
-	require.NoError(t, db.SaveBlock(context.Background(), wsb))
-	st, err = util.NewBeaconState()
-	require.NoError(t, err)
-	require.NoError(t, st.SetSlot(1000))
-	s2 := st.InnerStateUnsafe()
-
-	require.NoError(t, db.SaveState(context.Background(), st, r2))
-
-	highest, err := db.HighestSlotStatesBelow(context.Background(), 2)
-	require.NoError(t, err)
-	assert.DeepSSZEqual(t, highest[0].InnerStateUnsafe(), s0)
-
-	highest, err = db.HighestSlotStatesBelow(context.Background(), 101)
-	require.NoError(t, err)
-	assert.DeepSSZEqual(t, highest[0].InnerStateUnsafe(), s1)
-
-	highest, err = db.HighestSlotStatesBelow(context.Background(), 1001)
-	require.NoError(t, err)
-	assert.DeepSSZEqual(t, highest[0].InnerStateUnsafe(), s2)
-}
-
-func TestStore_GenesisState_CanGetHighestBelow(t *testing.T) {
-	db := setupDB(t)
-
-	genesisState, err := util.NewBeaconState()
-	require.NoError(t, err)
-	genesisRoot := [32]byte{'a'}
-	require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), genesisRoot))
-	require.NoError(t, db.SaveState(context.Background(), genesisState, genesisRoot))
-
-	b := util.NewBeaconBlock()
-	b.Block.Slot = 1
-	r, err := b.Block.HashTreeRoot()
-	require.NoError(t, err)
-	wsb, err := blocks.NewSignedBeaconBlock(b)
-	require.NoError(t, err)
-	require.NoError(t, db.SaveBlock(context.Background(), wsb))
-
-	st, err := util.NewBeaconState()
-	require.NoError(t, err)
-	require.NoError(t, st.SetSlot(1))
-	require.NoError(t, db.SaveState(context.Background(), st, r))
-
-	highest, err := db.HighestSlotStatesBelow(context.Background(), 2)
-	require.NoError(t, err)
-	assert.DeepSSZEqual(t, highest[0].InnerStateUnsafe(), st.InnerStateUnsafe())
-
-	highest, err = db.HighestSlotStatesBelow(context.Background(), 1)
-	require.NoError(t, err)
-	assert.DeepSSZEqual(t, highest[0].InnerStateUnsafe(), genesisState.InnerStateUnsafe())
-	highest, err = db.HighestSlotStatesBelow(context.Background(), 0)
-	require.NoError(t, err)
-	assert.DeepSSZEqual(t, highest[0].InnerStateUnsafe(), genesisState.InnerStateUnsafe())
-}
-
 func TestStore_CleanUpDirtyStates_AboveThreshold(t *testing.T) {
 	db := setupDB(t)
 
 	genesisState, err := util.NewBeaconState()
 	require.NoError(t, err)
-	genesisRoot := [32]byte{'a'}
-	require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), genesisRoot))
-	require.NoError(t, db.SaveState(context.Background(), genesisState, genesisRoot))
+	require.NoError(t, db.SaveGenesisData(context.Background(), genesisState))
 
 	bRoots := make([][32]byte, 0)
 	slotsPerArchivedPoint := types.Slot(128)
@@ -592,11 +504,12 @@ func TestStore_CleanUpDirtyStates_AboveThreshold(t *testing.T) {
 	}))
 	require.NoError(t, db.CleanUpDirtyStates(context.Background(), slotsPerArchivedPoint))
 
+	threshold := slotsPerArchivedPoint.SubSlot(slotsPerArchivedPoint.Div(3))
 	for i, root := range bRoots {
-		if types.Slot(i) >= slotsPerArchivedPoint.SubSlot(slotsPerArchivedPoint.Div(3)) {
+		if types.Slot(i) >= threshold {
 			require.Equal(t, true, db.HasState(context.Background(), root))
 		} else {
-			require.Equal(t, false, db.HasState(context.Background(), root))
+			require.Equal(t, false, db.HasState(context.Background(), root), fmt.Sprintf("slot=%d, threshold=%d", i, threshold))
 		}
 	}
 }
@@ -606,9 +519,9 @@ func TestStore_CleanUpDirtyStates_Finalized(t *testing.T) {
 
 	genesisState, err := util.NewBeaconState()
 	require.NoError(t, err)
-	genesisRoot := [32]byte{'a'}
-	require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), genesisRoot))
-	require.NoError(t, db.SaveState(context.Background(), genesisState, genesisRoot))
+	require.NoError(t, db.SaveGenesisData(context.Background(), genesisState))
+	genesisRoot, err := db.GenesisBlockRoot(context.Background())
+	require.NoError(t, err)
 
 	for i := types.Slot(1); i <= params.BeaconConfig().SlotsPerEpoch; i++ {
 		b := util.NewBeaconBlock()
@@ -635,9 +548,9 @@ func TestStore_CleanUpDirtyStates_DontDeleteNonFinalized(t *testing.T) {
 
 	genesisState, err := util.NewBeaconState()
 	require.NoError(t, err)
-	genesisRoot := [32]byte{'a'}
-	require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), genesisRoot))
-	require.NoError(t, db.SaveState(context.Background(), genesisState, genesisRoot))
+	require.NoError(t, db.SaveGenesisData(context.Background(), genesisState))
+	genesisRoot, err := db.GenesisBlockRoot(context.Background())
+	require.NoError(t, err)
 
 	var unfinalizedRoots [][32]byte
 	for i := types.Slot(1); i <= params.BeaconConfig().SlotsPerEpoch; i++ {
@@ -22,7 +22,7 @@ func TestRestore(t *testing.T) {
 	logHook := logTest.NewGlobal()
 	ctx := context.Background()
 
-	backupDb, err := kv.NewKVStore(context.Background(), t.TempDir(), &kv.Config{})
+	backupDb, err := kv.NewKVStore(context.Background(), t.TempDir())
 	require.NoError(t, err)
 	head := util.NewBeaconBlock()
 	head.Block.Slot = 5000
@@ -58,7 +58,7 @@ func TestRestore(t *testing.T) {
 	require.NoError(t, err)
 	assert.Equal(t, 1, len(files))
 	assert.Equal(t, kv.DatabaseFileName, files[0].Name())
-	restoredDb, err := kv.NewKVStore(context.Background(), path.Join(restoreDir, kv.BeaconNodeDbDirName), &kv.Config{})
+	restoredDb, err := kv.NewKVStore(context.Background(), path.Join(restoreDir, kv.BeaconNodeDbDirName))
 	defer func() {
 		require.NoError(t, restoredDb.Close())
 	}()
@@ -21,13 +21,10 @@ const (
 	// DatabaseFileName is the name of the beacon node database.
 	DatabaseFileName = "slasher.db"
 	boltAllocSize    = 8 * 1024 * 1024
+	// Specifies the initial mmap size of bolt.
+	mmapSize = 536870912
 )
 
-// Config for the bolt db kv store.
-type Config struct {
-	InitialMMapSize int
-}
-
 // Store defines an implementation of the Prysm Database interface
 // using BoltDB as the underlying persistent kv-store for Ethereum consensus.
 type Store struct {
@@ -39,7 +36,7 @@ type Store struct {
 // NewKVStore initializes a new boltDB key-value store at the directory
 // path specified, creates the kv-buckets based on the schema, and stores
 // an open connection db object as a property of the Store struct.
-func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, error) {
+func NewKVStore(ctx context.Context, dirPath string) (*Store, error) {
 	hasDir, err := file.HasDir(dirPath)
 	if err != nil {
 		return nil, err
@@ -55,7 +52,7 @@ func NewKVStore(ctx context.Context, dirPath string) (*Store, error) {
 		params.BeaconIoConfig().ReadWritePermissions,
 		&bolt.Options{
 			Timeout:         1 * time.Second,
-			InitialMmapSize: config.InitialMMapSize,
+			InitialMmapSize: mmapSize,
 		},
 	)
 	if err != nil {

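The hard-coded `mmapSize` replaces the per-caller config. A hedged sketch of opening bbolt with a presized mmap, matching the constant above (the path is a placeholder):

```go
package main

import (
	"time"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Presizing the mmap (here 512 MiB, matching mmapSize above) avoids
	// repeated remapping as the database file grows; Timeout bounds how long
	// Open waits on the file lock if another process holds the database.
	db, err := bolt.Open("/tmp/slasher.db", 0600, &bolt.Options{
		Timeout:         1 * time.Second,
		InitialMmapSize: 536870912,
	})
	if err != nil {
		panic(err)
	}
	defer db.Close()
}
```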
@@ -9,7 +9,7 @@ import (
 
 // setupDB instantiates and returns a Store instance.
 func setupDB(t testing.TB) *Store {
-	db, err := NewKVStore(context.Background(), t.TempDir(), &Config{})
+	db, err := NewKVStore(context.Background(), t.TempDir())
 	require.NoError(t, err, "Failed to instantiate DB")
 	t.Cleanup(func() {
 		require.NoError(t, db.Close(), "Failed to close database")
@@ -14,7 +14,7 @@ import (
 
 // SetupDB instantiates and returns database backed by key value store.
 func SetupDB(t testing.TB) db.Database {
-	s, err := kv.NewKVStore(context.Background(), t.TempDir(), &kv.Config{})
+	s, err := kv.NewKVStore(context.Background(), t.TempDir())
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -28,7 +28,7 @@ func SetupDB(t testing.TB) db.Database {
 
 // SetupSlasherDB --
 func SetupSlasherDB(t testing.TB) iface.SlasherDatabase {
-	s, err := slasherkv.NewKVStore(context.Background(), t.TempDir(), &slasherkv.Config{})
+	s, err := slasherkv.NewKVStore(context.Background(), t.TempDir())
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -403,6 +403,10 @@ func (s *Service) processBlockInBatch(ctx context.Context, currentBlockNum uint6
 		}
 	}
 
+	s.latestEth1DataLock.RLock()
+	lastReqBlock := s.latestEth1Data.LastRequestedBlock
+	s.latestEth1DataLock.RUnlock()
+
 	for _, filterLog := range logs {
 		if filterLog.BlockNumber > currentBlockNum {
 			if err := s.checkHeaderRange(ctx, currentBlockNum, filterLog.BlockNumber-1, headersMap, requestHeaders); err != nil {
@@ -415,6 +419,13 @@ func (s *Service) processBlockInBatch(ctx context.Context, currentBlockNum uint6
 			currentBlockNum = filterLog.BlockNumber
 		}
 		if err := s.ProcessLog(ctx, filterLog); err != nil {
+			// In the event the execution client gives us a garbled/bad log
+			// we reset the last requested block to the previous valid block range. This
+			// prevents the beacon from advancing processing of logs to another range
+			// in the event of an execution client failure.
+			s.latestEth1DataLock.Lock()
+			s.latestEth1Data.LastRequestedBlock = lastReqBlock
+			s.latestEth1DataLock.Unlock()
 			return 0, 0, err
 		}
 	}
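The change above snapshots the log-processing cursor under a read lock before the batch starts and restores it under a write lock if any log fails, so a retry re-covers the same block range. A minimal hedged sketch of that checkpoint-and-roll-back pattern with a generic cursor (all names here are illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// cursorTracker records the last good position under a read lock; on failure
// it restores that checkpoint under a write lock so the range is retried.
type cursorTracker struct {
	mu   sync.RWMutex
	last uint64 // last successfully processed block number
}

func (c *cursorTracker) processBatch(nums []uint64, process func(uint64) error) error {
	c.mu.RLock()
	checkpoint := c.last // snapshot before touching anything
	c.mu.RUnlock()

	for _, n := range nums {
		if err := process(n); err != nil {
			c.mu.Lock()
			c.last = checkpoint // roll back so a retry re-covers the range
			c.mu.Unlock()
			return err
		}
		c.mu.Lock()
		c.last = n
		c.mu.Unlock()
	}
	return nil
}

func main() {
	c := &cursorTracker{last: 100}
	err := c.processBatch([]uint64{101, 102}, func(n uint64) error {
		if n == 102 {
			return errors.New("garbled log")
		}
		return nil
	})
	fmt.Println(err, c.last) // garbled log 100
}
```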
@@ -36,6 +36,14 @@ func WithHttpEndpointAndJWTSecret(endpointString string, secret []byte) Option {
 	}
 }
 
+// WithHeaders adds headers to the execution node JSON-RPC requests.
+func WithHeaders(headers []string) Option {
+	return func(s *Service) error {
+		s.cfg.headers = headers
+		return nil
+	}
+}
+
 // WithDepositContractAddress for the deposit contract.
 func WithDepositContractAddress(addr common.Address) Option {
 	return func(s *Service) error {
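`WithHeaders` follows the functional-options pattern: each option is a closure that mutates the service config at construction time. A hedged sketch of how such an option composes; the `NewService` constructor shape is assumed, not quoted from the diff:

```go
package main

import "fmt"

type config struct{ headers []string }

// Service is a stand-in for the execution service configured above.
type Service struct{ cfg config }

// Option mirrors the functional-option shape used in the diff.
type Option func(*Service) error

func WithHeaders(headers []string) Option {
	return func(s *Service) error {
		s.cfg.headers = headers
		return nil
	}
}

// NewService is an assumed constructor: apply each option in order.
func NewService(opts ...Option) (*Service, error) {
	s := &Service{}
	for _, o := range opts {
		if err := o(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	s, _ := NewService(WithHeaders([]string{"X-Client=prysm"}))
	fmt.Println(s.cfg.headers)
}
```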
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"net/url"
+	"strings"
 	"time"
 
 	"github.com/ethereum/go-ethereum/ethclient"
@@ -65,7 +66,6 @@ func (s *Service) pollConnectionStatus(ctx context.Context) {
 		currClient := s.rpcClient
 		if err := s.setupExecutionClientConnections(ctx, s.cfg.currHttpEndpoint); err != nil {
 			errorLogger(err, "Could not connect to execution client endpoint")
-			s.retryExecutionClientConnection(ctx, err)
 			continue
 		}
 		// Close previous client, if connection was successful.
@@ -114,7 +114,7 @@ func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.End
 		if err != nil {
 			return nil, err
 		}
-	case "":
+	case "", "ipc":
 		client, err = gethRPC.DialIPC(ctx, endpoint.Url)
 		if err != nil {
 			return nil, err
@@ -129,6 +129,16 @@ func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.End
 		}
 		client.SetHeader("Authorization", header)
 	}
+	for _, h := range s.cfg.headers {
+		if h != "" {
+			keyValue := strings.Split(h, "=")
+			if len(keyValue) < 2 {
+				log.Warnf("Incorrect HTTP header flag format. Skipping %v", keyValue[0])
+				continue
+			}
+			client.SetHeader(keyValue[0], strings.Join(keyValue[1:], "="))
+		}
+	}
 	return client, nil
 }

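The header-parsing loop splits on every `=` and rejoins the tail so that values containing `=` (base64 tokens, for instance) survive intact. A minimal sketch of just that logic; `strings.SplitN(h, "=", 2)` would be an equivalent design choice:

```go
package main

import (
	"fmt"
	"strings"
)

// parseHeaderFlag splits a "Key=Value" flag, tolerating '=' inside the value
// by rejoining everything after the first separator.
func parseHeaderFlag(h string) (key, value string, ok bool) {
	keyValue := strings.Split(h, "=")
	if len(keyValue) < 2 {
		return "", "", false // malformed, caller should skip it
	}
	return keyValue[0], strings.Join(keyValue[1:], "="), true
}

func main() {
	k, v, ok := parseHeaderFlag("Authorization=Bearer abc=123==")
	fmt.Println(k, v, ok) // Authorization Bearer abc=123== true
}
```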
@@ -128,6 +128,7 @@ type config struct {
 	eth1HeaderReqLimit      uint64
 	beaconNodeStatsUpdater  BeaconNodeStatsUpdater
 	currHttpEndpoint        network.Endpoint
+	headers                 []string
 	finalizedStateAtStartup state.BeaconState
 }
 
@@ -316,11 +317,6 @@ func (s *Service) updateBeaconNodeStats() {
 	s.cfg.beaconNodeStatsUpdater.Update(bs)
 }
 
-func (s *Service) updateCurrHttpEndpoint(endpoint network.Endpoint) {
-	s.cfg.currHttpEndpoint = endpoint
-	s.updateBeaconNodeStats()
-}
-
 func (s *Service) updateConnectedETH1(state bool) {
 	s.connectedETH1 = state
 	s.updateBeaconNodeStats()
@@ -10,6 +10,7 @@ go_library(
     importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice",
     visibility = [
         "//beacon-chain:__subpackages__",
+        "//cmd:__subpackages__",
        "//testing/spectest:__subpackages__",
    ],
    deps = [
@@ -17,6 +18,7 @@ go_library(
        "//beacon-chain/state:go_default_library",
        "//config/fieldparams:go_default_library",
        "//consensus-types/primitives:go_default_library",
+        "//proto/eth/v1:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
)

@@ -31,6 +31,7 @@ go_library(
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
+        "//proto/eth/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
@@ -62,12 +63,14 @@ go_test(
        "//beacon-chain/forkchoice/types:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/v3:go_default_library",
+        "//config/features:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//crypto/hash:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/engine/v1:go_default_library",
+        "//proto/eth/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
@@ -3,6 +3,7 @@ package doublylinkedtree
 import (
 	"context"
 	"fmt"
+	"time"
 
 	"github.com/pkg/errors"
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
@@ -14,6 +15,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v3/config/params"
 	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
+	v1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
 	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v3/runtime/version"
 	"github.com/prysmaticlabs/prysm/v3/time/slots"
@@ -82,7 +84,8 @@ func (f *ForkChoice) Head(
 
 	jc := f.JustifiedCheckpoint()
 	fc := f.FinalizedCheckpoint()
-	if err := f.store.treeRootNode.updateBestDescendant(ctx, jc.Epoch, fc.Epoch); err != nil {
+	currentEpoch := slots.EpochsSinceGenesis(time.Unix(int64(f.store.genesisTime), 0))
+	if err := f.store.treeRootNode.updateBestDescendant(ctx, jc.Epoch, fc.Epoch, currentEpoch); err != nil {
 		return [32]byte{}, errors.Wrap(err, "could not update best descendant")
 	}
 	return f.store.head(ctx)
@@ -183,11 +186,15 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
 			return err
 		}
 		jcRoot := bytesutil.ToBytes32(jc.Root)
+		// Releasing here the checkpoints lock because
+		// AncestorRoot acquires a lock on nodes and that can
+		// cause a double lock.
+		f.store.checkpointsLock.Unlock()
 		root, err := f.AncestorRoot(ctx, jcRoot, jSlot)
 		if err != nil {
-			f.store.checkpointsLock.Unlock()
 			return err
 		}
+		f.store.checkpointsLock.Lock()
 		if root == currentRoot {
 			f.store.prevJustifiedCheckpoint = f.store.justifiedCheckpoint
 			f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
@@ -296,7 +303,8 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types
 }
 
 // updateBalances updates the balances that directly voted for each block taking into account the
-// validators' latest votes. This function requires a lock in Store.nodesLock.
+// validators' latest votes. This function requires a lock in Store.nodesLock
+// and votesLock.
 func (f *ForkChoice) updateBalances(newBalances []uint64) error {
 	for index, vote := range f.votes {
 		// Skip if validator has been slashed
@@ -424,6 +432,9 @@ func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, parentRoo
 // store-tracked list. Votes from these validators are not accounted for
 // in forkchoice.
 func (f *ForkChoice) InsertSlashedIndex(_ context.Context, index types.ValidatorIndex) {
+	f.votesLock.RLock()
+	defer f.votesLock.RUnlock()
+
 	f.store.nodesLock.Lock()
 	defer f.store.nodesLock.Unlock()
 	// return early if the index was already included:
@@ -433,8 +444,6 @@ func (f *ForkChoice) InsertSlashedIndex(_ context.Context, index types.Validator
 	f.store.slashedIndices[index] = true
 
 	// Subtract last vote from this equivocating validator
-	f.votesLock.RLock()
-	defer f.votesLock.RUnlock()
 
 	if index >= types.ValidatorIndex(len(f.balances)) {
 		return
@@ -484,30 +493,31 @@ func (f *ForkChoice) UpdateFinalizedCheckpoint(fc *forkchoicetypes.Checkpoint) e
 }
 
 // CommonAncestorRoot returns the common ancestor root between the two block roots r1 and r2.
-func (f *ForkChoice) CommonAncestorRoot(ctx context.Context, r1 [32]byte, r2 [32]byte) ([32]byte, error) {
+func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byte) ([32]byte, types.Slot, error) {
 	ctx, span := trace.StartSpan(ctx, "doublelinkedtree.CommonAncestorRoot")
 	defer span.End()
 
-	// Do nothing if the input roots are the same.
-	if r1 == r2 {
-		return r1, nil
-	}
-
 	f.store.nodesLock.RLock()
 	defer f.store.nodesLock.RUnlock()
 
 	n1, ok := f.store.nodeByRoot[r1]
 	if !ok || n1 == nil {
-		return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
+		return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
 	}
 
+	// Do nothing if the input roots are the same.
+	if r1 == r2 {
+		return r1, n1.slot, nil
+	}
+
 	n2, ok := f.store.nodeByRoot[r2]
 	if !ok || n2 == nil {
-		return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
+		return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
 	}
 
 	for {
 		if ctx.Err() != nil {
-			return [32]byte{}, ctx.Err()
+			return [32]byte{}, 0, ctx.Err()
 		}
 		if n1.slot > n2.slot {
 			n1 = n1.parent
@@ -515,17 +525,17 @@ func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32
 			// This should not happen at runtime as the finalized
 			// node has to be a common ancestor
 			if n1 == nil {
-				return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
+				return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
 			}
 		} else {
 			n2 = n2.parent
 			// Reaches the end of the tree and unable to find common ancestor.
 			if n2 == nil {
-				return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
+				return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
 			}
 		}
 		if n1 == n2 {
-			return n1.root, nil
+			return n1.root, n1.slot, nil
 		}
 	}
 }
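The renamed `CommonAncestor` now also returns the ancestor's slot, but the underlying algorithm is unchanged: walk both nodes toward the root, always advancing the one at the higher slot, until the pointers meet. A self-contained sketch of that two-pointer walk (types here are illustrative, not Prysm's):

```go
package main

import (
	"errors"
	"fmt"
)

type node struct {
	slot   uint64
	parent *node
}

var errNoCommonAncestor = errors.New("unknown common ancestor")

// commonAncestor advances the deeper of the two nodes toward the root until
// the pointers meet, mirroring the loop above. O(depth), no extra memory.
func commonAncestor(n1, n2 *node) (*node, error) {
	if n1 == n2 {
		return n1, nil
	}
	for {
		if n1.slot > n2.slot {
			if n1 = n1.parent; n1 == nil {
				return nil, errNoCommonAncestor
			}
		} else {
			if n2 = n2.parent; n2 == nil {
				return nil, errNoCommonAncestor
			}
		}
		if n1 == n2 {
			return n1, nil
		}
	}
}

func main() {
	root := &node{slot: 0}
	a := &node{slot: 2, parent: root}
	b := &node{slot: 3, parent: root}
	ca, _ := commonAncestor(a, b)
	fmt.Println(ca.slot) // 0
}
```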
@@ -606,3 +616,52 @@ func (f *ForkChoice) JustifiedPayloadBlockHash() [32]byte {
 	}
 	return node.payloadHash
 }
+
+// ForkChoiceDump returns a full dump of forkchoice.
+func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*v1.ForkChoiceResponse, error) {
+	jc := &v1.Checkpoint{
+		Epoch: f.store.justifiedCheckpoint.Epoch,
+		Root:  f.store.justifiedCheckpoint.Root[:],
+	}
+	bjc := &v1.Checkpoint{
+		Epoch: f.store.bestJustifiedCheckpoint.Epoch,
+		Root:  f.store.bestJustifiedCheckpoint.Root[:],
+	}
+	ujc := &v1.Checkpoint{
+		Epoch: f.store.unrealizedJustifiedCheckpoint.Epoch,
+		Root:  f.store.unrealizedJustifiedCheckpoint.Root[:],
+	}
+	fc := &v1.Checkpoint{
+		Epoch: f.store.finalizedCheckpoint.Epoch,
+		Root:  f.store.finalizedCheckpoint.Root[:],
+	}
+	ufc := &v1.Checkpoint{
+		Epoch: f.store.unrealizedFinalizedCheckpoint.Epoch,
+		Root:  f.store.unrealizedFinalizedCheckpoint.Root[:],
+	}
+	nodes := make([]*v1.ForkChoiceNode, 0, f.NodeCount())
+	var err error
+	if f.store.treeRootNode != nil {
+		nodes, err = f.store.treeRootNode.nodeTreeDump(ctx, nodes)
+		if err != nil {
+			return nil, err
+		}
+	}
+	var headRoot [32]byte
+	if f.store.headNode != nil {
+		headRoot = f.store.headNode.root
+	}
+	resp := &v1.ForkChoiceResponse{
+		JustifiedCheckpoint:           jc,
+		BestJustifiedCheckpoint:       bjc,
+		UnrealizedJustifiedCheckpoint: ujc,
+		FinalizedCheckpoint:           fc,
+		UnrealizedFinalizedCheckpoint: ufc,
+		ProposerBoostRoot:             f.store.proposerBoostRoot[:],
+		PreviousProposerBoostRoot:     f.store.previousProposerBoostRoot[:],
+		HeadRoot:                      headRoot[:],
+		ForkchoiceNodes:               nodes,
+	}
+	return resp, nil
+}

@@ -208,7 +208,7 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
 	require.Equal(t, uint64(10), f.store.nodeByRoot[[32]byte{'1'}].weight)
 	require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'2'}].weight)
 
-	require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, 1, 1))
+	require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
 	require.DeepEqual(t, [32]byte{'3'}, f.store.treeRootNode.bestDescendant.root)
 	f.store.nodesLock.Unlock()

@@ -408,73 +408,85 @@ func TestStore_CommonAncestor(t *testing.T) {
 		r1       [32]byte
 		r2       [32]byte
 		wantRoot [32]byte
+		wantSlot types.Slot
 	}{
 		{
 			name:     "Common ancestor between c and b is a",
 			r1:       [32]byte{'c'},
 			r2:       [32]byte{'b'},
 			wantRoot: [32]byte{'a'},
+			wantSlot: 0,
 		},
 		{
 			name:     "Common ancestor between c and d is a",
 			r1:       [32]byte{'c'},
 			r2:       [32]byte{'d'},
 			wantRoot: [32]byte{'a'},
+			wantSlot: 0,
 		},
 		{
 			name:     "Common ancestor between c and e is a",
 			r1:       [32]byte{'c'},
 			r2:       [32]byte{'e'},
 			wantRoot: [32]byte{'a'},
+			wantSlot: 0,
 		},
 		{
 			name:     "Common ancestor between g and f is c",
 			r1:       [32]byte{'g'},
 			r2:       [32]byte{'f'},
 			wantRoot: [32]byte{'c'},
+			wantSlot: 2,
 		},
 		{
 			name:     "Common ancestor between f and h is c",
 			r1:       [32]byte{'f'},
 			r2:       [32]byte{'h'},
 			wantRoot: [32]byte{'c'},
+			wantSlot: 2,
 		},
 		{
 			name:     "Common ancestor between g and h is c",
 			r1:       [32]byte{'g'},
 			r2:       [32]byte{'h'},
 			wantRoot: [32]byte{'c'},
+			wantSlot: 2,
 		},
 		{
 			name:     "Common ancestor between b and h is a",
 			r1:       [32]byte{'b'},
 			r2:       [32]byte{'h'},
 			wantRoot: [32]byte{'a'},
+			wantSlot: 0,
 		},
 		{
 			name:     "Common ancestor between e and h is a",
 			r1:       [32]byte{'e'},
 			r2:       [32]byte{'h'},
 			wantRoot: [32]byte{'a'},
+			wantSlot: 0,
 		},
 		{
 			name:     "Common ancestor between i and f is c",
 			r1:       [32]byte{'i'},
 			r2:       [32]byte{'f'},
 			wantRoot: [32]byte{'c'},
+			wantSlot: 2,
 		},
 		{
 			name:     "Common ancestor between j and g is c",
 			r1:       [32]byte{'j'},
 			r2:       [32]byte{'g'},
 			wantRoot: [32]byte{'c'},
+			wantSlot: 2,
 		},
 	}
 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
-			gotRoot, err := f.CommonAncestorRoot(ctx, tc.r1, tc.r2)
+			gotRoot, gotSlot, err := f.CommonAncestor(ctx, tc.r1, tc.r2)
 			require.NoError(t, err)
 			require.Equal(t, tc.wantRoot, gotRoot)
+			require.Equal(t, tc.wantSlot, gotSlot)
 		})
 	}

@@ -497,46 +509,53 @@ func TestStore_CommonAncestor(t *testing.T) {
 		r1       [32]byte
 		r2       [32]byte
 		wantRoot [32]byte
+		wantSlot types.Slot
 	}{
 		{
 			name:     "Common ancestor between a and b is a",
 			r1:       [32]byte{'a'},
 			r2:       [32]byte{'b'},
 			wantRoot: [32]byte{'a'},
+			wantSlot: 0,
 		},
 		{
 			name:     "Common ancestor between b and d is b",
 			r1:       [32]byte{'d'},
 			r2:       [32]byte{'b'},
 			wantRoot: [32]byte{'b'},
+			wantSlot: 1,
 		},
 		{
 			name:     "Common ancestor between d and a is a",
 			r1:       [32]byte{'d'},
 			r2:       [32]byte{'a'},
 			wantRoot: [32]byte{'a'},
+			wantSlot: 0,
 		},
 	}
 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
-			gotRoot, err := f.CommonAncestorRoot(ctx, tc.r1, tc.r2)
+			gotRoot, gotSlot, err := f.CommonAncestor(ctx, tc.r1, tc.r2)
 			require.NoError(t, err)
 			require.Equal(t, tc.wantRoot, gotRoot)
+			require.Equal(t, tc.wantSlot, gotSlot)
 		})
 	}
 
 	// Equal inputs should return the same root.
-	r, err := f.CommonAncestorRoot(ctx, [32]byte{'b'}, [32]byte{'b'})
+	r, s, err := f.CommonAncestor(ctx, [32]byte{'b'}, [32]byte{'b'})
 	require.NoError(t, err)
 	require.Equal(t, [32]byte{'b'}, r)
+	require.Equal(t, types.Slot(1), s)
 	// Requesting finalized root (last node) should return the same root.
-	r, err = f.CommonAncestorRoot(ctx, [32]byte{'a'}, [32]byte{'a'})
+	r, s, err = f.CommonAncestor(ctx, [32]byte{'a'}, [32]byte{'a'})
 	require.NoError(t, err)
 	require.Equal(t, [32]byte{'a'}, r)
+	require.Equal(t, types.Slot(0), s)
 	// Requesting unknown root
-	_, err = f.CommonAncestorRoot(ctx, [32]byte{'a'}, [32]byte{'z'})
+	_, _, err = f.CommonAncestor(ctx, [32]byte{'a'}, [32]byte{'z'})
 	require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
-	_, err = f.CommonAncestorRoot(ctx, [32]byte{'z'}, [32]byte{'a'})
+	_, _, err = f.CommonAncestor(ctx, [32]byte{'z'}, [32]byte{'a'})
 	require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
 	n := &Node{
 		slot: 100,
@@ -550,7 +569,7 @@ func TestStore_CommonAncestor(t *testing.T) {
 
 	f.store.nodeByRoot[[32]byte{'y'}] = n
 	// broken link
-	_, err = f.CommonAncestorRoot(ctx, [32]byte{'y'}, [32]byte{'a'})
+	_, _, err = f.CommonAncestor(ctx, [32]byte{'y'}, [32]byte{'a'})
 	require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
 }

@@ -5,8 +5,10 @@ import (
 	"context"
 
 	"github.com/pkg/errors"
+	"github.com/prysmaticlabs/prysm/v3/config/features"
 	"github.com/prysmaticlabs/prysm/v3/config/params"
 	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
+	v1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
 )
 
 // depth returns the length of the path to the root of Fork Choice
@@ -40,8 +42,9 @@ func (n *Node) applyWeightChanges(ctx context.Context) error {
 	return nil
 }
 
-// updateBestDescendant updates the best descendant of this node and its children.
-func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch types.Epoch) error {
+// updateBestDescendant updates the best descendant of this node and its
+// children. This function assumes the caller has a lock on Store.nodesLock.
+func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch, currentEpoch types.Epoch) error {
 	if ctx.Err() != nil {
 		return ctx.Err()
 	}
@@ -57,10 +60,10 @@ func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finaliz
 		if child == nil {
 			return errors.Wrap(ErrNilNode, "could not update best descendant")
 		}
-		if err := child.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch); err != nil {
+		if err := child.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
 			return err
 		}
-		childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, finalizedEpoch)
+		childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, finalizedEpoch, currentEpoch)
 		if childLeadsToViableHead && !hasViableDescendant {
 			// The child leads to a viable head, but the current
 			// parent's best child doesn't.
@@ -95,18 +98,24 @@ func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finaliz
 // viableForHead returns true if the node is viable to head.
 // Any node with different finalized or justified epoch than
 // the ones in fork choice store should not be viable to head.
-func (n *Node) viableForHead(justifiedEpoch, finalizedEpoch types.Epoch) bool {
+func (n *Node) viableForHead(justifiedEpoch, finalizedEpoch, currentEpoch types.Epoch) bool {
 	justified := justifiedEpoch == n.justifiedEpoch || justifiedEpoch == 0
 	finalized := finalizedEpoch == n.finalizedEpoch || finalizedEpoch == 0
+	if features.Get().EnableDefensivePull && !justified && justifiedEpoch+1 == currentEpoch {
+		if n.unrealizedJustifiedEpoch+1 >= currentEpoch {
+			justified = true
+			finalized = true
+		}
+	}
 
 	return justified && finalized
 }
 
-func (n *Node) leadsToViableHead(justifiedEpoch, finalizedEpoch types.Epoch) bool {
+func (n *Node) leadsToViableHead(justifiedEpoch, finalizedEpoch, currentEpoch types.Epoch) bool {
 	if n.bestDescendant == nil {
-		return n.viableForHead(justifiedEpoch, finalizedEpoch)
+		return n.viableForHead(justifiedEpoch, finalizedEpoch, currentEpoch)
 	}
-	return n.bestDescendant.viableForHead(justifiedEpoch, finalizedEpoch)
+	return n.bestDescendant.viableForHead(justifiedEpoch, finalizedEpoch, currentEpoch)
 }
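The new `currentEpoch` parameter implements the "defensive pull": right after an epoch transition, a node whose stored justified epoch is one epoch stale can still be viable if its unrealized justified epoch has (nearly) caught up. A standalone sketch of just that predicate under my reading of the diff; the feature flag and finalized-epoch half are elided, and all names are illustrative:

```go
package main

import "fmt"

// viable sketches the defensive-pull relaxation above: a node with a stale
// justified epoch is still viable if, one epoch past the store's justified
// epoch, its unrealized justification is recent enough.
func viable(nodeJustified, nodeUnrealizedJustified, storeJustified, currentEpoch uint64) bool {
	justified := storeJustified == nodeJustified || storeJustified == 0
	if !justified && storeJustified+1 == currentEpoch {
		if nodeUnrealizedJustified+1 >= currentEpoch {
			justified = true // unrealized justification rescues the node
		}
	}
	return justified
}

func main() {
	fmt.Println(viable(1, 2, 2, 3)) // true: unrealized justification caught up
	fmt.Println(viable(1, 1, 2, 3)) // false: unrealized epoch too old
}
```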
 
 // setNodeAndParentValidated sets the current node and all the ancestors as validated (i.e. non-optimistic).
@@ -115,10 +124,48 @@ func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
 		return ctx.Err()
 	}
 
-	if !n.optimistic || n.parent == nil {
+	if !n.optimistic {
 		return nil
 	}
 
 	n.optimistic = false
 
+	if n.parent == nil {
+		return nil
+	}
 	return n.parent.setNodeAndParentValidated(ctx)
 }
+
+// nodeTreeDump appends to the given list all the nodes descending from this one
+func (n *Node) nodeTreeDump(ctx context.Context, nodes []*v1.ForkChoiceNode) ([]*v1.ForkChoiceNode, error) {
+	if ctx.Err() != nil {
+		return nil, ctx.Err()
+	}
+	var parentRoot [32]byte
+	if n.parent != nil {
+		parentRoot = n.parent.root
+	}
+	thisNode := &v1.ForkChoiceNode{
+		Slot:                     n.slot,
+		Root:                     n.root[:],
+		ParentRoot:               parentRoot[:],
+		JustifiedEpoch:           n.justifiedEpoch,
+		FinalizedEpoch:           n.finalizedEpoch,
+		UnrealizedJustifiedEpoch: n.unrealizedJustifiedEpoch,
+		UnrealizedFinalizedEpoch: n.unrealizedFinalizedEpoch,
+		Balance:                  n.balance,
+		Weight:                   n.weight,
+		ExecutionOptimistic:      n.optimistic,
+		ExecutionPayload:         n.payloadHash[:],
+		Timestamp:                n.timestamp,
+	}
+
+	nodes = append(nodes, thisNode)
+	var err error
+	for _, child := range n.children {
+		nodes, err = child.nodeTreeDump(ctx, nodes)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}

@@ -6,6 +6,7 @@ import (
 
 	"github.com/prysmaticlabs/prysm/v3/config/params"
 	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
+	v1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
 	"github.com/prysmaticlabs/prysm/v3/testing/assert"
 	"github.com/prysmaticlabs/prysm/v3/testing/require"
 )
@@ -113,7 +114,7 @@ func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
 	s := f.store
 	s.nodeByRoot[indexToHash(1)].weight = 100
 	s.nodeByRoot[indexToHash(2)].weight = 200
-	assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1))
+	assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
 
 	assert.Equal(t, 2, len(s.treeRootNode.children))
 	assert.Equal(t, s.treeRootNode.children[1], s.treeRootNode.bestDescendant)
@@ -133,7 +134,7 @@ func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
 	s := f.store
 	s.nodeByRoot[indexToHash(1)].weight = 200
 	s.nodeByRoot[indexToHash(2)].weight = 100
-	assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1))
+	assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
 
 	assert.Equal(t, 2, len(s.treeRootNode.children))
 	assert.Equal(t, s.treeRootNode.children[0], s.treeRootNode.bestDescendant)
@@ -173,7 +174,7 @@ func TestNode_ViableForHead(t *testing.T) {
 		{&Node{finalizedEpoch: 3, justifiedEpoch: 4}, 4, 3, true},
 	}
 	for _, tc := range tests {
-		got := tc.n.viableForHead(tc.justifiedEpoch, tc.finalizedEpoch)
+		got := tc.n.viableForHead(tc.justifiedEpoch, tc.finalizedEpoch, 5)
 		assert.Equal(t, tc.want, got)
 	}
 }
@@ -197,15 +198,17 @@ func TestNode_LeadsToViableHead(t *testing.T) {
 	require.NoError(t, err)
 	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
 
-	require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 3))
-	require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 3))
-	require.Equal(t, false, f.store.nodeByRoot[indexToHash(2)].leadsToViableHead(4, 3))
-	require.Equal(t, false, f.store.nodeByRoot[indexToHash(4)].leadsToViableHead(4, 3))
+	require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 3, 5))
+	require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 3, 5))
+	require.Equal(t, false, f.store.nodeByRoot[indexToHash(2)].leadsToViableHead(4, 3, 5))
+	require.Equal(t, false, f.store.nodeByRoot[indexToHash(4)].leadsToViableHead(4, 3, 5))
 }
 
 func TestNode_SetFullyValidated(t *testing.T) {
 	f := setup(1, 1)
 	ctx := context.Background()
+	storeNodes := make([]*Node, 6)
+	storeNodes[0] = f.store.treeRootNode
 	// insert blocks in the fork pattern (optimistic status in parenthesis)
 	//
 	// 0 (false) -- 1 (false) -- 2 (false) -- 3 (true) -- 4 (true)
@@ -215,20 +218,25 @@ func TestNode_SetFullyValidated(t *testing.T) {
 	state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
 	require.NoError(t, err)
 	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
+	storeNodes[1] = f.store.nodeByRoot[blkRoot]
 	require.NoError(t, f.SetOptimisticToValid(ctx, params.BeaconConfig().ZeroHash))
 	state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
 	require.NoError(t, err)
 	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
+	storeNodes[2] = f.store.nodeByRoot[blkRoot]
 	require.NoError(t, f.SetOptimisticToValid(ctx, indexToHash(1)))
 	state, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
 	require.NoError(t, err)
 	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
+	storeNodes[3] = f.store.nodeByRoot[blkRoot]
 	state, blkRoot, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1)
 	require.NoError(t, err)
 	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
+	storeNodes[4] = f.store.nodeByRoot[blkRoot]
 	state, blkRoot, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
 	require.NoError(t, err)
 	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
+	storeNodes[5] = f.store.nodeByRoot[blkRoot]
 
 	opt, err := f.IsOptimistic(indexToHash(5))
 	require.NoError(t, err)
@@ -253,4 +261,22 @@ func TestNode_SetFullyValidated(t *testing.T) {
 	opt, err = f.IsOptimistic(indexToHash(3))
 	require.NoError(t, err)
 	require.Equal(t, false, opt)
+
+	respNodes := make([]*v1.ForkChoiceNode, 0)
+	respNodes, err = f.store.treeRootNode.nodeTreeDump(ctx, respNodes)
+	require.NoError(t, err)
+	require.Equal(t, len(respNodes), f.NodeCount())
+
+	for i, respNode := range respNodes {
+		require.Equal(t, storeNodes[i].slot, respNode.Slot)
+		require.DeepEqual(t, storeNodes[i].root[:], respNode.Root)
+		require.Equal(t, storeNodes[i].balance, respNode.Balance)
+		require.Equal(t, storeNodes[i].weight, respNode.Weight)
+		require.Equal(t, storeNodes[i].optimistic, respNode.ExecutionOptimistic)
+		require.Equal(t, storeNodes[i].justifiedEpoch, respNode.JustifiedEpoch)
+		require.Equal(t, storeNodes[i].unrealizedJustifiedEpoch, respNode.UnrealizedJustifiedEpoch)
+		require.Equal(t, storeNodes[i].finalizedEpoch, respNode.FinalizedEpoch)
+		require.Equal(t, storeNodes[i].unrealizedFinalizedEpoch, respNode.UnrealizedFinalizedEpoch)
+		require.Equal(t, storeNodes[i].timestamp, respNode.Timestamp)
+	}
 }

@@ -41,15 +41,14 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot types.Slot) error {
 	}
 
 	// Update store.justified_checkpoint if a better checkpoint on the store.finalized_checkpoint chain
-	f.store.checkpointsLock.Lock()
-
+	f.store.checkpointsLock.RLock()
 	bjcp := f.store.bestJustifiedCheckpoint
 	jcp := f.store.justifiedCheckpoint
 	fcp := f.store.finalizedCheckpoint
+	f.store.checkpointsLock.RUnlock()
 	if bjcp.Epoch > jcp.Epoch {
 		finalizedSlot, err := slots.EpochStart(fcp.Epoch)
 		if err != nil {
-			f.store.checkpointsLock.Unlock()
 			return err
 		}
 
@@ -59,15 +58,15 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot types.Slot) error {
 		// loop call here.
 		r, err := f.AncestorRoot(ctx, bjcp.Root, finalizedSlot)
 		if err != nil {
-			f.store.checkpointsLock.Unlock()
 			return err
 		}
 		if r == fcp.Root {
+			f.store.checkpointsLock.Lock()
 			f.store.prevJustifiedCheckpoint = jcp
 			f.store.justifiedCheckpoint = bjcp
+			f.store.checkpointsLock.Unlock()
 		}
 	}
-	f.store.checkpointsLock.Unlock()
 	if !features.Get().DisablePullTips {
 		f.updateUnrealizedCheckpoints()
 	}

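The refactor above replaces one long-held write lock with a short read-lock snapshot, an unlocked slow path, and a brief write section for the final swap. A minimal hedged sketch of the pattern (the struct and the `onChain` callback are illustrative):

```go
package main

import (
	"fmt"
	"sync"
)

type checkpoints struct {
	mu              sync.RWMutex
	justified, best uint64
}

// maybePromote mirrors the locking refactor above: snapshot under RLock,
// run the slow check with no lock held (avoiding lock-ordering hazards with
// other locks the check may take), and write-lock only for the cheap update.
func (c *checkpoints) maybePromote(onChain func(uint64) bool) {
	c.mu.RLock()
	best, justified := c.best, c.justified // cheap snapshot
	c.mu.RUnlock()

	if best <= justified {
		return
	}
	if !onChain(best) { // slow call, no lock held
		return
	}

	c.mu.Lock()
	c.justified = best // brief write section
	c.mu.Unlock()
}

func main() {
	c := &checkpoints{justified: 1, best: 2}
	c.maybePromote(func(uint64) bool { return true })
	fmt.Println(c.justified) // 2
}
```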
@@ -389,3 +389,14 @@ func TestSetOptimisticToInvalid_ForkAtMerge_bis(t *testing.T) {
 	})
 	require.DeepEqual(t, roots, [][32]byte{{'b'}, {'c'}, {'d'}, {'e'}})
 }
+
+func TestSetOptimisticToValid(t *testing.T) {
+	f := setup(1, 1)
+	op, err := f.IsOptimistic([32]byte{})
+	require.NoError(t, err)
+	require.Equal(t, true, op)
+	require.NoError(t, f.SetOptimisticToValid(context.Background(), [32]byte{}))
+	op, err = f.IsOptimistic([32]byte{})
+	require.NoError(t, err)
+	require.Equal(t, false, op)
+}

@@ -95,8 +95,8 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
 	if bestDescendant == nil {
 		bestDescendant = justifiedNode
 	}
-
-	if !bestDescendant.viableForHead(s.justifiedCheckpoint.Epoch, s.finalizedCheckpoint.Epoch) {
+	currentEpoch := slots.EpochsSinceGenesis(time.Unix(int64(s.genesisTime), 0))
+	if !bestDescendant.viableForHead(s.justifiedCheckpoint.Epoch, s.finalizedCheckpoint.Epoch, currentEpoch) {
 		s.allTipsAreInvalid = true
 		return [32]byte{}, fmt.Errorf("head at slot %d with weight %d is not eligible, finalizedEpoch, justified Epoch %d, %d != %d, %d",
 			bestDescendant.slot, bestDescendant.weight/10e9, bestDescendant.finalizedEpoch, bestDescendant.justifiedEpoch, s.finalizedCheckpoint.Epoch, s.justifiedCheckpoint.Epoch)
@@ -142,6 +142,7 @@ func (s *Store) insert(ctx context.Context,
 		unrealizedFinalizedEpoch: finalizedEpoch,
 		optimistic:               true,
 		payloadHash:              payloadHash,
+		timestamp:                uint64(time.Now().Unix()),
 	}
 
 	s.nodeByPayload[payloadHash] = n
@@ -170,8 +171,11 @@ func (s *Store) insert(ctx context.Context,
 	}
 
 	// Update best descendants
-	if err := s.treeRootNode.updateBestDescendant(ctx,
-		s.justifiedCheckpoint.Epoch, s.finalizedCheckpoint.Epoch); err != nil {
+	s.checkpointsLock.RLock()
+	jEpoch := s.justifiedCheckpoint.Epoch
+	fEpoch := s.finalizedCheckpoint.Epoch
+	s.checkpointsLock.RUnlock()
+	if err := s.treeRootNode.updateBestDescendant(ctx, jEpoch, fEpoch, slots.ToEpoch(currentSlot)); err != nil {
 		return n, err
 	}
@@ -59,6 +59,7 @@ type Node struct {
 	weight         uint64 // weight of this node: the total balance including children
 	bestDescendant *Node  // bestDescendant node of this node.
 	optimistic     bool   // whether the block has been fully validated or not
+	timestamp      uint64 // The timestamp when the node was inserted.
 }
 
 // Vote defines an individual validator's vote.

@@ -5,8 +5,10 @@ import (
 	"testing"
 
 	forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
+	"github.com/prysmaticlabs/prysm/v3/config/features"
 	"github.com/prysmaticlabs/prysm/v3/config/params"
 	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v3/testing/require"
@@ -198,31 +199,36 @@ func TestStore_NoDeadLock(t *testing.T) {
 // D justifies and comes late.
 //
 func TestStore_ForkNextEpoch(t *testing.T) {
+	resetCfg := features.InitWithReset(&features.Flags{
+		EnableDefensivePull: true,
+	})
+	defer resetCfg()
+
 	f := setup(0, 0)
 	ctx := context.Background()
 
 	// Epoch 1 blocks (D does not arrive)
-	state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
+	state, blkRoot, err := prepareForkchoiceState(ctx, 92, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
 	require.NoError(t, err)
 	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
-	state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
+	state, blkRoot, err = prepareForkchoiceState(ctx, 93, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
 	require.NoError(t, err)
 	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
-	state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 0, 0)
+	state, blkRoot, err = prepareForkchoiceState(ctx, 94, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 0, 0)
 	require.NoError(t, err)
 	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
 
 	// Epoch 2 blocks
-	state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'c'}, [32]byte{'E'}, 0, 0)
+	state, blkRoot, err = prepareForkchoiceState(ctx, 96, [32]byte{'e'}, [32]byte{'c'}, [32]byte{'E'}, 0, 0)
 	require.NoError(t, err)
 	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
-	state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 0, 0)
+	state, blkRoot, err = prepareForkchoiceState(ctx, 97, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 0, 0)
 	require.NoError(t, err)
 	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
-	state, blkRoot, err = prepareForkchoiceState(ctx, 106, [32]byte{'g'}, [32]byte{'f'}, [32]byte{'G'}, 0, 0)
+	state, blkRoot, err = prepareForkchoiceState(ctx, 98, [32]byte{'g'}, [32]byte{'f'}, [32]byte{'G'}, 0, 0)
 	require.NoError(t, err)
 	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
-	state, blkRoot, err = prepareForkchoiceState(ctx, 107, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 0, 0)
+	state, blkRoot, err = prepareForkchoiceState(ctx, 99, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 0, 0)
 	require.NoError(t, err)
 	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
 
@@ -234,16 +240,25 @@ func TestStore_ForkNextEpoch(t *testing.T) {
 	require.Equal(t, types.Epoch(0), f.JustifiedCheckpoint().Epoch)
 
 	// D arrives late, D is head
-	state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 0, 0)
+	state, blkRoot, err = prepareForkchoiceState(ctx, 95, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 0, 0)
 	require.NoError(t, err)
 	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
-	require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'d'}, 1))
-	f.store.unrealizedJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 1}
+	require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'d'}, 2))
+	f.store.unrealizedJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 2}
 	f.updateUnrealizedCheckpoints()
 	headRoot, err = f.Head(ctx, []uint64{100})
 	require.NoError(t, err)
 	require.Equal(t, [32]byte{'d'}, headRoot)
-	require.Equal(t, types.Epoch(1), f.JustifiedCheckpoint().Epoch)
+	require.Equal(t, types.Epoch(2), f.JustifiedCheckpoint().Epoch)
 	require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
 	require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'h'}].weight)
+	// Set current epoch to 3, and H's unrealized checkpoint. Check it's head
+	driftGenesisTime(f, 99, 0)
+	require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'h'}, 2))
+	headRoot, err = f.Head(ctx, []uint64{100})
+	require.NoError(t, err)
+	require.Equal(t, [32]byte{'h'}, headRoot)
+	require.Equal(t, types.Epoch(2), f.JustifiedCheckpoint().Epoch)
+	require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
+	require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'h'}].weight)
 }

@@ -7,6 +7,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
 	fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
 	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
+	v1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
 )
 
 // ForkChoicer represents the full fork choice interface composed of all the sub-interfaces.
@@ -51,7 +52,7 @@ type Getter interface {
 	ProposerBoost() [fieldparams.RootLength]byte
 	HasParent(root [32]byte) bool
 	AncestorRoot(ctx context.Context, root [32]byte, slot types.Slot) ([32]byte, error)
-	CommonAncestorRoot(ctx context.Context, root1 [32]byte, root2 [32]byte) ([32]byte, error)
+	CommonAncestor(ctx context.Context, root1 [32]byte, root2 [32]byte) ([32]byte, types.Slot, error)
 	IsCanonical(root [32]byte) bool
 	FinalizedCheckpoint() *forkchoicetypes.Checkpoint
 	FinalizedPayloadBlockHash() [32]byte
@@ -62,6 +63,7 @@ type Getter interface {
 	NodeCount() int
 	HighestReceivedBlockSlot() types.Slot
 	ReceivedBlocksLastEpoch() (uint64, error)
+	ForkChoiceDump(context.Context) (*v1.ForkChoiceResponse, error)
 }
 
 // Setter allows to set forkchoice information

@@ -32,6 +32,7 @@ go_library(
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//math:go_default_library",
+        "//proto/eth/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",

@@ -10,7 +10,9 @@ import (
)

// This computes validator balance delta from validator votes.
// It returns a list of deltas that represents the difference between old balances and new balances.
// It returns a list of deltas that represents the difference between old
// balances and new balances. This function assumes the caller holds a lock in
// Store.nodesLock and Store.votesLock
func computeDeltas(
ctx context.Context,
count int,
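The reworded comment above pins down both what computeDeltas returns and the locks its caller must hold. For orientation, here is a minimal self-contained sketch of what a vote-delta pass computes; the `vote` type and function names are hypothetical, not the diffed code:

```go
package main

import "fmt"

// vote is a hypothetical, simplified record of a validator's previous and
// current fork-choice targets, identified by node index.
type vote struct {
	prev, curr int
}

// voteDeltas sketches the idea behind computeDeltas: each validator removes
// its old balance from the node it previously voted for and adds its new
// balance to its current target.
func voteDeltas(nodeCount int, votes []vote, oldBal, newBal []uint64) []int64 {
	deltas := make([]int64, nodeCount)
	for i, v := range votes {
		if v.prev >= 0 && v.prev < nodeCount {
			deltas[v.prev] -= int64(oldBal[i])
		}
		if v.curr >= 0 && v.curr < nodeCount {
			deltas[v.curr] += int64(newBal[i])
		}
	}
	return deltas
}

func main() {
	// Validator 0 moves its vote from node 1 to node 2.
	votes := []vote{{prev: 1, curr: 2}}
	fmt.Println(voteDeltas(3, votes, []uint64{32}, []uint64{32})) // [0 -32 32]
}
```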
@@ -41,15 +41,14 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot types.Slot) error {
}

// Update store.justified_checkpoint if a better checkpoint on the store.finalized_checkpoint chain
f.store.checkpointsLock.Lock()

f.store.checkpointsLock.RLock()
bjcp := f.store.bestJustifiedCheckpoint
jcp := f.store.justifiedCheckpoint
fcp := f.store.finalizedCheckpoint
f.store.checkpointsLock.RUnlock()
if bjcp.Epoch > jcp.Epoch {
finalizedSlot, err := slots.EpochStart(fcp.Epoch)
if err != nil {
f.store.checkpointsLock.Unlock()
return err
}

@@ -59,15 +58,15 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot types.Slot) error {
// loop call here.
r, err := f.AncestorRoot(ctx, bjcp.Root, finalizedSlot)
if err != nil {
f.store.checkpointsLock.Unlock()
return err
}
if r == fcp.Root {
f.store.checkpointsLock.Lock()
f.store.prevJustifiedCheckpoint = jcp
f.store.justifiedCheckpoint = bjcp
f.store.checkpointsLock.Unlock()
}
}
f.store.checkpointsLock.Unlock()
if !features.Get().DisablePullTips {
f.updateUnrealizedCheckpoints()
}
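The hunks above replace one long-held write lock with a read-lock snapshot plus a short, scoped write section. A compilable sketch of that discipline, with illustrative types rather than Prysm's own store:

```go
package main

import "sync"

// checkpoint is an illustrative stand-in for a justified/finalized checkpoint.
type checkpoint struct{ Epoch uint64 }

type store struct {
	mu                         sync.RWMutex
	best, justified, finalized checkpoint
	prevJustified              checkpoint
}

// promoteBestJustified sketches the lock pattern from the diff: snapshot
// shared state under RLock, run the potentially slow check without holding
// any lock, then write under a short exclusive Lock.
func (s *store) promoteBestJustified(onFinalizedChain func(checkpoint, checkpoint) bool) {
	s.mu.RLock()
	best, justified, finalized := s.best, s.justified, s.finalized
	s.mu.RUnlock()

	if best.Epoch > justified.Epoch && onFinalizedChain(best, finalized) {
		s.mu.Lock()
		s.prevJustified = justified
		s.justified = best
		s.mu.Unlock()
	}
}

func main() {
	s := &store{best: checkpoint{2}, justified: checkpoint{1}}
	s.promoteBestJustified(func(_, _ checkpoint) bool { return true })
}
```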
@@ -17,6 +17,7 @@ import (
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
pmath "github.com/prysmaticlabs/prysm/v3/math"
v1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
"github.com/prysmaticlabs/prysm/v3/time/slots"
@@ -188,11 +189,15 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
return err
}
jcRoot := bytesutil.ToBytes32(jc.Root)
// release the checkpoints lock here because
// AncestorRoot takes a lock on nodes and that can lead
// to double locks
f.store.checkpointsLock.Unlock()
root, err := f.AncestorRoot(ctx, jcRoot, jSlot)
if err != nil {
f.store.checkpointsLock.Unlock()
return err
}
f.store.checkpointsLock.Lock()
if root == currentRoot {
f.store.prevJustifiedCheckpoint = f.store.justifiedCheckpoint
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
@@ -277,47 +282,51 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types
}

// CommonAncestorRoot returns the common ancestor root between the two block roots r1 and r2.
func (f *ForkChoice) CommonAncestorRoot(ctx context.Context, r1 [32]byte, r2 [32]byte) ([32]byte, error) {
func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byte) ([32]byte, types.Slot, error) {
ctx, span := trace.StartSpan(ctx, "protoArray.CommonAncestorRoot")
defer span.End()

// Do nothing if the two input roots are the same.
if r1 == r2 {
return r1, nil
}
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()

i1, ok := f.store.nodesIndices[r1]
if !ok || i1 >= uint64(len(f.store.nodes)) {
return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
}

// Do nothing if the two input roots are the same.
if r1 == r2 {
n1 := f.store.nodes[i1]
return r1, n1.slot, nil
}

i2, ok := f.store.nodesIndices[r2]
if !ok || i2 >= uint64(len(f.store.nodes)) {
return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
}

for {
if ctx.Err() != nil {
return [32]byte{}, ctx.Err()
return [32]byte{}, 0, ctx.Err()
}
if i1 > i2 {
n1 := f.store.nodes[i1]
i1 = n1.parent
// Reaches the end of the tree and unable to find common ancestor.
if i1 >= uint64(len(f.store.nodes)) {
return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
}
} else {
n2 := f.store.nodes[i2]
i2 = n2.parent
// Reaches the end of the tree and unable to find common ancestor.
if i2 >= uint64(len(f.store.nodes)) {
return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
}
}
if i1 == i2 {
n1 := f.store.nodes[i1]
return n1.root, nil
return n1.root, n1.slot, nil
}
}
}
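Proto-array stores nodes in insertion order, so a parent's index is always lower than its child's; the renamed CommonAncestor exploits this by repeatedly moving whichever pointer has the larger index up to its parent until the two meet, now also returning the ancestor's slot. A minimal sketch of that convergence walk, with a hypothetical node type and -1 standing in for "no parent":

```go
package main

import "fmt"

// node is an illustrative proto-array entry: parent indexes the same slice,
// and parents always come before children.
type node struct {
	parent int
	root   byte
	slot   uint64
}

// commonAncestor walks the higher-indexed pointer up to its parent until
// both pointers land on the same node, mirroring the loop in the diff.
func commonAncestor(nodes []node, i1, i2 int) (byte, uint64, bool) {
	for i1 != i2 {
		if i1 < 0 || i2 < 0 {
			return 0, 0, false // ran off the tree without meeting
		}
		if i1 > i2 {
			i1 = nodes[i1].parent
		} else {
			i2 = nodes[i2].parent
		}
	}
	n := nodes[i1]
	return n.root, n.slot, true
}

func main() {
	// a(index 0) <- b(index 1), a(index 0) <- c(index 2)
	nodes := []node{{-1, 'a', 0}, {0, 'b', 1}, {0, 'c', 2}}
	fmt.Println(commonAncestor(nodes, 1, 2)) // 97 0 true ('a' at slot 0)
}
```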
@@ -406,8 +415,12 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {

if !s.viableForHead(bestNode) {
s.allTipsAreInvalid = true
s.checkpointsLock.RLock()
jEpoch := s.justifiedCheckpoint.Epoch
fEpoch := s.finalizedCheckpoint.Epoch
s.checkpointsLock.RUnlock()
return [32]byte{}, fmt.Errorf("head at slot %d with weight %d is not eligible, finalizedEpoch %d != %d, justifiedEpoch %d != %d",
bestNode.slot, bestNode.weight/10e9, bestNode.finalizedEpoch, s.finalizedCheckpoint.Epoch, bestNode.justifiedEpoch, s.justifiedCheckpoint.Epoch)
bestNode.slot, bestNode.weight/10e9, bestNode.finalizedEpoch, fEpoch, bestNode.justifiedEpoch, jEpoch)
}
s.allTipsAreInvalid = false

@@ -426,7 +439,8 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
return bestNode.root, nil
}

// updateCanonicalNodes updates the canonical nodes mapping given the input block root.
// updateCanonicalNodes updates the canonical nodes mapping given the input
// block root. This function assumes the caller holds a lock in Store.nodesLock
func (s *Store) updateCanonicalNodes(ctx context.Context, root [32]byte) error {
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.updateCanonicalNodes")
defer span.End()
@@ -548,14 +562,14 @@ func (s *Store) insert(ctx context.Context,
if slot > s.highestReceivedSlot {
s.highestReceivedSlot = slot
}

return n, nil
}

// applyWeightChanges iterates backwards through the nodes in store. It checks all nodes parent
// and its best child. For each node, it updates the weight with input delta and
// back propagate the nodes' delta to its parents' delta. After scoring changes,
// the best child is then updated along with the best descendant.
// the best child is then updated along with the best descendant. This function
// assumes the caller holds a lock in Store.nodesLock
func (s *Store) applyWeightChanges(
ctx context.Context, newBalances []uint64, delta []int,
) error {
@@ -900,6 +914,8 @@ func (f *ForkChoice) Tips() ([][32]byte, []types.Slot) {
// store-tracked list. Votes from these validators are not accounted for
// in forkchoice.
func (f *ForkChoice) InsertSlashedIndex(ctx context.Context, index types.ValidatorIndex) {
f.votesLock.RLock()
defer f.votesLock.RUnlock()
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
// return early if the index was already included:
@@ -909,9 +925,6 @@ func (f *ForkChoice) InsertSlashedIndex(ctx context.Context, index types.Validat
f.store.slashedIndices[index] = true

// Subtract last vote from this equivocating validator
f.votesLock.RLock()
defer f.votesLock.RUnlock()

if index >= types.ValidatorIndex(len(f.balances)) {
return
}
@@ -1069,3 +1082,7 @@ func (f *ForkChoice) ReceivedBlocksLastEpoch() (uint64, error) {
}
return count, nil
}

func (*ForkChoice) ForkChoiceDump(_ context.Context) (*v1.ForkChoiceResponse, error) {
return nil, errors.New("ForkChoiceDump is not supported by protoarray")
}
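The InsertSlashedIndex hunks hoist the votesLock acquisition to the top of the function, so the vote subtraction that follows runs under one consistent view. What that subtraction amounts to, in an illustrative map-based sketch that is not the diffed store:

```go
package main

import "fmt"

// removeSlashedVote sketches "subtract last vote from this equivocating
// validator": the weight the validator's latest vote contributed to a node
// is taken back out so fork choice stops counting it. Names and the map
// store are illustrative.
func removeSlashedVote(weights map[byte]uint64, lastVotedRoot byte, balance uint64) {
	w, ok := weights[lastVotedRoot]
	if !ok {
		return
	}
	if w < balance {
		weights[lastVotedRoot] = 0
		return
	}
	weights[lastVotedRoot] = w - balance
}

func main() {
	weights := map[byte]uint64{'d': 100}
	removeSlashedVote(weights, 'd', 32)
	fmt.Println(weights['d']) // 68
}
```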
@@ -677,73 +677,85 @@ func TestStore_CommonAncestor(t *testing.T) {
r1 [32]byte
r2 [32]byte
wantRoot [32]byte
wantSlot types.Slot
}{
{
name: "Common ancestor between c and b is a",
r1: [32]byte{'c'},
r2: [32]byte{'b'},
wantRoot: [32]byte{'a'},
wantSlot: 0,
},
{
name: "Common ancestor between c and d is a",
r1: [32]byte{'c'},
r2: [32]byte{'d'},
wantRoot: [32]byte{'a'},
wantSlot: 0,
},
{
name: "Common ancestor between c and e is a",
r1: [32]byte{'c'},
r2: [32]byte{'e'},
wantRoot: [32]byte{'a'},
wantSlot: 0,
},
{
name: "Common ancestor between g and f is c",
r1: [32]byte{'g'},
r2: [32]byte{'f'},
wantRoot: [32]byte{'c'},
wantSlot: 2,
},
{
name: "Common ancestor between f and h is c",
r1: [32]byte{'f'},
r2: [32]byte{'h'},
wantRoot: [32]byte{'c'},
wantSlot: 2,
},
{
name: "Common ancestor between g and h is c",
r1: [32]byte{'g'},
r2: [32]byte{'h'},
wantRoot: [32]byte{'c'},
wantSlot: 2,
},
{
name: "Common ancestor between b and h is a",
r1: [32]byte{'b'},
r2: [32]byte{'h'},
wantRoot: [32]byte{'a'},
wantSlot: 0,
},
{
name: "Common ancestor between e and h is a",
r1: [32]byte{'e'},
r2: [32]byte{'h'},
wantRoot: [32]byte{'a'},
wantSlot: 0,
},
{
name: "Common ancestor between i and f is c",
r1: [32]byte{'i'},
r2: [32]byte{'f'},
wantRoot: [32]byte{'c'},
wantSlot: 2,
},
{
name: "Common ancestor between j and g is c",
r1: [32]byte{'j'},
r2: [32]byte{'g'},
wantRoot: [32]byte{'c'},
wantSlot: 2,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
gotRoot, err := f.CommonAncestorRoot(ctx, tc.r1, tc.r2)
gotRoot, gotSlot, err := f.CommonAncestor(ctx, tc.r1, tc.r2)
require.NoError(t, err)
require.Equal(t, tc.wantRoot, gotRoot)
require.Equal(t, tc.wantSlot, gotSlot)
})
}
@@ -766,52 +778,59 @@ func TestStore_CommonAncestor(t *testing.T) {
r1 [32]byte
r2 [32]byte
wantRoot [32]byte
wantSlot types.Slot
}{
{
name: "Common ancestor between a and b is a",
r1: [32]byte{'a'},
r2: [32]byte{'b'},
wantRoot: [32]byte{'a'},
wantSlot: 0,
},
{
name: "Common ancestor between b and d is b",
r1: [32]byte{'d'},
r2: [32]byte{'b'},
wantRoot: [32]byte{'b'},
wantSlot: 1,
},
{
name: "Common ancestor between d and a is a",
r1: [32]byte{'d'},
r2: [32]byte{'a'},
wantRoot: [32]byte{'a'},
wantSlot: 0,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
gotRoot, err := f.CommonAncestorRoot(ctx, tc.r1, tc.r2)
gotRoot, gotSlot, err := f.CommonAncestor(ctx, tc.r1, tc.r2)
require.NoError(t, err)
require.Equal(t, tc.wantRoot, gotRoot)
require.Equal(t, tc.wantSlot, gotSlot)
})
}

// Equal inputs should return the same root.
r, err := f.CommonAncestorRoot(ctx, [32]byte{'b'}, [32]byte{'b'})
r, s, err := f.CommonAncestor(ctx, [32]byte{'b'}, [32]byte{'b'})
require.NoError(t, err)
require.Equal(t, [32]byte{'b'}, r)
require.Equal(t, types.Slot(1), s)
// Requesting finalized root (last node) should return the same root.
r, err = f.CommonAncestorRoot(ctx, [32]byte{'a'}, [32]byte{'a'})
r, s, err = f.CommonAncestor(ctx, [32]byte{'a'}, [32]byte{'a'})
require.NoError(t, err)
require.Equal(t, [32]byte{'a'}, r)
require.Equal(t, types.Slot(0), s)
// Requesting unknown root
_, err = f.CommonAncestorRoot(ctx, [32]byte{'a'}, [32]byte{'z'})
_, _, err = f.CommonAncestor(ctx, [32]byte{'a'}, [32]byte{'z'})
require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
_, err = f.CommonAncestorRoot(ctx, [32]byte{'z'}, [32]byte{'a'})
_, _, err = f.CommonAncestor(ctx, [32]byte{'z'}, [32]byte{'a'})
require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
state, blkRoot, err = prepareForkchoiceState(ctx, 100, [32]byte{'y'}, [32]byte{'z'}, [32]byte{}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
// broken link
_, err = f.CommonAncestorRoot(ctx, [32]byte{'y'}, [32]byte{'a'})
_, _, err = f.CommonAncestor(ctx, [32]byte{'y'}, [32]byte{'a'})
require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
}
@@ -26,7 +26,6 @@ go_library(
"//beacon-chain/db/slasherkv:go_default_library",
"//beacon-chain/deterministic-genesis:go_default_library",
"//beacon-chain/execution:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",
"//beacon-chain/gateway:go_default_library",
@@ -28,7 +28,6 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db/slasherkv"
interopcoldstart "github.com/prysmaticlabs/prysm/v3/beacon-chain/deterministic-genesis"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/execution"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/gateway"
@@ -100,14 +99,12 @@ type BeaconNode struct {
stateFeed *event.Feed
blockFeed *event.Feed
opFeed *event.Feed
forkChoiceStore forkchoice.ForkChoicer
stateGen *stategen.State
collector *bcnodeCollector
slasherBlockHeadersFeed *event.Feed
slasherAttestationsFeed *event.Feed
finalizedStateAtStartUp state.BeaconState
serviceFlagOpts *serviceFlagOpts
blockchainFlagOpts []blockchain.Option
GenesisInitializer genesis.Initializer
CheckpointInitializer checkpoint.Initializer
}
@@ -205,12 +202,13 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
}

log.Debugln("Starting State Gen")
if err := beacon.startStateGen(ctx, bfs); err != nil {
stg, err := beacon.startStateGen(ctx, bfs)
if err != nil {
return nil, err
}

log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
if err := beacon.registerP2P(cliCtx, stg); err != nil {
return nil, err
}

@@ -229,9 +227,6 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
return nil, err
}

log.Debugln("Starting Fork Choice")
beacon.startForkChoice()

log.Debugln("Registering Blockchain Service")
if err := beacon.registerBlockchainService(); err != nil {
return nil, err
@@ -355,14 +350,6 @@ func (b *BeaconNode) Close() {
close(b.stop)
}

func (b *BeaconNode) startForkChoice() {
if !features.Get().DisableForkchoiceDoublyLinkedTree {
b.forkChoiceStore = doublylinkedtree.New()
} else {
b.forkChoiceStore = protoarray.New()
}
}

func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
baseDir := cliCtx.String(cmd.DataDirFlag.Name)
dbPath := filepath.Join(baseDir, kv.BeaconNodeDbDirName)
@@ -371,9 +358,7 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {

log.WithField("database-path", dbPath).Info("Checking DB")

d, err := db.NewDB(b.ctx, dbPath, &kv.Config{
InitialMMapSize: cliCtx.Int(cmd.BoltMMapInitialSizeFlag.Name),
})
d, err := db.NewDB(b.ctx, dbPath)
if err != nil {
return err
}
@@ -395,9 +380,7 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
if err := d.ClearDB(); err != nil {
return errors.Wrap(err, "could not clear database")
}
d, err = db.NewDB(b.ctx, dbPath, &kv.Config{
InitialMMapSize: cliCtx.Int(cmd.BoltMMapInitialSizeFlag.Name),
})
d, err = db.NewDB(b.ctx, dbPath)
if err != nil {
return errors.Wrap(err, "could not create new database")
}
@@ -467,9 +450,7 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {

log.WithField("database-path", dbPath).Info("Checking DB")

d, err := slasherkv.NewKVStore(b.ctx, dbPath, &slasherkv.Config{
InitialMMapSize: cliCtx.Int(cmd.BoltMMapInitialSizeFlag.Name),
})
d, err := slasherkv.NewKVStore(b.ctx, dbPath)
if err != nil {
return err
}
@@ -491,9 +472,7 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
if err := d.ClearDB(); err != nil {
return errors.Wrap(err, "could not clear database")
}
d, err = slasherkv.NewKVStore(b.ctx, dbPath, &slasherkv.Config{
InitialMMapSize: cliCtx.Int(cmd.BoltMMapInitialSizeFlag.Name),
})
d, err = slasherkv.NewKVStore(b.ctx, dbPath)
if err != nil {
return errors.Wrap(err, "could not create new database")
}
@@ -503,13 +482,13 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
return nil
}

func (b *BeaconNode) startStateGen(ctx context.Context, bfs *backfill.Status) error {
func (b *BeaconNode) startStateGen(ctx context.Context, bfs *backfill.Status) (stategen.StateManager, error) {
opts := []stategen.StateGenOption{stategen.WithBackfillStatus(bfs)}
sg := stategen.New(b.db, opts...)

cp, err := b.db.FinalizedCheckpoint(ctx)
if err != nil {
return err
return nil, err
}

r := bytesutil.ToBytes32(cp.Root)
@@ -517,31 +496,32 @@ func (b *BeaconNode) startStateGen(ctx context.Context, bfs *backfill.Status) er
if r == params.BeaconConfig().ZeroHash {
genesisBlock, err := b.db.GenesisBlock(ctx)
if err != nil {
return err
return nil, err
}
if genesisBlock != nil && !genesisBlock.IsNil() {
r, err = genesisBlock.Block().HashTreeRoot()
if err != nil {
return err
return nil, err
}
}
}

b.finalizedStateAtStartUp, err = sg.StateByRoot(ctx, r)
if err != nil {
return err
return nil, err
}

b.stateGen = sg
return nil
return sg, err
}

func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
func (b *BeaconNode) registerP2P(cliCtx *cli.Context, sg stategen.StateManager) error {
bootstrapNodeAddrs, dataDir, err := registration.P2PPreregistration(cliCtx)
if err != nil {
return err
}

vc := stategen.NewLastFinalizedValidatorCounter(0, b.db, sg)
svc, err := p2p.NewService(b.ctx, &p2p.Config{
NoDiscovery: cliCtx.Bool(cmd.NoDiscovery.Name),
StaticPeers: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.StaticPeers.Name)),
@@ -559,9 +539,8 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
AllowListCIDR: cliCtx.String(cmd.P2PAllowList.Name),
DenyListCIDR: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name),
DisableDiscv5: cliCtx.Bool(flags.DisableDiscv5.Name),
StateNotifier: b,
DB: b.db,
ValCounter: vc,
})
if err != nil {
return err
@@ -618,13 +597,19 @@ func (b *BeaconNode) registerBlockchainService() error {
blockchain.WithSlashingPool(b.slashingsPool),
blockchain.WithP2PBroadcaster(b.fetchP2P()),
blockchain.WithStateNotifier(b),
blockchain.WithForkChoiceStore(b.forkChoiceStore),
blockchain.WithAttestationService(attService),
blockchain.WithStateGen(b.stateGen),
blockchain.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
blockchain.WithFinalizedStateAtStartUp(b.finalizedStateAtStartUp),
blockchain.WithProposerIdsCache(b.proposerIdsCache),
)

if features.Get().DisableForkchoiceDoublyLinkedTree {
opts = append(opts, blockchain.WithForkChoiceStore(protoarray.New()))
} else {
opts = append(opts, blockchain.WithForkChoiceStore(doublylinkedtree.New()))
}

blockchainService, err := blockchain.NewService(b.ctx, opts...)
if err != nil {
return errors.Wrap(err, "could not register blockchain service")
@@ -852,7 +837,7 @@ func (b *BeaconNode) registerRPCService() error {
return b.services.RegisterService(rpcService)
}

func (b *BeaconNode) registerPrometheusService(cliCtx *cli.Context) error {
func (b *BeaconNode) registerPrometheusService(_ *cli.Context) error {
var additionalHandlers []prometheus.Handler
var p *p2p.Service
if err := b.services.FetchService(&p); err != nil {
|
||||
importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p",
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//cmd:__subpackages__",
|
||||
"//testing/endtoend/evaluators:__pkg__",
|
||||
"//tools:__subpackages__",
|
||||
],
|
||||
@@ -45,13 +46,12 @@ go_library(
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/p2p/encoder:go_default_library",
|
||||
"//beacon-chain/p2p/peers:go_default_library",
|
||||
"//beacon-chain/p2p/peers/peerdata:go_default_library",
|
||||
"//beacon-chain/p2p/peers/scorers:go_default_library",
|
||||
"//beacon-chain/p2p/types:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
@@ -137,7 +137,6 @@ go_test(
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/p2p/encoder:go_default_library",
|
||||
"//beacon-chain/p2p/peers:go_default_library",
|
||||
"//beacon-chain/p2p/peers/peerdata:go_default_library",
|
||||
|
||||
@@ -2,7 +2,7 @@ package p2p

import (
statefeed "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
)

// Config for the p2p service. These parameters are set from application level flags
@@ -10,7 +10,6 @@ import (
type Config struct {
NoDiscovery bool
EnableUPnP bool
DisableDiscv5 bool
StaticPeers []string
BootstrapNodeAddr []string
Discv5BootStrapAddr []string
@@ -27,5 +26,5 @@ type Config struct {
AllowListCIDR string
DenyListCIDR []string
StateNotifier statefeed.Notifier
DB db.ReadOnlyDatabase
ValCounter stategen.ActiveValidatorCounter
}
@@ -332,6 +332,31 @@ func (s *Service) isPeerAtLimit(inbound bool) bool {
return activePeers >= maxPeers || numOfConns >= maxPeers
}

// PeersFromStringAddrs converts peer raw ENRs into multiaddrs for p2p.
func PeersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) {
var allAddrs []ma.Multiaddr
enodeString, multiAddrString := parseGenericAddrs(addrs)
for _, stringAddr := range multiAddrString {
addr, err := multiAddrFromString(stringAddr)
if err != nil {
return nil, errors.Wrapf(err, "Could not get multiaddr from string")
}
allAddrs = append(allAddrs, addr)
}
for _, stringAddr := range enodeString {
enodeAddr, err := enode.Parse(enode.ValidSchemes, stringAddr)
if err != nil {
return nil, errors.Wrapf(err, "Could not get enode from string")
}
addr, err := convertToSingleMultiAddr(enodeAddr)
if err != nil {
return nil, errors.Wrapf(err, "Could not get multiaddr")
}
allAddrs = append(allAddrs, addr)
}
return allAddrs, nil
}

func parseBootStrapAddrs(addrs []string) (discv5Nodes []string) {
discv5Nodes, _ = parseGenericAddrs(addrs)
if len(discv5Nodes) == 0 {
@@ -435,30 +460,6 @@ func convertToUdpMultiAddr(node *enode.Node) ([]ma.Multiaddr, error) {
return addresses, nil
}

func peersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) {
var allAddrs []ma.Multiaddr
enodeString, multiAddrString := parseGenericAddrs(addrs)
for _, stringAddr := range multiAddrString {
addr, err := multiAddrFromString(stringAddr)
if err != nil {
return nil, errors.Wrapf(err, "Could not get multiaddr from string")
}
allAddrs = append(allAddrs, addr)
}
for _, stringAddr := range enodeString {
enodeAddr, err := enode.Parse(enode.ValidSchemes, stringAddr)
if err != nil {
return nil, errors.Wrapf(err, "Could not get enode from string")
}
addr, err := convertToSingleMultiAddr(enodeAddr)
if err != nil {
return nil, errors.Wrapf(err, "Could not get multiaddr")
}
allAddrs = append(allAddrs, addr)
}
return allAddrs, nil
}

func multiAddrFromString(address string) (ma.Multiaddr, error) {
return ma.NewMultiaddr(address)
}
@@ -11,6 +11,7 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/encoder",
visibility = [
"//beacon-chain:__subpackages__",
"//cmd:__subpackages__",
],
deps = [
"//config/params:go_default_library",
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"reflect"
|
||||
"strings"
|
||||
@@ -11,7 +10,6 @@ import (
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
|
||||
coreTime "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
@@ -95,19 +93,19 @@ func peerScoringParams() (*pubsub.PeerScoreParams, *pubsub.PeerScoreThresholds)
|
||||
}
|
||||
|
||||
func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, error) {
|
||||
activeValidators, err := s.retrieveActiveValidators()
|
||||
c, err := s.cfg.ValCounter.ActiveValidatorCount(s.ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Wrap(err, "could not compute active validator count")
|
||||
}
|
||||
switch {
|
||||
case strings.Contains(topic, GossipBlockMessage):
|
||||
return defaultBlockTopicParams(), nil
|
||||
case strings.Contains(topic, GossipAggregateAndProofMessage):
|
||||
return defaultAggregateTopicParams(activeValidators), nil
|
||||
return defaultAggregateTopicParams(c), nil
|
||||
case strings.Contains(topic, GossipAttestationMessage):
|
||||
return defaultAggregateSubnetTopicParams(activeValidators), nil
|
||||
return defaultAggregateSubnetTopicParams(c), nil
|
||||
case strings.Contains(topic, GossipSyncCommitteeMessage):
|
||||
return defaultSyncSubnetTopicParams(activeValidators), nil
|
||||
return defaultSyncSubnetTopicParams(c), nil
|
||||
case strings.Contains(topic, GossipContributionAndProofMessage):
|
||||
return defaultSyncContributionTopicParams(), nil
|
||||
case strings.Contains(topic, GossipExitMessage):
|
||||
@@ -121,43 +119,6 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) retrieveActiveValidators() (uint64, error) {
|
||||
if s.activeValidatorCount != 0 {
|
||||
return s.activeValidatorCount, nil
|
||||
}
|
||||
rt := s.cfg.DB.LastArchivedRoot(s.ctx)
|
||||
if rt == params.BeaconConfig().ZeroHash {
|
||||
genState, err := s.cfg.DB.GenesisState(s.ctx)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if genState == nil || genState.IsNil() {
|
||||
return 0, errors.New("no genesis state exists")
|
||||
}
|
||||
activeVals, err := helpers.ActiveValidatorCount(context.Background(), genState, coreTime.CurrentEpoch(genState))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
// Cache active validator count
|
||||
s.activeValidatorCount = activeVals
|
||||
return activeVals, nil
|
||||
}
|
||||
bState, err := s.cfg.DB.State(s.ctx, rt)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if bState == nil || bState.IsNil() {
|
||||
return 0, errors.Errorf("no state with root %#x exists", rt)
|
||||
}
|
||||
activeVals, err := helpers.ActiveValidatorCount(context.Background(), bState, coreTime.CurrentEpoch(bState))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
// Cache active validator count
|
||||
s.activeValidatorCount = activeVals
|
||||
return activeVals, nil
|
||||
}
|
||||
|
||||
// Based on the lighthouse parameters.
|
||||
// https://gist.github.com/blacktemplar/5c1862cb3f0e32a1a7fb0b25e79e6e2c
|
||||
|
||||
|
||||
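The topicScoreParams change swaps the DB-walking retrieveActiveValidators for a counter injected through Config.ValCounter. A sketch of the shape that dependency appears to have, inferred only from the call site above; the real interface lives in beacon-chain/state/stategen and may differ in detail:

```go
package main

import (
	"context"
	"fmt"
)

// ActiveValidatorCounter mirrors the shape implied by the call
// s.cfg.ValCounter.ActiveValidatorCount(s.ctx) in the diff above.
type ActiveValidatorCounter interface {
	ActiveValidatorCount(ctx context.Context) (uint64, error)
}

// staticCounter is a trivial illustrative implementation.
type staticCounter struct{ n uint64 }

func (c staticCounter) ActiveValidatorCount(_ context.Context) (uint64, error) {
	return c.n, nil
}

func main() {
	var vc ActiveValidatorCounter = staticCounter{n: 16384}
	n, err := vc.ActiveValidatorCount(context.Background())
	fmt.Println(n, err) // gossip topic score params scale with this count
}
```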
@@ -1,68 +1,11 @@
package p2p

import (
"context"
"testing"

pubsub "github.com/libp2p/go-libp2p-pubsub"
dbutil "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v3/config/params"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/assert"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/util"
)

func TestCorrect_ActiveValidatorsCount(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.MainnetConfig().Copy()
cfg.ConfigName = "test"

params.OverrideBeaconConfig(cfg)

db := dbutil.SetupDB(t)
s := &Service{
ctx: context.Background(),
cfg: &Config{DB: db},
}
bState, err := util.NewBeaconState(func(state *ethpb.BeaconState) error {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
Slashed: false,
}
}
state.Validators = validators
return nil
})
require.NoError(t, err)
require.NoError(t, db.SaveGenesisData(s.ctx, bState))

vals, err := s.retrieveActiveValidators()
assert.NoError(t, err, "genesis state not retrieved")
assert.Equal(t, int(params.BeaconConfig().MinGenesisActiveValidatorCount), int(vals), "mainnet genesis active count isn't accurate")
for i := 0; i < 100; i++ {
require.NoError(t, bState.AppendValidator(&ethpb.Validator{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
Slashed: false,
}))
}
require.NoError(t, bState.SetSlot(10000))
require.NoError(t, db.SaveState(s.ctx, bState, [32]byte{'a'}))
// Reset count
s.activeValidatorCount = 0

// Retrieve last archived state.
vals, err = s.retrieveActiveValidators()
assert.NoError(t, err, "genesis state not retrieved")
assert.Equal(t, int(params.BeaconConfig().MinGenesisActiveValidatorCount)+100, int(vals), "mainnet genesis active count isn't accurate")
}

func TestLoggingParameters(_ *testing.T) {
logGossipParameters("testing", nil)
logGossipParameters("testing", &pubsub.TopicScoreParams{})
@@ -21,11 +21,10 @@ import (
type P2P interface {
Broadcaster
SetStreamHandler
EncodingProvider
PubSubProvider
PubSubTopicUser
SenderEncoder
PeerManager
Sender
ConnectionHandler
PeersProvider
MetadataProvider
@@ -59,6 +58,12 @@ type ConnectionHandler interface {
connmgr.ConnectionGater
}

// SenderEncoder allows sending functionality from libp2p as well as encoding for requests and responses.
type SenderEncoder interface {
EncodingProvider
Sender
}

// EncodingProvider provides p2p network encoding.
type EncodingProvider interface {
Encoding() encoder.NetworkEncoding
@@ -29,7 +29,7 @@ func logIPAddr(id peer.ID, addrs ...ma.Multiaddr) {

func logExternalIPAddr(id peer.ID, addr string, port uint) {
if addr != "" {
multiAddr, err := multiAddressBuilder(addr, port)
multiAddr, err := MultiAddressBuilder(addr, port)
if err != nil {
log.WithError(err).Error("Could not create multiaddress")
return

@@ -16,10 +16,22 @@ import (
"github.com/prysmaticlabs/prysm/v3/runtime/version"
)

// MultiAddressBuilder takes in an ip address string and port to produce a go multiaddr format.
func MultiAddressBuilder(ipAddr string, port uint) (ma.Multiaddr, error) {
parsedIP := net.ParseIP(ipAddr)
if parsedIP.To4() == nil && parsedIP.To16() == nil {
return nil, errors.Errorf("invalid ip address provided: %s", ipAddr)
}
if parsedIP.To4() != nil {
return ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ipAddr, port))
}
return ma.NewMultiaddr(fmt.Sprintf("/ip6/%s/tcp/%d", ipAddr, port))
}

// buildOptions for the libp2p host.
func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Option {
cfg := s.cfg
listen, err := multiAddressBuilder(ip.String(), cfg.TCPPort)
listen, err := MultiAddressBuilder(ip.String(), cfg.TCPPort)
if err != nil {
log.WithError(err).Fatal("Failed to p2p listen")
}
@@ -27,7 +39,7 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Opt
if net.ParseIP(cfg.LocalIP) == nil {
log.Fatalf("Invalid local ip provided: %s", cfg.LocalIP)
}
listen, err = multiAddressBuilder(cfg.LocalIP, cfg.TCPPort)
listen, err = MultiAddressBuilder(cfg.LocalIP, cfg.TCPPort)
if err != nil {
log.WithError(err).Fatal("Failed to p2p listen")
}
@@ -65,7 +77,7 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Opt
}
if cfg.HostAddress != "" {
options = append(options, libp2p.AddrsFactory(func(addrs []ma.Multiaddr) []ma.Multiaddr {
external, err := multiAddressBuilder(cfg.HostAddress, cfg.TCPPort)
external, err := MultiAddressBuilder(cfg.HostAddress, cfg.TCPPort)
if err != nil {
log.WithError(err).Error("Unable to create external multiaddress")
} else {
@@ -90,17 +102,6 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Opt
return options
}

func multiAddressBuilder(ipAddr string, port uint) (ma.Multiaddr, error) {
parsedIP := net.ParseIP(ipAddr)
if parsedIP.To4() == nil && parsedIP.To16() == nil {
return nil, errors.Errorf("invalid ip address provided: %s", ipAddr)
}
if parsedIP.To4() != nil {
return ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ipAddr, port))
}
return ma.NewMultiaddr(fmt.Sprintf("/ip6/%s/tcp/%d", ipAddr, port))
}

func multiAddressBuilderWithID(ipAddr, protocol string, port uint, id peer.ID) (ma.Multiaddr, error) {
parsedIP := net.ParseIP(ipAddr)
if parsedIP.To4() == nil && parsedIP.To16() == nil {
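With multiAddressBuilder promoted to the exported MultiAddressBuilder, callers outside the package can build listen addresses directly. A small usage example, assuming the prysm/v3 module is available in your module graph:

```go
package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
)

func main() {
	// MultiAddressBuilder returns /ip4/<addr>/tcp/<port> for IPv4 inputs
	// and /ip6/... for IPv6, erroring on anything that does not parse.
	addr, err := p2p.MultiAddressBuilder("192.168.1.10", 13000)
	if err != nil {
		panic(err)
	}
	fmt.Println(addr) // /ip4/192.168.1.10/tcp/13000
}
```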
@@ -15,6 +15,7 @@ import (
mock "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v3/config/params"
ecdsaprysm "github.com/prysmaticlabs/prysm/v3/crypto/ecdsa"
"github.com/prysmaticlabs/prysm/v3/network"
"github.com/prysmaticlabs/prysm/v3/testing/assert"
"github.com/prysmaticlabs/prysm/v3/testing/require"
)
@@ -89,7 +90,7 @@ func TestDefaultMultiplexers(t *testing.T) {
var err error
svc.privKey, err = privKey(svc.cfg)
assert.NoError(t, err)
ipAddr := ipAddr()
ipAddr := network.IPAddr()
opts := svc.buildOptions(ipAddr, svc.privKey)
err = cfg.Apply(append(opts, libp2p.FallbackDefaults)...)
assert.NoError(t, err)

@@ -7,7 +7,10 @@ go_library(
"status.go",
],
importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/peers",
visibility = ["//beacon-chain:__subpackages__"],
visibility = [
"//beacon-chain:__subpackages__",
"//cmd:__subpackages__",
],
deps = [
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/peers/scorers:go_default_library",

@@ -30,6 +30,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/peers/scorers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v3/config/params"
prysmnetwork "github.com/prysmaticlabs/prysm/v3/network"
"github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1/metadata"
"github.com/prysmaticlabs/prysm/v3/runtime"
"github.com/prysmaticlabs/prysm/v3/time/slots"
@@ -83,7 +84,6 @@ type Service struct {
host host.Host
genesisTime time.Time
genesisValidatorsRoot []byte
activeValidatorCount uint64
}

// NewService initializes a new p2p service compatible with shared.Service interface. No
@@ -107,7 +107,7 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {

cfg.Discv5BootStrapAddr = dv5Nodes

ipAddr := ipAddr()
ipAddr := prysmnetwork.IPAddr()
s.privKey, err = privKey(s.cfg)
if err != nil {
log.WithError(err).Error("Failed to generate p2p private key")
@@ -200,8 +200,8 @@ func (s *Service) Start() {
}
}

if !s.cfg.NoDiscovery && !s.cfg.DisableDiscv5 {
ipAddr := ipAddr()
if !s.cfg.NoDiscovery {
ipAddr := prysmnetwork.IPAddr()
listener, err := s.startDiscoveryV5(
ipAddr,
s.privKey,
@@ -224,7 +224,7 @@ func (s *Service) Start() {
s.started = true

if len(s.cfg.StaticPeers) > 0 {
addrs, err := PeersFromStringAddrs(s.cfg.StaticPeers)
if err != nil {
log.WithError(err).Error("Could not connect to static peer")
}
@@ -11,6 +11,7 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/types",
visibility = [
"//beacon-chain:__subpackages__",
"//cmd:__subpackages__",
"//slasher/rpc:__pkg__",
"//testing/util:__pkg__",
"//validator/client:__pkg__",

@@ -19,7 +19,6 @@ import (
"github.com/prysmaticlabs/prysm/v3/consensus-types/wrapper"
ecdsaprysm "github.com/prysmaticlabs/prysm/v3/crypto/ecdsa"
"github.com/prysmaticlabs/prysm/v3/io/file"
"github.com/prysmaticlabs/prysm/v3/network"
pb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1/metadata"
"github.com/sirupsen/logrus"
@@ -129,15 +128,6 @@ func metaDataFromConfig(cfg *Config) (metadata.Metadata, error) {
return wrapper.WrappedMetadataV0(metaData), nil
}

// Retrieves an external ipv4 address and converts into a libp2p formatted value.
func ipAddr() net.IP {
ip, err := network.ExternalIP()
if err != nil {
log.WithError(err).Fatal("Could not get IPv4 address")
}
return net.ParseIP(ip)
}

// Attempt to dial an address to verify its connectivity
func verifyConnectivity(addr string, port uint, protocol string) {
if addr != "" {

@@ -6,12 +6,6 @@ datadir: /var/lib/prysm/beacon
# http-web3provider: ETH1 API endpoint, eg. http://localhost:8545 for a local geth service on the default port
http-web3provider: http://localhost:8545

# fallback-web3provider: List of backup ETH1 API endpoints, used if above is not working
# For example:
# fallback-web3provider:
#   - https://mainnet.infura.io/v3/YOUR-PROJECT-ID
#   - https://eth-mainnet.alchemyapi.io/v2/YOUR-PROJECT-ID


# Optional tuning parameters
# For full list, see https://docs.prylabs.network/docs/prysm-usage/parameters
@@ -41,6 +41,29 @@ func wrapFeeRecipientsArray(
return true, nil
}

// https://ethereum.github.io/beacon-APIs/#/Validator/registerValidator expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct.
func wrapSignedValidatorRegistrationsArray(
endpoint *apimiddleware.Endpoint,
_ http.ResponseWriter,
req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
if _, ok := endpoint.PostRequest.(*signedValidatorRegistrationsRequestJson); !ok {
return true, nil
}
registrations := make([]*signedValidatorRegistrationJson, 0)
if err := json.NewDecoder(req.Body).Decode(&registrations); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
}
j := &signedValidatorRegistrationsRequestJson{Registrations: registrations}
b, err := json.Marshal(j)
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
}
req.Body = io.NopCloser(bytes.NewReader(b))
return true, nil
}

// https://ethereum.github.io/beacon-apis/#/Beacon/submitPoolAttestations expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct with a 'data' field.
func wrapAttestationsArray(
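The hook above exists because the standard beacon API posts a bare top-level JSON array, while the proto-oriented middleware wants a single object. A standalone sketch of that rewrapping, using stand-in types rather than the private JSON structs from the diff:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// signedRegistration is an illustrative stand-in for
// signedValidatorRegistrationJson in the diff above.
type signedRegistration struct {
	Signature string `json:"signature"`
}

type wrappedRequest struct {
	Registrations []*signedRegistration `json:"registrations"`
}

func main() {
	// The client posts a bare array; the hook decodes it and re-marshals
	// it inside a wrapper object before the request body moves on.
	body := []byte(`[{"signature":"0xaa"},{"signature":"0xbb"}]`)
	var regs []*signedRegistration
	if err := json.Unmarshal(body, &regs); err != nil {
		panic(err)
	}
	out, _ := json.Marshal(wrappedRequest{Registrations: regs})
	fmt.Println(string(out)) // {"registrations":[{"signature":"0xaa"},{"signature":"0xbb"}]}
}
```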
@@ -50,6 +50,7 @@ func (_ *BeaconEndpointFactory) Paths() []string {
"/eth/v2/debug/beacon/states/{state_id}",
"/eth/v1/debug/beacon/heads",
"/eth/v2/debug/beacon/heads",
"/eth/v1/debug/beacon/forkchoice",
"/eth/v1/config/fork_schedule",
"/eth/v1/config/deposit_contract",
"/eth/v1/config/spec",
@@ -68,6 +69,7 @@ func (_ *BeaconEndpointFactory) Paths() []string {
"/eth/v1/validator/sync_committee_contribution",
"/eth/v1/validator/contribution_and_proofs",
"/eth/v1/validator/prepare_beacon_proposer",
"/eth/v1/validator/register_validator",
}
}

@@ -184,6 +186,8 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
endpoint.GetResponse = &forkChoiceHeadsResponseJson{}
case "/eth/v2/debug/beacon/heads":
endpoint.GetResponse = &v2ForkChoiceHeadsResponseJson{}
case "/eth/v1/debug/beacon/forkchoice":
endpoint.GetResponse = &forkchoiceResponse{}
case "/eth/v1/config/fork_schedule":
endpoint.GetResponse = &forkScheduleResponseJson{}
case "/eth/v1/config/deposit_contract":
@@ -268,6 +272,11 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: wrapFeeRecipientsArray,
}
case "/eth/v1/validator/register_validator":
endpoint.PostRequest = &signedValidatorRegistrationsRequestJson{}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: wrapSignedValidatorRegistrationsArray,
}
default:
return nil, errors.New("invalid path")
}
@@ -277,6 +277,18 @@ type submitContributionAndProofsRequestJson struct {
Data []*signedContributionAndProofJson `json:"data"`
}

type forkchoiceResponse struct {
JustifiedCheckpoint *checkpointJson `json:"justified_checkpoint"`
FinalizedCheckpoint *checkpointJson `json:"finalized_checkpoint"`
BestJustifiedCheckpoint *checkpointJson `json:"best_justified_checkpoint"`
UnrealizedJustifiedCheckpoint *checkpointJson `json:"unrealized_justified_checkpoint"`
UnrealizedFinalizedCheckpoint *checkpointJson `json:"unrealized_finalized_checkpoint"`
ProposerBoostRoot string `json:"proposer_boost_root" hex:"true"`
PreviousProposerBoostRoot string `json:"previous_proposer_boost_root" hex:"true"`
HeadRoot string `json:"head_root" hex:"true"`
ForkChoiceNodes []*forkChoiceNodeJson `json:"forkchoice_nodes"`
}

//----------------
// Reusable types.
//----------------
@@ -773,6 +785,36 @@ type syncCommitteeContributionJson struct {
Signature string `json:"signature" hex:"true"`
}

type validatorRegistrationJson struct {
FeeRecipient string `json:"fee_recipient" hex:"true"`
GasLimit string `json:"gas_limit"`
Timestamp string `json:"timestamp"`
Pubkey string `json:"pubkey" hex:"true"`
}

type signedValidatorRegistrationJson struct {
Message *validatorRegistrationJson `json:"message"`
Signature string `json:"signature" hex:"true"`
}

type signedValidatorRegistrationsRequestJson struct {
Registrations []*signedValidatorRegistrationJson `json:"registrations"`
}

type forkChoiceNodeJson struct {
Slot string `json:"slot"`
Root string `json:"root" hex:"true"`
ParentRoot string `json:"parent_root" hex:"true"`
JustifiedEpoch string `json:"justified_epoch"`
FinalizedEpoch string `json:"finalized_epoch"`
UnrealizedJustifiedEpoch string `json:"unrealized_justified_epoch"`
UnrealizedFinalizedEpoch string `json:"unrealized_finalized_epoch"`
Balance string `json:"balance"`
Weight string `json:"weight"`
ExecutionOptimistic bool `json:"execution_optimistic"`
ExecutionPayload string `json:"execution_payload" hex:"true"`
}

//----------------
// SSZ
// ---------------
@@ -23,6 +23,7 @@ go_library(
"//beacon-chain/core/feed/block:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/execution:go_default_library",
@@ -82,6 +83,7 @@ go_test(
"//api/grpc:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/execution/testing:go_default_library",
@@ -722,66 +722,24 @@ func (bs *Server) ListBlockAttestations(ctx context.Context, req *ethpbv1.BlockR
return nil, err
}

_, err = blk.PbPhase0Block()
if err != nil && !errors.Is(err, blocks.ErrUnsupportedGetter) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
v1Alpha1Attestations := blk.Block().Body().Attestations()
v1Attestations := make([]*ethpbv1.Attestation, 0, len(v1Alpha1Attestations))
for _, att := range v1Alpha1Attestations {
migratedAtt := migration.V1Alpha1AttestationToV1(att)
v1Attestations = append(v1Attestations, migratedAtt)
}
if err == nil {
v1Blk, err := migration.SignedBeaconBlock(blk)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
return &ethpbv1.BlockAttestationsResponse{
Data: v1Blk.Block.Body.Attestations,
ExecutionOptimistic: false,
}, nil
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get block root: %v", err)
}

altairBlk, err := blk.PbAltairBlock()
if err != nil && !errors.Is(err, blocks.ErrUnsupportedGetter) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}
if err == nil {
if altairBlk == nil {
return nil, status.Errorf(codes.Internal, "Nil block")
}
v2Blk, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlk.Block)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
return &ethpbv1.BlockAttestationsResponse{
Data: v2Blk.Body.Attestations,
ExecutionOptimistic: false,
}, nil
}

bellatrixBlock, err := blk.PbBellatrixBlock()
if err != nil && !errors.Is(err, blocks.ErrUnsupportedGetter) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
if err == nil {
if bellatrixBlock == nil {
return nil, status.Errorf(codes.Internal, "Nil block")
}
v2Blk, err := migration.V1Alpha1BeaconBlockBellatrixToV2(bellatrixBlock.Block)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get block root: %v", err)
}
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}
return &ethpbv1.BlockAttestationsResponse{
Data: v2Blk.Body.Attestations,
ExecutionOptimistic: isOptimistic,
}, nil
}

return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
return &ethpbv1.BlockAttestationsResponse{
Data: v1Attestations,
ExecutionOptimistic: isOptimistic,
}, nil
}

func (bs *Server) blockFromBlockID(ctx context.Context, blockId []byte) (interfaces.SignedBeaconBlock, error) {
@@ -1857,8 +1857,11 @@ func TestServer_ListBlockAttestations(t *testing.T) {

v1Block, err := migration.V1Alpha1ToV1SignedBlock(tt.want)
require.NoError(t, err)

if !reflect.DeepEqual(blk.Data, v1Block.Block.Body.Attestations) {
blkAtts := blk.Data
if len(blkAtts) == 0 {
blkAtts = nil
}
if !reflect.DeepEqual(blkAtts, v1Block.Block.Body.Attestations) {
t.Error("Expected attestations to equal")
}
})
@@ -1961,7 +1964,11 @@ func TestServer_ListBlockAttestations(t *testing.T) {
v1Block, err := migration.V1Alpha1BeaconBlockAltairToV2(tt.want.Block)
require.NoError(t, err)

if !reflect.DeepEqual(blk.Data, v1Block.Body.Attestations) {
blkAtts := blk.Data
if len(blkAtts) == 0 {
blkAtts = nil
}
if !reflect.DeepEqual(blkAtts, v1Block.Body.Attestations) {
t.Error("Expected attestations to equal")
}
})
@@ -2064,7 +2071,11 @@ func TestServer_ListBlockAttestations(t *testing.T) {
v1Block, err := migration.V1Alpha1BeaconBlockBellatrixToV2(tt.want.Block)
require.NoError(t, err)

if !reflect.DeepEqual(blk.Data, v1Block.Body.Attestations) {
blkAtts := blk.Data
if len(blkAtts) == 0 {
blkAtts = nil
}
if !reflect.DeepEqual(blkAtts, v1Block.Body.Attestations) {
t.Error("Expected attestations to equal")
}
})
@@ -8,6 +8,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed/operation"
corehelpers "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/crypto/bls"
@@ -164,6 +165,10 @@ func (bs *Server) SubmitAttesterSlashing(ctx context.Context, req *ethpbv1.Attes
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
headState, err = transition.ProcessSlotsIfPossible(ctx, headState, req.Attestation_1.Data.Slot)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not process slots: %v", err)
}

alphaSlashing := migration.V1AttSlashingToV1Alpha1(req)
err = blocks.VerifyAttesterSlashing(ctx, headState, alphaSlashing)
@@ -216,6 +221,10 @@ func (bs *Server) SubmitProposerSlashing(ctx context.Context, req *ethpbv1.Propo
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
headState, err = transition.ProcessSlotsIfPossible(ctx, headState, req.SignedHeader_1.Message.Slot)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not process slots: %v", err)
}

alphaSlashing := migration.V1ProposerSlashingToV1Alpha1(req)
err = blocks.VerifyProposerSlashing(headState, alphaSlashing)
@@ -269,6 +278,14 @@ func (bs *Server) SubmitVoluntaryExit(ctx context.Context, req *ethpbv1.SignedVo
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
s, err := slots.EpochStart(req.Message.Epoch)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get epoch from message: %v", err)
}
headState, err = transition.ProcessSlotsIfPossible(ctx, headState, s)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not process slots: %v", err)
}

validator, err := headState.ValidatorAtIndexReadOnly(req.Message.ValidatorIndex)
if err != nil {
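The three pool handlers above share one pattern: before verifying a slashing or exit that may sit on the far side of a fork boundary, the head state is advanced to the slot the operation refers to, so the signature is checked against the domain of the fork active at that slot. A minimal sketch of the idea, reusing only names that appear in the diff (assuming, as the usage here implies, that `ProcessSlotsIfPossible` is a no-op when the state is already at or past the target slot):

// Sketch only: bring a state to the slot an operation refers to before
// verifying the operation's signature, so the correct fork domain applies.
func stateAtOperationEpoch(ctx context.Context, head state.BeaconState, epoch types.Epoch) (state.BeaconState, error) {
	s, err := slots.EpochStart(epoch) // first slot of the operation's epoch
	if err != nil {
		return nil, err
	}
	// Assumed behavior: returns head unchanged if head.Slot() >= s.
	return transition.ProcessSlotsIfPossible(ctx, head, s)
}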
@@ -11,6 +11,7 @@ import (
grpcutil "github.com/prysmaticlabs/prysm/v3/api/grpc"
blockchainmock "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
slashingsmock "github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/slashings/mock"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/voluntaryexits/mock"
@@ -444,6 +445,80 @@ func TestSubmitAttesterSlashing_Ok(t *testing.T) {
assert.Equal(t, true, broadcaster.BroadcastCalled)
}

func TestSubmitAttesterSlashing_AcrossFork(t *testing.T) {
ctx := context.Background()

params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.AltairForkEpoch = 1
params.OverrideBeaconConfig(config)

bs, keys := util.DeterministicGenesisState(t, 1)

slashing := &ethpbv1.AttesterSlashing{
Attestation_1: &ethpbv1.IndexedAttestation{
AttestingIndices: []uint64{0},
Data: &ethpbv1.AttestationData{
Slot: params.BeaconConfig().SlotsPerEpoch,
Index: 1,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot1"), 32),
Source: &ethpbv1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
},
Target: &ethpbv1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot1"), 32),
},
},
Signature: make([]byte, 96),
},
Attestation_2: &ethpbv1.IndexedAttestation{
AttestingIndices: []uint64{0},
Data: &ethpbv1.AttestationData{
Slot: params.BeaconConfig().SlotsPerEpoch,
Index: 1,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot2"), 32),
Source: &ethpbv1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot2"), 32),
},
Target: &ethpbv1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
},
},
Signature: make([]byte, 96),
},
}

newBs := bs.Copy()
newBs, err := transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)

for _, att := range []*ethpbv1.IndexedAttestation{slashing.Attestation_1, slashing.Attestation_2} {
sb, err := signing.ComputeDomainAndSign(newBs, att.Data.Target.Epoch, att.Data, params.BeaconConfig().DomainBeaconAttester, keys[0])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
att.Signature = sig.Marshal()
}

broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
SlashingsPool: &slashingsmock.PoolMock{},
Broadcaster: broadcaster,
}

_, err = s.SubmitAttesterSlashing(ctx, slashing)
require.NoError(t, err)
pendingSlashings := s.SlashingsPool.PendingAttesterSlashings(ctx, bs, true)
require.Equal(t, 1, len(pendingSlashings))
assert.DeepEqual(t, migration.V1AttSlashingToV1Alpha1(slashing), pendingSlashings[0])
assert.Equal(t, true, broadcaster.BroadcastCalled)
}

func TestSubmitAttesterSlashing_InvalidSlashing(t *testing.T) {
ctx := context.Background()
bs, err := util.NewBeaconState()
@@ -551,6 +626,68 @@ func TestSubmitProposerSlashing_Ok(t *testing.T) {
assert.Equal(t, true, broadcaster.BroadcastCalled)
}

func TestSubmitProposerSlashing_AcrossFork(t *testing.T) {
ctx := context.Background()

params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.AltairForkEpoch = 1
params.OverrideBeaconConfig(config)

bs, keys := util.DeterministicGenesisState(t, 1)

slashing := &ethpbv1.ProposerSlashing{
SignedHeader_1: &ethpbv1.SignedBeaconBlockHeader{
Message: &ethpbv1.BeaconBlockHeader{
Slot: params.BeaconConfig().SlotsPerEpoch,
ProposerIndex: 0,
ParentRoot: bytesutil.PadTo([]byte("parentroot1"), 32),
StateRoot: bytesutil.PadTo([]byte("stateroot1"), 32),
BodyRoot: bytesutil.PadTo([]byte("bodyroot1"), 32),
},
Signature: make([]byte, 96),
},
SignedHeader_2: &ethpbv1.SignedBeaconBlockHeader{
Message: &ethpbv1.BeaconBlockHeader{
Slot: params.BeaconConfig().SlotsPerEpoch,
ProposerIndex: 0,
ParentRoot: bytesutil.PadTo([]byte("parentroot2"), 32),
StateRoot: bytesutil.PadTo([]byte("stateroot2"), 32),
BodyRoot: bytesutil.PadTo([]byte("bodyroot2"), 32),
},
Signature: make([]byte, 96),
},
}

newBs := bs.Copy()
newBs, err := transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)

for _, h := range []*ethpbv1.SignedBeaconBlockHeader{slashing.SignedHeader_1, slashing.SignedHeader_2} {
sb, err := signing.ComputeDomainAndSign(
newBs,
slots.ToEpoch(h.Message.Slot),
h.Message,
params.BeaconConfig().DomainBeaconProposer,
keys[0],
)
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
h.Signature = sig.Marshal()
}

broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
SlashingsPool: &slashingsmock.PoolMock{},
Broadcaster: broadcaster,
}

_, err = s.SubmitProposerSlashing(ctx, slashing)
require.NoError(t, err)
}

func TestSubmitProposerSlashing_InvalidSlashing(t *testing.T) {
ctx := context.Background()
bs, err := util.NewBeaconState()
@@ -630,6 +767,47 @@ func TestSubmitVoluntaryExit_Ok(t *testing.T) {
assert.Equal(t, true, broadcaster.BroadcastCalled)
}

func TestSubmitVoluntaryExit_AcrossFork(t *testing.T) {
ctx := context.Background()

params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.AltairForkEpoch = params.BeaconConfig().ShardCommitteePeriod + 1
params.OverrideBeaconConfig(config)

bs, keys := util.DeterministicGenesisState(t, 1)
// Satisfy activity time required before exiting.
require.NoError(t, bs.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod))))

exit := &ethpbv1.SignedVoluntaryExit{
Message: &ethpbv1.VoluntaryExit{
Epoch: params.BeaconConfig().ShardCommitteePeriod + 1,
ValidatorIndex: 0,
},
Signature: make([]byte, 96),
}

newBs := bs.Copy()
newBs, err := transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)+1))
require.NoError(t, err)

sb, err := signing.ComputeDomainAndSign(newBs, exit.Message.Epoch, exit.Message, params.BeaconConfig().DomainVoluntaryExit, keys[0])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
exit.Signature = sig.Marshal()

broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
VoluntaryExitsPool: &mock.PoolMock{},
Broadcaster: broadcaster,
}

_, err = s.SubmitVoluntaryExit(ctx, exit)
require.NoError(t, err)
}

func TestSubmitVoluntaryExit_InvalidValidatorIndex(t *testing.T) {
ctx := context.Background()

@@ -31,6 +31,8 @@ go_test(
deps = [
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/rpc/testutil:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
@@ -39,6 +41,7 @@ go_test(
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
],
)

@@ -14,29 +14,6 @@ import (
"google.golang.org/protobuf/types/known/emptypb"
)

// GetBeaconState returns the full beacon state for a given state ID.
func (ds *Server) GetBeaconState(ctx context.Context, req *ethpbv1.StateRequest) (*ethpbv1.BeaconStateResponse, error) {
ctx, span := trace.StartSpan(ctx, "debug.GetBeaconState")
defer span.End()

beaconSt, err := ds.StateFetcher.State(ctx, req.StateId)
if err != nil {
return nil, helpers.PrepareStateFetchGRPCError(err)
}

if beaconSt.Version() != version.Phase0 {
return nil, status.Error(codes.Internal, "State has incorrect type")
}
protoSt, err := migration.BeaconStateToProto(beaconSt)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not convert state to proto: %v", err)
}

return &ethpbv1.BeaconStateResponse{
Data: protoSt,
}, nil
}

// GetBeaconStateSSZ returns the SSZ-serialized version of the full beacon state object for a given state ID.
func (ds *Server) GetBeaconStateSSZ(ctx context.Context, req *ethpbv1.StateRequest) (*ethpbv2.SSZContainer, error) {
ctx, span := trace.StartSpan(ctx, "debug.GetBeaconStateSSZ")
@@ -140,25 +117,6 @@ func (ds *Server) GetBeaconStateSSZV2(ctx context.Context, req *ethpbv2.BeaconSt
return &ethpbv2.SSZContainer{Data: sszState, Version: ver}, nil
}

// ListForkChoiceHeads retrieves the leaves of the current fork choice tree.
func (ds *Server) ListForkChoiceHeads(ctx context.Context, _ *emptypb.Empty) (*ethpbv1.ForkChoiceHeadsResponse, error) {
ctx, span := trace.StartSpan(ctx, "debug.ListForkChoiceHeads")
defer span.End()

headRoots, headSlots := ds.HeadFetcher.ChainHeads()
resp := &ethpbv1.ForkChoiceHeadsResponse{
Data: make([]*ethpbv1.ForkChoiceHead, len(headRoots)),
}
for i := range headRoots {
resp.Data[i] = &ethpbv1.ForkChoiceHead{
Root: headRoots[i][:],
Slot: headSlots[i],
}
}

return resp, nil
}

// ListForkChoiceHeadsV2 retrieves the leaves of the current fork choice tree.
func (ds *Server) ListForkChoiceHeadsV2(ctx context.Context, _ *emptypb.Empty) (*ethpbv2.ForkChoiceHeadsResponse, error) {
ctx, span := trace.StartSpan(ctx, "debug.ListForkChoiceHeadsV2")
@@ -182,3 +140,8 @@ func (ds *Server) ListForkChoiceHeadsV2(ctx context.Context, _ *emptypb.Empty) (

return resp, nil
}

// GetForkChoice returns a dump of the fork choice store.
func (ds *Server) GetForkChoice(ctx context.Context, _ *emptypb.Empty) (*ethpbv1.ForkChoiceResponse, error) {
return ds.ForkFetcher.ForkChoicer().ForkChoiceDump(ctx)
}

@@ -4,8 +4,11 @@ import (
"context"
"testing"

"github.com/golang/protobuf/ptypes/empty"
blockchainmock "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain/testing"
dbTest "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/testutil"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
@@ -17,21 +20,6 @@ import (
"google.golang.org/protobuf/types/known/emptypb"
)

func TestGetBeaconState(t *testing.T) {
fakeState, err := util.NewBeaconState()
require.NoError(t, err)
server := &Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
}
resp, err := server.GetBeaconState(context.Background(), &ethpbv1.StateRequest{
StateId: make([]byte, 0),
})
require.NoError(t, err)
assert.NotNil(t, resp)
}

func TestGetBeaconStateV2(t *testing.T) {
ctx := context.Background()
db := dbTest.SetupDB(t)
@@ -196,38 +184,6 @@ func TestGetBeaconStateSSZV2(t *testing.T) {
})
}

func TestListForkChoiceHeads(t *testing.T) {
ctx := context.Background()

expectedSlotsAndRoots := []struct {
Slot types.Slot
Root [32]byte
}{{
Slot: 0,
Root: bytesutil.ToBytes32(bytesutil.PadTo([]byte("foo"), 32)),
}, {
Slot: 1,
Root: bytesutil.ToBytes32(bytesutil.PadTo([]byte("bar"), 32)),
}}

server := &Server{
HeadFetcher: &blockchainmock.ChainService{},
}
resp, err := server.ListForkChoiceHeads(ctx, &emptypb.Empty{})
require.NoError(t, err)
assert.Equal(t, 2, len(resp.Data))
for _, sr := range expectedSlotsAndRoots {
found := false
for _, h := range resp.Data {
if h.Slot == sr.Slot {
found = true
assert.DeepEqual(t, sr.Root[:], h.Root)
}
}
assert.Equal(t, true, found, "Expected head not found")
}
}

func TestListForkChoiceHeadsV2(t *testing.T) {
ctx := context.Background()

@@ -284,3 +240,18 @@ func TestListForkChoiceHeadsV2(t *testing.T) {
}
})
}

func TestServer_GetForkChoice(t *testing.T) {
store := doublylinkedtree.New()
fRoot := [32]byte{'a'}
jRoot := [32]byte{'b'}
fc := &forkchoicetypes.Checkpoint{Epoch: 2, Root: fRoot}
jc := &forkchoicetypes.Checkpoint{Epoch: 3, Root: jRoot}
require.NoError(t, store.UpdateFinalizedCheckpoint(fc))
require.NoError(t, store.UpdateJustifiedCheckpoint(jc))
bs := &Server{ForkFetcher: &blockchainmock.ChainService{ForkChoiceStore: store}}
res, err := bs.GetForkChoice(context.Background(), &empty.Empty{})
require.NoError(t, err)
require.Equal(t, types.Epoch(3), res.JustifiedCheckpoint.Epoch, "Did not get wanted justified epoch")
require.Equal(t, types.Epoch(2), res.FinalizedCheckpoint.Epoch, "Did not get wanted finalized epoch")
}

@@ -16,4 +16,5 @@ type Server struct {
HeadFetcher blockchain.HeadFetcher
StateFetcher statefetcher.Fetcher
OptimisticModeFetcher blockchain.OptimisticModeFetcher
ForkFetcher blockchain.ForkFetcher
}

@@ -10,9 +10,11 @@ go_library(
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/builder:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/p2p:go_default_library",
@@ -49,13 +51,16 @@ go_test(
embed = [":go_default_library"],
deps = [
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/builder/testing:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/execution/testing:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/attestations/mock:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
@@ -70,6 +75,7 @@ go_test(
"//beacon-chain/sync/initial-sync/testing:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
@@ -84,6 +90,7 @@ go_test(
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

@@ -12,9 +12,11 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/golang/protobuf/ptypes/empty"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/builder"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db/kv"
rpchelpers "github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
statev1 "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/v1"
@@ -193,11 +195,11 @@ func (vs *Server) GetProposerDuties(ctx context.Context, req *ethpbv1.ProposerDu
// where `epoch` is described as `epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD <= current_epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD + 1`.
//
// Algorithm:
// - Get the last valid epoch. This is the last epoch of the next sync committee period.
// - Get the state for the requested epoch. If it's a future epoch from the current sync committee period
// or an epoch from the next sync committee period, then get the current state.
// - Get the state's current sync committee. If it's an epoch from the next sync committee period, then get the next sync committee.
// - Get duties.
// - Get the last valid epoch. This is the last epoch of the next sync committee period.
// - Get the state for the requested epoch. If it's a future epoch from the current sync committee period
// or an epoch from the next sync committee period, then get the current state.
// - Get the state's current sync committee. If it's an epoch from the next sync committee period, then get the next sync committee.
// - Get duties.
func (vs *Server) GetSyncCommitteeDuties(ctx context.Context, req *ethpbv2.SyncCommitteeDutiesRequest) (*ethpbv2.SyncCommitteeDutiesResponse, error) {
ctx, span := trace.StartSpan(ctx, "validator.GetSyncCommitteeDuties")
defer span.End()
@@ -267,23 +269,6 @@ func (vs *Server) GetSyncCommitteeDuties(ctx context.Context, req *ethpbv2.SyncC
}, nil
}

// ProduceBlock requests the beacon node to produce a valid unsigned beacon block, which can then be signed by a proposer and submitted.
func (vs *Server) ProduceBlock(ctx context.Context, req *ethpbv1.ProduceBlockRequest) (*ethpbv1.ProduceBlockResponse, error) {
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlock")
defer span.End()

if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}

block, err := vs.v1BeaconBlock(ctx, req)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get block: %v", err)
}
return &ethpbv1.ProduceBlockResponse{Data: block}, nil
}

// ProduceBlockV2 requests the beacon node to produce a valid unsigned beacon block, which can then be signed by a proposer and submitted.
func (vs *Server) ProduceBlockV2(ctx context.Context, req *ethpbv1.ProduceBlockRequest) (*ethpbv2.ProduceBlockResponseV2, error) {
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlockV2")
@@ -419,7 +404,11 @@ func (vs *Server) ProduceBlockV2SSZ(ctx context.Context, req *ethpbv1.ProduceBlo
// ProduceBlindedBlock requests the beacon node to produce a valid unsigned blinded beacon block,
// which can then be signed by a proposer and submitted.
//
// Pre-Bellatrix, this endpoint will return a regular block.
// Under the following conditions, this endpoint will return an error.
// - The node is syncing or in optimistic mode (after bellatrix).
// - The builder is not configured (after bellatrix).
// - The relayer circuit breaker is activated (after bellatrix).
// - The relayer responded with an error (after bellatrix).
func (vs *Server) ProduceBlindedBlock(ctx context.Context, req *ethpbv1.ProduceBlockRequest) (*ethpbv2.ProduceBlindedBlockResponse, error) {
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlindedBlock")
defer span.End()
@@ -428,57 +417,76 @@ func (vs *Server) ProduceBlindedBlock(ctx context.Context, req *ethpbv1.ProduceB
// We simply return the error because it's already a gRPC error.
return nil, err
}

v1alpha1req := &ethpbalpha.BlockRequest{
Slot: req.Slot,
RandaoReveal: req.RandaoReveal,
Graffiti: req.Graffiti,
}
v1alpha1resp, err := vs.V1Alpha1Server.GetBeaconBlock(ctx, v1alpha1req)

// Before Bellatrix, return normal block.
if req.Slot < types.Slot(params.BeaconConfig().BellatrixForkEpoch)*params.BeaconConfig().SlotsPerEpoch {
v1alpha1resp, err := vs.V1Alpha1Server.GetBeaconBlock(ctx, v1alpha1req)
if err != nil {
// We simply return err because it's already of a gRPC error type.
return nil, err
}
phase0Block, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Phase0)
if ok {
block, err := migration.V1Alpha1ToV1Block(phase0Block.Phase0)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
return &ethpbv2.ProduceBlindedBlockResponse{
Version: ethpbv2.Version_PHASE0,
Data: &ethpbv2.BlindedBeaconBlockContainer{
Block: &ethpbv2.BlindedBeaconBlockContainer_Phase0Block{Phase0Block: block},
},
}, nil
}
altairBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Altair)
if ok {
block, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlock.Altair)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
return &ethpbv2.ProduceBlindedBlockResponse{
Version: ethpbv2.Version_ALTAIR,
Data: &ethpbv2.BlindedBeaconBlockContainer{
Block: &ethpbv2.BlindedBeaconBlockContainer_AltairBlock{AltairBlock: block},
},
}, nil
}
}

// After Bellatrix, return blinded block.
optimistic, err := vs.OptimisticModeFetcher.IsOptimistic(ctx)
if err != nil {
// We simply return err because it's already of a gRPC error type.
return nil, err
return nil, status.Errorf(codes.Internal, "Could not determine if the node is an optimistic node: %v", err)
}
phase0Block, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Phase0)
if ok {
block, err := migration.V1Alpha1ToV1Block(phase0Block.Phase0)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
return &ethpbv2.ProduceBlindedBlockResponse{
Version: ethpbv2.Version_PHASE0,
Data: &ethpbv2.BlindedBeaconBlockContainer{
Block: &ethpbv2.BlindedBeaconBlockContainer_Phase0Block{Phase0Block: block},
},
}, nil
if optimistic {
return nil, status.Errorf(codes.Unavailable, "The node is currently optimistic and cannot serve validators")
}
altairBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Altair)
if ok {
block, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlock.Altair)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
return &ethpbv2.ProduceBlindedBlockResponse{
Version: ethpbv2.Version_ALTAIR,
Data: &ethpbv2.BlindedBeaconBlockContainer{
Block: &ethpbv2.BlindedBeaconBlockContainer_AltairBlock{AltairBlock: block},
},
}, nil
altairBlk, err := vs.V1Alpha1Server.BuildAltairBeaconBlock(ctx, v1alpha1req)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
bellatrixBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Bellatrix)
if ok {
block, err := migration.V1Alpha1BeaconBlockBellatrixToV2Blinded(bellatrixBlock.Bellatrix)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
return &ethpbv2.ProduceBlindedBlockResponse{
Version: ethpbv2.Version_BELLATRIX,
Data: &ethpbv2.BlindedBeaconBlockContainer{
Block: &ethpbv2.BlindedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: block},
},
}, nil
ok, b, err := vs.V1Alpha1Server.GetAndBuildBlindBlock(ctx, altairBlk)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare blind beacon block: %v", err)
}
return nil, status.Error(codes.InvalidArgument, "Unsupported block type")
if !ok {
return nil, status.Error(codes.Unavailable, "Builder is not available due to misconfiguration or circuit breaker")
}
blk, err := migration.V1Alpha1BeaconBlockBlindedBellatrixToV2Blinded(b.GetBlindedBellatrix())
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
return &ethpbv2.ProduceBlindedBlockResponse{
Version: ethpbv2.Version_BELLATRIX,
Data: &ethpbv2.BlindedBeaconBlockContainer{
Block: &ethpbv2.BlindedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: blk},
},
}, nil
}

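The rewritten handler gates on the requested slot rather than on a type switch alone: any slot below the first Bellatrix slot takes the legacy phase0/Altair path, and everything at or above it goes through the builder flow. A sketch of the boundary arithmetic, using only names that appear in the diff above; for example, with BellatrixForkEpoch = 1 and 32 slots per epoch, slots 0 through 31 return a regular block and slot 32 onward returns a blinded block:

// Sketch of the fork gate used in ProduceBlindedBlock above.
bellatrixStart := types.Slot(params.BeaconConfig().BellatrixForkEpoch) * params.BeaconConfig().SlotsPerEpoch
preBellatrix := req.Slot < bellatrixStart // legacy path when true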
// ProduceBlindedBlockSSZ requests the beacon node to produce a valid unsigned blinded beacon block,
@@ -562,7 +570,24 @@ func (vs *Server) PrepareBeaconProposer(
defer span.End()
var feeRecipients []common.Address
var validatorIndices []types.ValidatorIndex
for _, recipientContainer := range request.Recipients {
newRecipients := make([]*ethpbv1.PrepareBeaconProposerRequest_FeeRecipientContainer, 0, len(request.Recipients))
for _, r := range request.Recipients {
f, err := vs.V1Alpha1Server.BeaconDB.FeeRecipientByValidatorID(ctx, r.ValidatorIndex)
switch {
case errors.Is(err, kv.ErrNotFoundFeeRecipient):
newRecipients = append(newRecipients, r)
case err != nil:
return nil, status.Errorf(codes.Internal, "Could not get fee recipient by validator index: %v", err)
default:
}
if common.BytesToAddress(r.FeeRecipient) != f {
newRecipients = append(newRecipients, r)
}
}
if len(newRecipients) == 0 {
return &emptypb.Empty{}, nil
}
for _, recipientContainer := range newRecipients {
recipient := hexutil.Encode(recipientContainer.FeeRecipient)
if !common.IsHexAddress(recipient) {
return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("Invalid fee recipient address: %v", recipient))
@@ -579,6 +604,38 @@ func (vs *Server) PrepareBeaconProposer(
return &emptypb.Empty{}, nil
}

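The new `newRecipients` filter means PrepareBeaconProposer only persists fee recipients that are missing from the database or differ from the stored address; a repeated identical request becomes a no-op, which is what the overlapping test later in this diff asserts via the log hooks. A hedged sketch of the per-recipient decision (`shouldPersist` is an illustrative name, not from the source; the lookup call and error sentinel match the diff):

func shouldPersist(ctx context.Context, vs *Server, r *ethpbv1.PrepareBeaconProposerRequest_FeeRecipientContainer) (bool, error) {
	stored, err := vs.V1Alpha1Server.BeaconDB.FeeRecipientByValidatorID(ctx, r.ValidatorIndex)
	switch {
	case errors.Is(err, kv.ErrNotFoundFeeRecipient):
		return true, nil // nothing stored yet for this validator
	case err != nil:
		return false, err
	default:
		return common.BytesToAddress(r.FeeRecipient) != stored, nil // persist only on change
	}
}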
// SubmitValidatorRegistration submits validator registrations.
func (vs *Server) SubmitValidatorRegistration(ctx context.Context, reg *ethpbv1.SubmitValidatorRegistrationsRequest) (*empty.Empty, error) {
ctx, span := trace.StartSpan(ctx, "validator.SubmitValidatorRegistration")
defer span.End()

if vs.V1Alpha1Server.BlockBuilder == nil || !vs.V1Alpha1Server.BlockBuilder.Configured() {
return &empty.Empty{}, status.Errorf(codes.Internal, "Could not register block builder: %v", builder.ErrNoBuilder)
}
var registrations []*ethpbalpha.SignedValidatorRegistrationV1
for i, registration := range reg.Registrations {
message := reg.Registrations[i].Message
registrations = append(registrations, &ethpbalpha.SignedValidatorRegistrationV1{
Message: &ethpbalpha.ValidatorRegistrationV1{
FeeRecipient: message.FeeRecipient,
GasLimit: message.GasLimit,
Timestamp: message.Timestamp,
Pubkey: message.Pubkey,
},
Signature: registration.Signature,
})
}
if len(registrations) == 0 {
return &empty.Empty{}, status.Errorf(codes.InvalidArgument, "Validator registration request is empty")
}

if err := vs.V1Alpha1Server.BlockBuilder.RegisterValidator(ctx, registrations); err != nil {
return nil, status.Errorf(codes.InvalidArgument, "Could not register block builder: %v", err)
}

return &empty.Empty{}, nil
}

// ProduceAttestationData requests that the beacon node produces attestation data for
// the requested committee index and slot based on the node's current head.
func (vs *Server) ProduceAttestationData(ctx context.Context, req *ethpbv1.ProduceAttestationDataRequest) (*ethpbv1.ProduceAttestationDataResponse, error) {
@@ -978,19 +1035,6 @@ func v1ValidatorStatusToV1Alpha1(valStatus ethpbv1.ValidatorStatus) ethpbalpha.V
}
}

func (vs *Server) v1BeaconBlock(ctx context.Context, req *ethpbv1.ProduceBlockRequest) (*ethpbv1.BeaconBlock, error) {
v1alpha1req := &ethpbalpha.BlockRequest{
Slot: req.Slot,
RandaoReveal: req.RandaoReveal,
Graffiti: req.Graffiti,
}
v1alpha1resp, err := vs.V1Alpha1Server.GetBeaconBlock(ctx, v1alpha1req)
if err != nil {
return nil, err
}
return migration.V1Alpha1ToV1Block(v1alpha1resp.GetPhase0())
}

func syncCommitteeDutiesLastValidEpoch(currentEpoch types.Epoch) types.Epoch {
currentSyncPeriodIndex := currentEpoch / params.BeaconConfig().EpochsPerSyncCommitteePeriod
// Return the last epoch of the next sync committee.

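The body of syncCommitteeDutiesLastValidEpoch is cut off by the hunk above. Given the rule quoted in the duties comment (`epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD <= current_epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD + 1`), a completion consistent with it would be the following, which is an assumption rather than the confirmed source:

// Assumed completion (not shown in the hunk): the last epoch of the next period.
// return (currentSyncPeriodIndex+2)*params.BeaconConfig().EpochsPerSyncCommitteePeriod - 1
//
// Worked example: with EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256 and
// currentEpoch = 300 (period index 1), duties may be requested up to
// epoch (1+2)*256 - 1 = 767, the final epoch of period 2.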
@@ -10,13 +10,16 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/prysmaticlabs/go-bitfield"
mockChain "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain/testing"
builderTest "github.com/prysmaticlabs/prysm/v3/beacon-chain/builder/testing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/signing"
coreTime "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
dbutil "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
mockExecution "github.com/prysmaticlabs/prysm/v3/beacon-chain/execution/testing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations/mock"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/slashings"
@@ -31,6 +34,7 @@ import (
mockSync "github.com/prysmaticlabs/prysm/v3/beacon-chain/sync/initial-sync/testing"
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/crypto/bls"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
@@ -43,6 +47,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/util"
"github.com/prysmaticlabs/prysm/v3/time/slots"
logTest "github.com/sirupsen/logrus/hooks/test"
"google.golang.org/protobuf/proto"
)

@@ -655,103 +660,6 @@ func TestSyncCommitteeDutiesLastValidEpoch(t *testing.T) {
})
}

func TestProduceBlock(t *testing.T) {
db := dbutil.SetupDB(t)
ctx := context.Background()

params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())

beaconState, parentRoot, privKeys := util.DeterministicGenesisStateWithGenesisBlock(t, ctx, db, 64)

v1Alpha1Server := &v1alpha1validator.Server{
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
HeadUpdater: &mockChain.ChainService{},
ChainStartFetcher: &mockExecution.Chain{},
Eth1InfoFetcher: &mockExecution.Chain{},
Eth1BlockFetcher: &mockExecution.Chain{},
MockEth1Votes: true,
AttPool: attestations.NewPool(),
SlashingsPool: slashings.NewPool(),
ExitPool: voluntaryexits.NewPool(),
StateGen: stategen.New(db),
}

proposerSlashings := make([]*ethpbalpha.ProposerSlashing, params.BeaconConfig().MaxProposerSlashings)
for i := types.ValidatorIndex(0); uint64(i) < params.BeaconConfig().MaxProposerSlashings; i++ {
proposerSlashing, err := util.GenerateProposerSlashingForValidator(
beaconState,
privKeys[i],
i, /* validator index */
)
require.NoError(t, err)
proposerSlashings[i] = proposerSlashing
err = v1Alpha1Server.SlashingsPool.InsertProposerSlashing(context.Background(), beaconState, proposerSlashing)
require.NoError(t, err)
}

attSlashings := make([]*ethpbalpha.AttesterSlashing, params.BeaconConfig().MaxAttesterSlashings)
for i := uint64(0); i < params.BeaconConfig().MaxAttesterSlashings; i++ {
attesterSlashing, err := util.GenerateAttesterSlashingForValidator(
beaconState,
privKeys[i+params.BeaconConfig().MaxProposerSlashings],
types.ValidatorIndex(i+params.BeaconConfig().MaxProposerSlashings), /* validator index */
)
require.NoError(t, err)
attSlashings[i] = attesterSlashing
err = v1Alpha1Server.SlashingsPool.InsertAttesterSlashing(context.Background(), beaconState, attesterSlashing)
require.NoError(t, err)
}

v1Server := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: false},
V1Alpha1Server: v1Alpha1Server,
}
randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
require.NoError(t, err)
graffiti := bytesutil.ToBytes32([]byte("eth2"))
req := &ethpbv1.ProduceBlockRequest{
Slot: 1,
RandaoReveal: randaoReveal,
Graffiti: graffiti[:],
}
resp, err := v1Server.ProduceBlock(ctx, req)
require.NoError(t, err)

assert.Equal(t, req.Slot, resp.Data.Slot, "Expected block to have slot of 1")
assert.DeepEqual(t, parentRoot[:], resp.Data.ParentRoot, "Expected block to have correct parent root")
assert.DeepEqual(t, randaoReveal, resp.Data.Body.RandaoReveal, "Expected block to have correct randao reveal")
assert.DeepEqual(t, req.Graffiti, resp.Data.Body.Graffiti, "Expected block to have correct graffiti")
assert.Equal(t, params.BeaconConfig().MaxProposerSlashings, uint64(len(resp.Data.Body.ProposerSlashings)))
expectedPropSlashings := make([]*ethpbv1.ProposerSlashing, len(proposerSlashings))
for i, slash := range proposerSlashings {
expectedPropSlashings[i] = migration.V1Alpha1ProposerSlashingToV1(slash)
}
assert.DeepEqual(t, expectedPropSlashings, resp.Data.Body.ProposerSlashings)
assert.Equal(t, params.BeaconConfig().MaxAttesterSlashings, uint64(len(resp.Data.Body.AttesterSlashings)))
expectedAttSlashings := make([]*ethpbv1.AttesterSlashing, len(attSlashings))
for i, slash := range attSlashings {
expectedAttSlashings[i] = migration.V1Alpha1AttSlashingToV1(slash)
}
assert.DeepEqual(t, expectedAttSlashings, resp.Data.Body.AttesterSlashings)
}

func TestProduceBlock_SyncNotReady(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
chainService := &mockChain.ChainService{State: st}
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
}
_, err = vs.ProduceBlock(context.Background(), &ethpbv1.ProduceBlockRequest{})
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
}

func TestProduceBlockV2(t *testing.T) {
t.Run("Phase 0", func(t *testing.T) {
db := dbutil.SetupDB(t)
@@ -1937,7 +1845,7 @@ func TestProduceBlindedBlock(t *testing.T) {
assert.DeepEqual(t, aggregatedSig, blk.Body.SyncAggregate.SyncCommitteeSignature)
})

t.Run("Bellatrix", func(t *testing.T) {
t.Run("Can get blind block from builder service", func(t *testing.T) {
db := dbutil.SetupDB(t)
ctx := context.Background()

@@ -1945,6 +1853,8 @@ func TestProduceBlindedBlock(t *testing.T) {
bc := params.BeaconConfig().Copy()
bc.AltairForkEpoch = types.Epoch(0)
bc.BellatrixForkEpoch = types.Epoch(1)
bc.MaxBuilderConsecutiveMissedSlots = params.BeaconConfig().SlotsPerEpoch + 1
bc.MaxBuilderEpochMissedSlots = params.BeaconConfig().SlotsPerEpoch
params.OverrideBeaconConfig(bc)

beaconState, privKeys := util.DeterministicGenesisStateBellatrix(t, params.BeaconConfig().SyncCommitteeSize)
@@ -1965,14 +1875,56 @@ func TestProduceBlindedBlock(t *testing.T) {
require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")

v1Alpha1Server := &v1alpha1validator.Server{
ExecutionEngineCaller: &mockExecution.EngineClient{
ExecutionBlock: &enginev1.ExecutionBlock{
TotalDifficulty: "0x1",
},
fb := util.HydrateSignedBeaconBlockBellatrix(&ethpbalpha.SignedBeaconBlockBellatrix{})
fb.Block.Body.ExecutionPayload.GasLimit = 123
wfb, err := blocks.NewSignedBeaconBlock(fb)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wfb), "Could not save block")
r, err := wfb.Block().HashTreeRoot()
require.NoError(t, err)

sk, err := bls.RandKey()
require.NoError(t, err)
ti := time.Unix(0, 0)
ts, err := slots.ToTime(uint64(ti.Unix()), 33)
require.NoError(t, err)
require.NoError(t, beaconState.SetGenesisTime(uint64(ti.Unix())))
random, err := helpers.RandaoMix(beaconState, coreTime.CurrentEpoch(beaconState))
require.NoError(t, err)
bid := &ethpbalpha.BuilderBid{
Header: &enginev1.ExecutionPayloadHeader{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: random,
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
BlockNumber: 1,
Timestamp: uint64(ts.Unix()),
},
TimeFetcher: &mockChain.ChainService{},
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
Pubkey: sk.PublicKey().Marshal(),
Value: bytesutil.PadTo([]byte{1, 2, 3}, 32),
}
d := params.BeaconConfig().DomainApplicationBuilder
domain, err := signing.ComputeDomain(d, nil, nil)
require.NoError(t, err)
sr, err := signing.ComputeSigningRoot(bid, domain)
require.NoError(t, err)
sBid := &ethpbalpha.SignedBuilderBid{
Message: bid,
Signature: sk.Sign(sr[:]).Marshal(),
}

v1Alpha1Server := &v1alpha1validator.Server{
BeaconDB: db,
ForkFetcher: &mockChain.ChainService{ForkChoiceStore: protoarray.New()},
TimeFetcher: &mockChain.ChainService{
Genesis: ti,
},
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:], Block: wfb},
OptimisticModeFetcher: &mockChain.ChainService{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
@@ -1987,6 +1939,15 @@ func TestProduceBlindedBlock(t *testing.T) {
StateGen: stategen.New(db),
SyncCommitteePool: synccommittee.NewStore(),
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
BlockBuilder: &builderTest.MockBuilderService{
HasConfigured: true,
Bid: sBid,
},
FinalizationFetcher: &mockChain.ChainService{
FinalizedCheckPoint: &ethpbalpha.Checkpoint{
Root: r[:],
},
},
}

proposerSlashings := make([]*ethpbalpha.ProposerSlashing, params.BeaconConfig().MaxProposerSlashings)
@@ -2044,8 +2005,10 @@ func TestProduceBlindedBlock(t *testing.T) {
require.NoError(t, v1Alpha1Server.SyncCommitteePool.SaveSyncCommitteeContribution(contribution))

v1Server := &Server{
V1Alpha1Server: v1Alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
V1Alpha1Server: v1Alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: &mockChain.ChainService{},
OptimisticModeFetcher: &mockChain.ChainService{},
}
randaoReveal, err := util.RandaoReveal(beaconState, 1, privKeys)
require.NoError(t, err)
@@ -3717,3 +3680,147 @@ func TestPrepareBeaconProposer(t *testing.T) {
})
}
}
func TestProposer_PrepareBeaconProposerOverlapping(t *testing.T) {
hook := logTest.NewGlobal()
db := dbutil.SetupDB(t)
ctx := context.Background()
v1Server := &v1alpha1validator.Server{
BeaconDB: db,
}
proposerServer := &Server{V1Alpha1Server: v1Server}

// New validator
f := bytesutil.PadTo([]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF}, fieldparams.FeeRecipientLength)
req := &ethpbv1.PrepareBeaconProposerRequest{
Recipients: []*ethpbv1.PrepareBeaconProposerRequest_FeeRecipientContainer{
{FeeRecipient: f, ValidatorIndex: 1},
},
}
_, err := proposerServer.PrepareBeaconProposer(ctx, req)
require.NoError(t, err)
require.LogsContain(t, hook, "Updated fee recipient addresses for validator indices")

// Same validator
hook.Reset()
_, err = proposerServer.PrepareBeaconProposer(ctx, req)
require.NoError(t, err)
require.LogsDoNotContain(t, hook, "Updated fee recipient addresses for validator indices")

// Same validator with different fee recipient
hook.Reset()
f = bytesutil.PadTo([]byte{0x01, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF}, fieldparams.FeeRecipientLength)
req = &ethpbv1.PrepareBeaconProposerRequest{
Recipients: []*ethpbv1.PrepareBeaconProposerRequest_FeeRecipientContainer{
{FeeRecipient: f, ValidatorIndex: 1},
},
}
_, err = proposerServer.PrepareBeaconProposer(ctx, req)
require.NoError(t, err)
require.LogsContain(t, hook, "Updated fee recipient addresses for validator indices")

// More than one validator
hook.Reset()
f = bytesutil.PadTo([]byte{0x01, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF}, fieldparams.FeeRecipientLength)
req = &ethpbv1.PrepareBeaconProposerRequest{
Recipients: []*ethpbv1.PrepareBeaconProposerRequest_FeeRecipientContainer{
{FeeRecipient: f, ValidatorIndex: 1},
{FeeRecipient: f, ValidatorIndex: 2},
},
}
_, err = proposerServer.PrepareBeaconProposer(ctx, req)
require.NoError(t, err)
require.LogsContain(t, hook, "Updated fee recipient addresses for validator indices")

// Same validators
hook.Reset()
_, err = proposerServer.PrepareBeaconProposer(ctx, req)
require.NoError(t, err)
require.LogsDoNotContain(t, hook, "Updated fee recipient addresses for validator indices")
}

func BenchmarkServer_PrepareBeaconProposer(b *testing.B) {
db := dbutil.SetupDB(b)
ctx := context.Background()
v1Server := &v1alpha1validator.Server{
BeaconDB: db,
}
proposerServer := &Server{V1Alpha1Server: v1Server}

f := bytesutil.PadTo([]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF}, fieldparams.FeeRecipientLength)
recipients := make([]*ethpbv1.PrepareBeaconProposerRequest_FeeRecipientContainer, 0)
for i := 0; i < 10000; i++ {
recipients = append(recipients, &ethpbv1.PrepareBeaconProposerRequest_FeeRecipientContainer{FeeRecipient: f, ValidatorIndex: types.ValidatorIndex(i)})
}

req := &ethpbv1.PrepareBeaconProposerRequest{
Recipients: recipients,
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := proposerServer.PrepareBeaconProposer(ctx, req)
if err != nil {
b.Fatal(err)
}
}
}

func TestServer_SubmitValidatorRegistrations(t *testing.T) {
type args struct {
request *ethpbv1.SubmitValidatorRegistrationsRequest
}
tests := []struct {
name string
args args
wantErr string
}{
{
name: "Happy Path",
args: args{
request: &ethpbv1.SubmitValidatorRegistrationsRequest{
Registrations: []*ethpbv1.SubmitValidatorRegistrationsRequest_SignedValidatorRegistration{
{
Message: &ethpbv1.SubmitValidatorRegistrationsRequest_ValidatorRegistration{
FeeRecipient: make([]byte, fieldparams.BLSPubkeyLength),
GasLimit: 30000000,
Timestamp: uint64(time.Now().Unix()),
Pubkey: make([]byte, fieldparams.BLSPubkeyLength),
},
Signature: make([]byte, fieldparams.BLSSignatureLength),
},
},
},
},
wantErr: "",
},
{
name: "Empty Request",
args: args{
request: &ethpbv1.SubmitValidatorRegistrationsRequest{
Registrations: []*ethpbv1.SubmitValidatorRegistrationsRequest_SignedValidatorRegistration{},
},
},
wantErr: "Validator registration request is empty",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := dbutil.SetupDB(t)
ctx := context.Background()
v1Server := &v1alpha1validator.Server{
BlockBuilder: &builderTest.MockBuilderService{
HasConfigured: true,
},
BeaconDB: db,
}
server := &Server{
V1Alpha1Server: v1Server,
}
_, err := server.SubmitValidatorRegistration(ctx, tt.args.request)
if tt.wantErr != "" {
require.ErrorContains(t, tt.wantErr, err)
return
}
require.NoError(t, err)
})
}
}

@@ -33,31 +33,6 @@ type blockContainer struct {
isCanonical bool
}

// ListBlocks retrieves blocks by root, slot, or epoch.
//
// The server may return multiple blocks in the case that a slot or epoch is
// provided as the filter criteria. The server may return an empty list when
// no blocks in its database match the filter criteria. This RPC should
// not return NOT_FOUND. Only one filter criterion should be used.
func (bs *Server) ListBlocks(
ctx context.Context, req *ethpb.ListBlocksRequest,
) (*ethpb.ListBlocksResponse, error) {
ctrs, numBlks, nextPageToken, err := bs.listBlocks(ctx, req)
if err != nil {
return nil, err
}
blkContainers, err := convertToProto(ctrs)
if err != nil {
return nil, err
}

return &ethpb.ListBlocksResponse{
BlockContainers: blkContainers,
TotalSize: int32(numBlks),
NextPageToken: nextPageToken,
}, nil
}

// ListBeaconBlocks retrieves blocks by root, slot, or epoch.
//
// The server may return multiple blocks in the case that a slot or epoch is
@@ -272,23 +247,6 @@ func (bs *Server) listBlocksForGenesis(ctx context.Context, _ *ethpb.ListBlocksR
}}, 1, strconv.Itoa(0), nil
}

func convertToProto(ctrs []blockContainer) ([]*ethpb.BeaconBlockContainer, error) {
protoCtrs := make([]*ethpb.BeaconBlockContainer, len(ctrs))
for i, c := range ctrs {
phBlk, err := c.blk.PbPhase0Block()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get phase 0 block: %v", err)
}
copiedRoot := c.root
protoCtrs[i] = &ethpb.BeaconBlockContainer{
Block: &ethpb.BeaconBlockContainer_Phase0Block{Phase0Block: phBlk},
BlockRoot: copiedRoot[:],
Canonical: c.isCanonical,
}
}
return protoCtrs, nil
}

// GetChainHead retrieves information about the head of the beacon chain from
// the view of the beacon chain node.
//

@@ -13,7 +13,6 @@ import (
|
||||
statefeed "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed/state"
|
||||
dbTest "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
|
||||
v1 "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/v1"
|
||||
"github.com/prysmaticlabs/prysm/v3/cmd"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/features"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
@@ -31,344 +30,6 @@ import (
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
)
|
||||
|
||||
func TestServer_ListBlocks_NoResults(t *testing.T) {
|
||||
db := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
bs := &Server{
|
||||
BeaconDB: db,
|
||||
}
|
||||
wanted := ðpb.ListBlocksResponse{
|
||||
BlockContainers: make([]*ethpb.BeaconBlockContainer, 0),
|
||||
TotalSize: int32(0),
|
||||
NextPageToken: strconv.Itoa(0),
|
||||
}
|
||||
res, err := bs.ListBlocks(ctx, ðpb.ListBlocksRequest{
|
||||
QueryFilter: ðpb.ListBlocksRequest_Slot{
|
||||
Slot: 0,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
if !proto.Equal(wanted, res) {
|
||||
t.Errorf("Wanted %v, received %v", wanted, res)
|
||||
}
|
||||
res, err = bs.ListBlocks(ctx, ðpb.ListBlocksRequest{
|
||||
QueryFilter: ðpb.ListBlocksRequest_Slot{
|
||||
Slot: 0,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
if !proto.Equal(wanted, res) {
|
||||
t.Errorf("Wanted %v, received %v", wanted, res)
|
||||
}
|
||||
res, err = bs.ListBlocks(ctx, ðpb.ListBlocksRequest{
|
||||
QueryFilter: ðpb.ListBlocksRequest_Root{
|
||||
Root: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
if !proto.Equal(wanted, res) {
|
||||
t.Errorf("Wanted %v, received %v", wanted, res)
|
||||
}
|
||||
}
|
||||
|
||||
func TestServer_ListBlocks_Genesis(t *testing.T) {
|
||||
db := dbTest.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
bs := &Server{
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
// Should throw an error if no genesis block is found.
|
||||
_, err := bs.ListBlocks(ctx, ðpb.ListBlocksRequest{
|
||||
QueryFilter: ðpb.ListBlocksRequest_Genesis{
|
||||
Genesis: true,
|
||||
},
|
||||
})
|
||||
require.ErrorContains(t, "Could not find genesis", err)
|
||||
|
||||
// Should return the proper genesis block if it exists.
|
||||
parentRoot := [32]byte{'a'}
|
||||
blk := util.NewBeaconBlock()
|
||||
blk.Block.ParentRoot = parentRoot[:]
|
||||
root, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, ctx, db, blk)
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
|
||||
wanted := ðpb.ListBlocksResponse{
|
||||
BlockContainers: []*ethpb.BeaconBlockContainer{
|
||||
{
|
||||
Block: ðpb.BeaconBlockContainer_Phase0Block{Phase0Block: blk},
|
||||
BlockRoot: root[:],
|
||||
Canonical: true,
|
||||
},
|
||||
},
|
||||
NextPageToken: "0",
|
||||
TotalSize: 1,
|
||||
}
|
||||
res, err := bs.ListBlocks(ctx, ðpb.ListBlocksRequest{
|
||||
QueryFilter: ðpb.ListBlocksRequest_Genesis{
|
||||
Genesis: true,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
if !proto.Equal(wanted, res) {
|
||||
t.Errorf("Wanted %v, received %v", wanted, res)
|
||||
}
|
||||
}

func TestServer_ListBlocks_Genesis_MultiBlocks(t *testing.T) {
	db := dbTest.SetupDB(t)
	ctx := context.Background()

	bs := &Server{
		BeaconDB: db,
	}
	// Should return the proper genesis block if it exists.
	parentRoot := [32]byte{1, 2, 3}
	blk := util.NewBeaconBlock()
	blk.Block.ParentRoot = parentRoot[:]
	root, err := blk.Block.HashTreeRoot()
	require.NoError(t, err)
	util.SaveBlock(t, ctx, db, blk)
	require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))

	count := types.Slot(100)
	blks := make([]interfaces.SignedBeaconBlock, count)
	for i := types.Slot(0); i < count; i++ {
		b := util.NewBeaconBlock()
		b.Block.Slot = i
		require.NoError(t, err)
		blks[i], err = blocks.NewSignedBeaconBlock(b)
		require.NoError(t, err)
	}
	require.NoError(t, db.SaveBlocks(ctx, blks))

	// Should not return an error even when multiple blocks exist alongside genesis.
	_, err = bs.ListBlocks(ctx, &ethpb.ListBlocksRequest{
		QueryFilter: &ethpb.ListBlocksRequest_Genesis{
			Genesis: true,
		},
	})
	require.NoError(t, err)
}

func TestServer_ListBlocks_Pagination(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	params.OverrideBeaconConfig(params.MinimalSpecConfig())

	db := dbTest.SetupDB(t)
	chain := &chainMock.ChainService{
		CanonicalRoots: map[[32]byte]bool{},
	}
	ctx := context.Background()

	count := types.Slot(100)
	blks := make([]interfaces.SignedBeaconBlock, count)
	blkContainers := make([]*ethpb.BeaconBlockContainer, count)
	for i := types.Slot(0); i < count; i++ {
		b := util.NewBeaconBlock()
		b.Block.Slot = i
		root, err := b.Block.HashTreeRoot()
		require.NoError(t, err)
		chain.CanonicalRoots[root] = true
		blks[i], err = blocks.NewSignedBeaconBlock(b)
		require.NoError(t, err)
		blkContainers[i] = &ethpb.BeaconBlockContainer{
			Block:     &ethpb.BeaconBlockContainer_Phase0Block{Phase0Block: b},
			BlockRoot: root[:],
			Canonical: true,
		}
	}
	require.NoError(t, db.SaveBlocks(ctx, blks))

	orphanedBlk := util.NewBeaconBlock()
	orphanedBlk.Block.Slot = 300
	orphanedBlkRoot, err := orphanedBlk.Block.HashTreeRoot()
	require.NoError(t, err)
	util.SaveBlock(t, ctx, db, orphanedBlk)

	bs := &Server{
		BeaconDB:         db,
		CanonicalFetcher: chain,
	}

	root6, err := blks[6].Block().HashTreeRoot()
	require.NoError(t, err)

	tests := []struct {
		req *ethpb.ListBlocksRequest
		res *ethpb.ListBlocksResponse
	}{
		{req: &ethpb.ListBlocksRequest{
			PageToken:   strconv.Itoa(0),
			QueryFilter: &ethpb.ListBlocksRequest_Slot{Slot: 5},
			PageSize:    3},
			res: &ethpb.ListBlocksResponse{
				BlockContainers: []*ethpb.BeaconBlockContainer{
					{
						Block: &ethpb.BeaconBlockContainer_Phase0Block{
							Phase0Block: util.HydrateSignedBeaconBlock(&ethpb.SignedBeaconBlock{
								Block: &ethpb.BeaconBlock{
									Slot: 5,
								},
							}),
						},
						BlockRoot: blkContainers[5].BlockRoot,
						Canonical: blkContainers[5].Canonical,
					},
				},
				NextPageToken: "",
				TotalSize:     1,
			},
		},
		{req: &ethpb.ListBlocksRequest{
			PageToken:   strconv.Itoa(0),
			QueryFilter: &ethpb.ListBlocksRequest_Root{Root: root6[:]},
			PageSize:    3},
			res: &ethpb.ListBlocksResponse{
				BlockContainers: []*ethpb.BeaconBlockContainer{
					{
						Block: &ethpb.BeaconBlockContainer_Phase0Block{
							Phase0Block: util.HydrateSignedBeaconBlock(&ethpb.SignedBeaconBlock{
								Block: &ethpb.BeaconBlock{
									Slot: 6,
								},
							}),
						},
						BlockRoot: blkContainers[6].BlockRoot,
						Canonical: blkContainers[6].Canonical,
					},
				},
				TotalSize:     1,
				NextPageToken: strconv.Itoa(0),
			},
		},
		{req: &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Root{Root: root6[:]}},
			res: &ethpb.ListBlocksResponse{
				BlockContainers: []*ethpb.BeaconBlockContainer{
					{
						Block: &ethpb.BeaconBlockContainer_Phase0Block{
							Phase0Block: util.HydrateSignedBeaconBlock(&ethpb.SignedBeaconBlock{
								Block: &ethpb.BeaconBlock{
									Slot: 6,
								},
							}),
						},
						BlockRoot: blkContainers[6].BlockRoot,
						Canonical: blkContainers[6].Canonical,
					},
				},
				TotalSize:     1,
				NextPageToken: strconv.Itoa(0),
			},
		},
		{req: &ethpb.ListBlocksRequest{
			PageToken:   strconv.Itoa(0),
			QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: 0},
			PageSize:    100},
			res: &ethpb.ListBlocksResponse{
				BlockContainers: blkContainers[0:params.BeaconConfig().SlotsPerEpoch],
				NextPageToken:   "",
				TotalSize:       int32(params.BeaconConfig().SlotsPerEpoch)}},
		{req: &ethpb.ListBlocksRequest{
			PageToken:   strconv.Itoa(1),
			QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: 5},
			PageSize:    3},
			res: &ethpb.ListBlocksResponse{
				BlockContainers: blkContainers[43:46],
				NextPageToken:   "2",
				TotalSize:       int32(params.BeaconConfig().SlotsPerEpoch)}},
		{req: &ethpb.ListBlocksRequest{
			PageToken:   strconv.Itoa(1),
			QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: 11},
			PageSize:    7},
			res: &ethpb.ListBlocksResponse{
				BlockContainers: blkContainers[95:96],
				NextPageToken:   "",
				TotalSize:       int32(params.BeaconConfig().SlotsPerEpoch)}},
		{req: &ethpb.ListBlocksRequest{
			PageToken:   strconv.Itoa(0),
			QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: 12},
			PageSize:    4},
			res: &ethpb.ListBlocksResponse{
				BlockContainers: blkContainers[96:100],
				NextPageToken:   "",
				TotalSize:       int32(params.BeaconConfig().SlotsPerEpoch / 2)}},
		{req: &ethpb.ListBlocksRequest{
			PageToken:   strconv.Itoa(0),
			QueryFilter: &ethpb.ListBlocksRequest_Slot{Slot: 300},
			PageSize:    3},
			res: &ethpb.ListBlocksResponse{
				BlockContainers: []*ethpb.BeaconBlockContainer{
					{
						Block: &ethpb.BeaconBlockContainer_Phase0Block{
							Phase0Block: util.HydrateSignedBeaconBlock(&ethpb.SignedBeaconBlock{
								Block: &ethpb.BeaconBlock{
									Slot: 300,
								},
							}),
						},
						BlockRoot: orphanedBlkRoot[:],
						Canonical: false,
					},
				},
				NextPageToken: "",
				TotalSize:     1}},
	}

	for i, test := range tests {
		t.Run(fmt.Sprintf("test_%d", i), func(t *testing.T) {
			res, err := bs.ListBlocks(ctx, test.req)
			require.NoError(t, err)
			require.DeepSSZEqual(t, res, test.res)
		})
	}
}
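The epoch cases in the table above follow from the minimal spec's 8 slots per epoch: epoch 5 spans slots 40 through 47, so page token 1 with page size 3 skips the first three results and returns slots 43 through 45, i.e. blkContainers[43:46]. A sketch of that offset arithmetic follows; the helper name and shape are our own illustration, not Prysm's pagination code.

// pageBounds computes the half-open [start, end) window into a result set
// for a given page token and page size, mirroring the expectations in the
// pagination table above.
func pageBounds(pageToken, pageSize, total int) (start, end int) {
	start = pageToken * pageSize
	if start > total {
		start = total
	}
	end = start + pageSize
	if end > total {
		end = total
	}
	return start, end
}

With total = 8, pageToken = 1, and pageSize = 3 this yields [3, 6), which maps to blkContainers[43:46] once epoch 5's base slot of 40 is added; with pageSize = 7 it yields [7, 8), matching the single-element blkContainers[95:96] case for epoch 11.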

func TestServer_ListBlocks_Errors(t *testing.T) {
	db := dbTest.SetupDB(t)
	ctx := context.Background()

	bs := &Server{BeaconDB: db}
	exceedsMax := int32(cmd.Get().MaxRPCPageSize + 1)

	wanted := fmt.Sprintf("Requested page size %d can not be greater than max size %d", exceedsMax, cmd.Get().MaxRPCPageSize)
	req := &ethpb.ListBlocksRequest{PageToken: strconv.Itoa(0), PageSize: exceedsMax}
	_, err := bs.ListBlocks(ctx, req)
	assert.ErrorContains(t, wanted, err)

	wanted = "Must specify a filter criteria for fetching"
	req = &ethpb.ListBlocksRequest{}
	_, err = bs.ListBlocks(ctx, req)
	assert.ErrorContains(t, wanted, err)

	req = &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Slot{Slot: 0}}
	res, err := bs.ListBlocks(ctx, req)
	require.NoError(t, err)
	assert.Equal(t, 0, len(res.BlockContainers), "Wanted empty list")
	assert.Equal(t, int32(0), res.TotalSize, "Wanted total size 0")

	req = &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Slot{}}
	res, err = bs.ListBlocks(ctx, req)
	require.NoError(t, err)
	assert.Equal(t, 0, len(res.BlockContainers), "Wanted empty list")
	assert.Equal(t, int32(0), res.TotalSize, "Wanted total size 0")

	req = &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Root{Root: []byte{'A'}}}
	res, err = bs.ListBlocks(ctx, req)
	require.NoError(t, err)
	assert.Equal(t, 0, len(res.BlockContainers), "Wanted empty list")
	assert.Equal(t, int32(0), res.TotalSize, "Wanted total size 0")

	req = &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Root{Root: []byte{'A'}}}
	res, err = bs.ListBlocks(ctx, req)
	require.NoError(t, err)
	assert.Equal(t, 0, len(res.BlockContainers), "Wanted empty list")
	assert.Equal(t, int32(0), res.TotalSize, "Wanted total size 0")
}
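Both list endpoints share the same up-front guard that this test pins down: reject any page size above cmd.Get().MaxRPCPageSize before touching the database. A minimal sketch of that check, assuming only the error wording the assertion matches on; the helper name is illustrative.

// validatePageSize rejects oversized page requests up front. The message
// format deliberately matches the string the errors test asserts against.
func validatePageSize(pageSize, maxPageSize int32) error {
	if pageSize > maxPageSize {
		return fmt.Errorf("Requested page size %d can not be greater than max size %d", pageSize, maxPageSize)
	}
	return nil
}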

// Ensures that an error is returned when any checkpoint is zero-valued and no genesis block is present.
func TestServer_GetChainHead_NoGenesis(t *testing.T) {
	db := dbTest.SetupDB(t)
@@ -1192,47 +853,3 @@ func runListBeaconBlocksPagination(t *testing.T, orphanedBlk interfaces.SignedBe
		})
	}
}

func TestServer_ListBeaconBlocks_Errors(t *testing.T) {
	db := dbTest.SetupDB(t)
	ctx := context.Background()

	bs := &Server{
		BeaconDB: db,
	}
	exceedsMax := int32(cmd.Get().MaxRPCPageSize + 1)

	wanted := fmt.Sprintf("Requested page size %d can not be greater than max size %d", exceedsMax, cmd.Get().MaxRPCPageSize)
	req := &ethpb.ListBlocksRequest{PageToken: strconv.Itoa(0), PageSize: exceedsMax}
	_, err := bs.ListBeaconBlocks(ctx, req)
	assert.ErrorContains(t, wanted, err)

	wanted = "Must specify a filter criteria for fetching"
	req = &ethpb.ListBlocksRequest{}
	_, err = bs.ListBeaconBlocks(ctx, req)
	assert.ErrorContains(t, wanted, err)

	req = &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Slot{Slot: 0}}
	res, err := bs.ListBeaconBlocks(ctx, req)
	require.NoError(t, err)
	assert.Equal(t, 0, len(res.BlockContainers), "Wanted empty list")
	assert.Equal(t, int32(0), res.TotalSize, "Wanted total size 0")

	req = &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Slot{}}
	res, err = bs.ListBeaconBlocks(ctx, req)
	require.NoError(t, err)
	assert.Equal(t, 0, len(res.BlockContainers), "Wanted empty list")
	assert.Equal(t, int32(0), res.TotalSize, "Wanted total size 0")

	req = &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Root{Root: []byte{'A'}}}
	res, err = bs.ListBeaconBlocks(ctx, req)
	require.NoError(t, err)
	assert.Equal(t, 0, len(res.BlockContainers), "Wanted empty list")
	assert.Equal(t, int32(0), res.TotalSize, "Wanted total size 0")

	req = &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Root{Root: []byte{'A'}}}
	res, err = bs.ListBeaconBlocks(ctx, req)
	require.NoError(t, err)
	assert.Equal(t, 0, len(res.BlockContainers), "Wanted empty list")
	assert.Equal(t, int32(0), res.TotalSize, "Wanted total size 0")
}

@@ -754,8 +754,6 @@ func (bs *Server) GetValidatorPerformance(
	beforeTransitionBalances := make([]uint64, 0, responseCap)
	afterTransitionBalances := make([]uint64, 0, responseCap)
	effectiveBalances := make([]uint64, 0, responseCap)
	inclusionSlots := make([]types.Slot, 0, responseCap)
	inclusionDistances := make([]types.Slot, 0, responseCap)
	correctlyVotedSource := make([]bool, 0, responseCap)
	correctlyVotedTarget := make([]bool, 0, responseCap)
	correctlyVotedHead := make([]bool, 0, responseCap)
@@ -789,8 +787,6 @@ func (bs *Server) GetValidatorPerformance(

	if headState.Version() == version.Phase0 {
		correctlyVotedSource = append(correctlyVotedSource, summary.IsPrevEpochAttester)
		inclusionSlots = append(inclusionSlots, summary.InclusionSlot)
		inclusionDistances = append(inclusionDistances, summary.InclusionDistance)
	} else {
		correctlyVotedSource = append(correctlyVotedSource, summary.IsPrevEpochSourceAttester)
		inactivityScores = append(inactivityScores, summary.InactivityScore)
@@ -806,9 +802,7 @@ func (bs *Server) GetValidatorPerformance(
		BalancesBeforeEpochTransition: beforeTransitionBalances,
		BalancesAfterEpochTransition:  afterTransitionBalances,
		MissingValidators:             missingValidators,
		InclusionSlots:                inclusionSlots,     // Only populated in phase 0
		InclusionDistances:            inclusionDistances, // Only populated in phase 0
		InactivityScores:              inactivityScores,   // Only populated in Altair
	}, nil
}
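Since the hunk above fills InclusionSlots and InclusionDistances only for phase 0 states and InactivityScores only from Altair onward, a consumer of the response has to branch on which slices are populated. A hedged reading of the response follows; the field names come from the diff, but the helper itself is illustrative.

// reportPerformance prints the fork-dependent fields of a
// ValidatorPerformanceResponse; only one of the two groups is populated.
func reportPerformance(resp *ethpb.ValidatorPerformanceResponse) {
	if resp.InclusionSlots != nil { // phase 0 response
		fmt.Println("inclusion slots:", resp.InclusionSlots)
		fmt.Println("inclusion distances:", resp.InclusionDistances)
		return
	}
	// Altair and later responses carry inactivity scores instead.
	fmt.Println("inactivity scores:", resp.InactivityScores)
}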

@@ -1846,12 +1846,9 @@ func TestGetValidatorPerformance_OK(t *testing.T) {
		GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
		SyncChecker:        &mockSync.Sync{IsSyncing: false},
	}
	farFuture := params.BeaconConfig().FarFutureSlot
	want := &ethpb.ValidatorPerformanceResponse{
		PublicKeys:               [][]byte{publicKey2[:], publicKey3[:]},
		CurrentEffectiveBalances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
		InclusionSlots:           []types.Slot{farFuture, farFuture},
		InclusionDistances:       []types.Slot{farFuture, farFuture},
		CorrectlyVotedSource:     []bool{false, false},
		CorrectlyVotedTarget:     []bool{false, false},
		CorrectlyVotedHead:       []bool{false, false},
@@ -1918,12 +1915,9 @@ func TestGetValidatorPerformance_Indices(t *testing.T) {
	require.NoError(t, err)
	_, err = precompute.ProcessRewardsAndPenaltiesPrecompute(c, bp, vp, precompute.AttestationsDelta, precompute.ProposersDelta)
	require.NoError(t, err)
	farFuture := params.BeaconConfig().FarFutureSlot
	want := &ethpb.ValidatorPerformanceResponse{
		PublicKeys:               [][]byte{publicKey2[:], publicKey3[:]},
		CurrentEffectiveBalances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
		InclusionSlots:           []types.Slot{farFuture, farFuture},
		InclusionDistances:       []types.Slot{farFuture, farFuture},
		CorrectlyVotedSource:     []bool{false, false},
		CorrectlyVotedTarget:     []bool{false, false},
		CorrectlyVotedHead:       []bool{false, false},
@@ -1991,12 +1985,9 @@ func TestGetValidatorPerformance_IndicesPubkeys(t *testing.T) {
	require.NoError(t, err)
	_, err = precompute.ProcessRewardsAndPenaltiesPrecompute(c, bp, vp, precompute.AttestationsDelta, precompute.ProposersDelta)
	require.NoError(t, err)
	farFuture := params.BeaconConfig().FarFutureSlot
	want := &ethpb.ValidatorPerformanceResponse{
		PublicKeys:               [][]byte{publicKey2[:], publicKey3[:]},
		CurrentEffectiveBalances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
		InclusionSlots:           []types.Slot{farFuture, farFuture},
		InclusionDistances:       []types.Slot{farFuture, farFuture},
		CorrectlyVotedSource:     []bool{false, false},
		CorrectlyVotedTarget:     []bool{false, false},
		CorrectlyVotedHead:       []bool{false, false},
@@ -2065,8 +2056,6 @@ func TestGetValidatorPerformanceAltair_OK(t *testing.T) {
	want := &ethpb.ValidatorPerformanceResponse{
		PublicKeys:               [][]byte{publicKey2[:], publicKey3[:]},
		CurrentEffectiveBalances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
		InclusionSlots:           nil,
		InclusionDistances:       nil,
		CorrectlyVotedSource:     []bool{false, false},
		CorrectlyVotedTarget:     []bool{false, false},
		CorrectlyVotedHead:       []bool{false, false},
@@ -2135,8 +2124,6 @@ func TestGetValidatorPerformanceBellatrix_OK(t *testing.T) {
	want := &ethpb.ValidatorPerformanceResponse{
		PublicKeys:               [][]byte{publicKey2[:], publicKey3[:]},
		CurrentEffectiveBalances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
		InclusionSlots:           nil,
		InclusionDistances:       nil,
		CorrectlyVotedSource:     []bool{false, false},
		CorrectlyVotedTarget:     []bool{false, false},
		CorrectlyVotedHead:       []bool{false, false},
@@ -64,6 +64,7 @@ go_library(
        "//crypto/hash:go_default_library",
        "//crypto/rand:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//encoding/ssz:go_default_library",
        "//monitoring/tracing:go_default_library",
        "//network/forks:go_default_library",
        "//proto/engine/v1:go_default_library",

@@ -14,6 +14,7 @@ import (
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed"
	blockfeed "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed/block"
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/db/kv"
	"github.com/prysmaticlabs/prysm/v3/config/params"
	"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
@@ -81,7 +82,26 @@ func (vs *Server) PrepareBeaconProposer(
	defer span.End()
	var feeRecipients []common.Address
	var validatorIndices []types.ValidatorIndex
	for _, recipientContainer := range request.Recipients {

	newRecipients := make([]*ethpb.PrepareBeaconProposerRequest_FeeRecipientContainer, 0, len(request.Recipients))
	for _, r := range request.Recipients {
		f, err := vs.BeaconDB.FeeRecipientByValidatorID(ctx, r.ValidatorIndex)
		switch {
		case errors.Is(err, kv.ErrNotFoundFeeRecipient):
			newRecipients = append(newRecipients, r)
		case err != nil:
			return nil, status.Errorf(codes.Internal, "Could not get fee recipient by validator index: %v", err)
		default:
		}
		if common.BytesToAddress(r.FeeRecipient) != f {
			newRecipients = append(newRecipients, r)
		}
	}
	if len(newRecipients) == 0 {
		return &emptypb.Empty{}, nil
	}

	for _, recipientContainer := range newRecipients {
		recipient := hexutil.Encode(recipientContainer.FeeRecipient)
		if !common.IsHexAddress(recipient) {
			return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("Invalid fee recipient address: %v", recipient))
@@ -162,12 +182,6 @@ func (vs *Server) computeStateRoot(ctx context.Context, block interfaces.SignedB
	return root[:], nil
}

// SubmitValidatorRegistration submits a validator registration.
// Deprecated: Use SubmitValidatorRegistrations instead.
func (vs *Server) SubmitValidatorRegistration(ctx context.Context, reg *ethpb.SignedValidatorRegistrationV1) (*emptypb.Empty, error) {
	return vs.SubmitValidatorRegistrations(ctx, &ethpb.SignedValidatorRegistrationsV1{Messages: []*ethpb.SignedValidatorRegistrationV1{reg}})
}
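With the single-registration endpoint reduced to a deprecated wrapper, migrating callers simply batch their messages into one SubmitValidatorRegistrations call. An illustrative fragment, where regA and regB stand in for registrations built elsewhere:

// A registration batch replaces repeated calls to the deprecated
// single-message endpoint; regA and regB are assumed to be
// *ethpb.SignedValidatorRegistrationV1 values built elsewhere.
regs := &ethpb.SignedValidatorRegistrationsV1{
	Messages: []*ethpb.SignedValidatorRegistrationV1{regA, regB},
}
if _, err := vs.SubmitValidatorRegistrations(ctx, regs); err != nil {
	return err
}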

// SubmitValidatorRegistrations submits validator registrations.
func (vs *Server) SubmitValidatorRegistrations(ctx context.Context, reg *ethpb.SignedValidatorRegistrationsV1) (*emptypb.Empty, error) {
	if vs.BlockBuilder == nil || !vs.BlockBuilder.Configured() {
Some files were not shown because too many files have changed in this diff.