mirror of https://github.com/OffchainLabs/prysm.git
synced 2026-01-11 14:28:09 -05:00

Compare commits: testStuff1...ss-missing (122 commits)
| SHA1 |
|---|
| 2838683245 |
| 476d188fcc |
| 64db5ad361 |
| d0c7cedbba |
| b20937e97a |
| 52a581362c |
| 537142f79e |
| abb32e5f27 |
| 08c3d149bf |
| 0d5b7d093a |
| 80f30a0d5b |
| 9580f64fea |
| b3c3f7f40e |
| 49f2e12030 |
| 9aa4407daf |
| 05b3ef1fcd |
| 8d086e433d |
| 8627fe72e8 |
| 65bf3d0fa8 |
| a5da9aedd4 |
| 3833f2770e |
| e6c04e5dad |
| 5e05398daf |
| e1ab034d25 |
| 84bc8f3d64 |
| 299fa3494a |
| c4deb84012 |
| 488e19e428 |
| bcaae1c440 |
| 587ba83aca |
| 091f16b26c |
| fb9626fdd7 |
| c638e114db |
| b1e08307ed |
| cac5d0f234 |
| 52d48b328f |
| 9729b2ec77 |
| 7aa3776aa6 |
| 760c71ef77 |
| 6c209db3ca |
| 0725905797 |
| 166f8a1eb6 |
| 85896e994e |
| 4a00b295ed |
| d2b39e9697 |
| 97dc86e742 |
| cff3b99918 |
| be9847f23c |
| 4796827d22 |
| 57b7e0b572 |
| b5039e9bd9 |
| f5d792299f |
| 9ce922304f |
| 3cbb4aace4 |
| c94095b609 |
| ae858bbd0a |
| 30cd158ae5 |
| 2db22adfe0 |
| 161a14d256 |
| 9dee22f7ab |
| 52271cf0ba |
| e1f56d403c |
| a2193ee014 |
| 762b3df491 |
| 2b3025828f |
| 436792fe38 |
| 1d07bffe11 |
| f086535c8a |
| 3a4c599a96 |
| 1c6cbc574e |
| 2317375983 |
| 6354748b12 |
| e910471784 |
| ab7e97ba63 |
| e99de7726d |
| 606fdd2299 |
| 1eb6025aaa |
| d431ceee25 |
| 4597599196 |
| 0c32eb5c03 |
| 4b1cb6fa80 |
| 9cfb823cc6 |
| cb502ceb8c |
| 8da4d572d9 |
| 1c6fa65f7b |
| eaa2566e90 |
| 6957f0637f |
| 01b1f15bdf |
| b787fd877a |
| 2c89ce810d |
| e687fff922 |
| 5e2498be7e |
| 76f958710f |
| 1775cf89c6 |
| 8fecfaee48 |
| f089405d2f |
| 029c81a2e4 |
| 56c48b4971 |
| 20ed47a107 |
| e30471f1a0 |
| 3b38765a2d |
| b60e508c89 |
| a65c670f5e |
| 4af7d8230a |
| 27733969f7 |
| e70fe1c9fd |
| 9b3a834437 |
| d815fa8f21 |
| ac3079f8cd |
| cb8f6423e0 |
| 515e7c959f |
| 82bbfce524 |
| 95430ddb57 |
| 21b7861d37 |
| c1e7afa201 |
| dfa400d4a1 |
| b04c28b30c |
| ed07359573 |
| 25d87dd27b |
| a9ccabf6c9 |
| 2377d6d6ea |
| 100ca0ebaf |
.github/actions/gofmt/Dockerfile (vendored, 5 changes)

@@ -1,5 +0,0 @@
-FROM cytopia/gofmt
-
-COPY entrypoint.sh /entrypoint.sh
-
-ENTRYPOINT ["/entrypoint.sh"]
.github/actions/gofmt/action.yml (vendored, 12 changes)

@@ -1,12 +0,0 @@
-name: 'Gofmt checker'
-description: 'Checks that all project files have been properly formatted.'
-inputs:
-  path:
-    description: 'Path to check'
-    required: true
-    default: './'
-runs:
-  using: 'docker'
-  image: 'Dockerfile'
-  args:
-  - ${{ inputs.path }}
.github/actions/gofmt/entrypoint.sh (vendored, 15 changes)

@@ -1,15 +0,0 @@
-#!/bin/sh -l
-set -e
-
-cd $GITHUB_WORKSPACE
-
-# Check if any files are not formatted.
-nonformatted="$(gofmt -l $1 2>&1)"
-
-# Return if `go fmt` passes.
-[ -z "$nonformatted" ] && exit 0
-
-# Notify of issues with formatting.
-echo "Following files need to be properly formatted:"
-echo "$nonformatted"
-exit 1
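The deleted entrypoint relied on `gofmt -l` to list unformatted files and fail the job. For reference, a minimal Go sketch of the same check using the standard library's go/format package; the recursive walk and exit codes are illustrative, not part of this PR:

```go
package main

import (
	"bytes"
	"fmt"
	"go/format"
	"io/fs"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	root := "."
	if len(os.Args) > 1 {
		root = os.Args[1]
	}
	var unformatted []string
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil || d.IsDir() || !strings.HasSuffix(path, ".go") {
			return err
		}
		src, err := os.ReadFile(path)
		if err != nil {
			return err
		}
		// format.Source applies the same canonical formatting gofmt does.
		out, err := format.Source(src)
		if err != nil {
			return err // parse error; surface it like gofmt would
		}
		if !bytes.Equal(src, out) {
			unformatted = append(unformatted, path)
		}
		return nil
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(2)
	}
	if len(unformatted) > 0 {
		fmt.Println("Following files need to be properly formatted:")
		for _, f := range unformatted {
			fmt.Println(f)
		}
		os.Exit(1)
	}
}
```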
.github/workflows/go.yml (vendored, 15 changes)

@@ -18,18 +18,6 @@ jobs:
         id: gomodtidy
         uses: ./.github/actions/gomodtidy

-      - name: Gofmt checker
-        id: gofmt
-        uses: ./.github/actions/gofmt
-        with:
-          path: ./
-
-      - name: GoImports checker
-        id: goimports
-        uses: Jerome1337/goimports-action@v1.0.2
-        with:
-          goimports-path: ./
-
   gosec:
     name: Gosec scan
     runs-on: ubuntu-latest
@@ -45,7 +33,7 @@ jobs:
       - name: Run Gosec Security Scanner
        run: | # https://github.com/securego/gosec/issues/469
          export PATH=$PATH:$(go env GOPATH)/bin
-          go install github.com/securego/gosec/v2/cmd/gosec@latest
+          go install github.com/securego/gosec/v2/cmd/gosec@v2.12.0
          gosec -exclude=G307 -exclude-dir=crypto/bls/herumi ./...

   lint:
@@ -65,6 +53,7 @@ jobs:
        uses: golangci/golangci-lint-action@v3
        with:
          version: v1.47.2
+          args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number

   build:
     name: Build
@@ -11,7 +11,9 @@ run:
 linters:
   disable-all: true
   enable:
-    - deadcode
     - gofmt
     - goimports
+    - unused
     - errcheck
+    - gosimple
+    - gocognit
README.md

@@ -2,8 +2,8 @@
 [](https://buildkite.com/prysmatic-labs/prysm)
 [](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
-[](https://github.com/ethereum/consensus-specs/tree/v1.2.0-rc.1)
-[](https://github.com/ethereum/execution-apis/tree/v1.0.0-alpha.9/src/engine)
+[](https://github.com/ethereum/consensus-specs/tree/v1.2.0-rc.3)
+[](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.1/src/engine)
 [](https://discord.gg/CTYGPUJ)
 [](https://www.gitpoap.io/gh/prysmaticlabs/prysm)
WORKSPACE (22 changes)

@@ -28,7 +28,7 @@ load("@com_grail_bazel_toolchain//toolchain:rules.bzl", "llvm_toolchain")

 llvm_toolchain(
     name = "llvm_toolchain",
-    llvm_version = "10.0.0",
+    llvm_version = "13.0.1",
 )

 load("@llvm_toolchain//:toolchains.bzl", "llvm_register_toolchains")
@@ -215,7 +215,7 @@ filegroup(
     url = "https://github.com/eth-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
 )

-consensus_spec_version = "v1.2.0-rc.1"
+consensus_spec_version = "v1.2.0-rc.3"

 bls_test_version = "v0.1.1"

@@ -231,7 +231,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "9c93f87378aaa6d6fe1c67b396eac2aacc9594af2a83f028cb99c95dea5b81df",
+    sha256 = "18ca21497f41042cdbe60e2333b100d218b2994fb514964b9deb23daf615a12f",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
 )

@@ -247,7 +247,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "52f2c52415228cee8a4de5a09abff785f439a77dfef8f03e834e4e16857673c1",
+    sha256 = "47b8f6fabe39b4a69f13054ba74e26ab51581ddbd359c18cf0f03317474e299c",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
 )

@@ -263,7 +263,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "022dcc0d6de7dd27b337a0d1b945077eaf5ee47000700395a693fc25e12f96df",
+    sha256 = "a061efc05429b169393c32dc2633a948269461b0fe681f11d41e170a880dcc71",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
 )

@@ -278,7 +278,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "0a9c110305cbd6ebbe0d942f0f33e6ce22dd484ce4ceed277bf185a091941cde",
+    sha256 = "753d51c6a6cc6df101c897e4bea77f73b271f50aeda74440f412514d4bd88a86",
     strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
     url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
 )
@@ -309,9 +309,9 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "126b615e3853e29b61f082f6c89c8bc1c38cd92fb84b0004396fc49e7acc8d9f",
-    strip_prefix = "eth2-networks-f3ccbe0cf5798d5cd23e4e6e7119aefa043c0935",
-    url = "https://github.com/eth-clients/eth2-networks/archive/f3ccbe0cf5798d5cd23e4e6e7119aefa043c0935.tar.gz",
+    sha256 = "82b01a48b143fe0f2fb7fb5f5dd385c1f934335a12d7954f08b1d45d77427b5e",
+    strip_prefix = "eth2-networks-674f7a1d01d9c18345456eab76e3871b3df2126b",
+    url = "https://github.com/eth-clients/eth2-networks/archive/674f7a1d01d9c18345456eab76e3871b3df2126b.tar.gz",
 )

 http_archive(
@@ -342,9 +342,9 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "98013b40922e54a64996da49b939e0a88fe2456f68eedc5aee4ceba0f8623f71",
+    sha256 = "e0c0b5dc609b3a221e74c720f483c595441f2ad5e38bb8aa3522636039945a6f",
     urls = [
-        "https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v2.0.0/prysm-web-ui.tar.gz",
+        "https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v2.0.1/prysm-web-ui.tar.gz",
     ],
 )
@@ -21,14 +21,13 @@ import (
 // OriginData represents the BeaconState and SignedBeaconBlock necessary to start an empty Beacon Node
 // using Checkpoint Sync.
 type OriginData struct {
-    wsd *WeakSubjectivityData
-    sb  []byte
-    bb  []byte
-    st  state.BeaconState
-    b   interfaces.SignedBeaconBlock
-    vu  *detect.VersionedUnmarshaler
-    br  [32]byte
-    sr  [32]byte
+    sb []byte
+    bb []byte
+    st state.BeaconState
+    b  interfaces.SignedBeaconBlock
+    vu *detect.VersionedUnmarshaler
+    br [32]byte
+    sr [32]byte
 }

 // SaveBlock saves the downloaded block to a unique file in the given path.
@@ -95,8 +95,6 @@ func WithTimeout(timeout time.Duration) ClientOpt {
 // Client provides a collection of helper methods for calling the Eth Beacon Node API endpoints.
 type Client struct {
-    hc     *http.Client
-    host   string
-    scheme string
+    hc      *http.Client
+    baseURL *url.URL
 }
@@ -274,9 +274,6 @@ func non200Err(response *http.Response) error {
 	if err != nil {
 		body = "(Unable to read response body.)"
 	} else {
-		if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
-			return errors.Wrap(jsonErr, "unable to read response body")
-		}
 		body = "response body:\n" + string(bodyBytes)
 	}
 	msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
@@ -285,13 +282,25 @@ func non200Err(response *http.Response) error {
 		log.WithError(ErrNoContent).Debug(msg)
 		return ErrNoContent
 	case 400:
+		if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
+			return errors.Wrap(jsonErr, "unable to read response body")
+		}
 		log.WithError(ErrBadRequest).Debug(msg)
 		return errors.Wrap(ErrBadRequest, errMessage.Message)
 	case 404:
+		if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
+			return errors.Wrap(jsonErr, "unable to read response body")
+		}
 		log.WithError(ErrNotFound).Debug(msg)
 		return errors.Wrap(ErrNotFound, errMessage.Message)
-	default:
+	case 500:
+		if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
+			return errors.Wrap(jsonErr, "unable to read response body")
+		}
 		log.WithError(ErrNotOK).Debug(msg)
 		return errors.Wrap(ErrNotOK, errMessage.Message)
+	default:
+		log.WithError(ErrNotOK).Debug(msg)
+		return errors.Wrap(ErrNotOK, fmt.Sprintf("unsupported error code: %d", response.StatusCode))
 	}
 }
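The refactor above moves JSON decoding of the error body into each status-code case, so every sentinel (ErrBadRequest, ErrNotFound, ErrNotOK) is wrapped with the server's message. A small sketch of how a caller can branch on such wrapped sentinels; the sentinel messages and the stand-in client call are illustrative, and it uses stdlib wrapping rather than pkg/errors:

```go
package main

import (
	"errors"
	"fmt"
)

// Sentinel errors, mirroring the ones referenced in the diff above.
var (
	ErrBadRequest = errors.New("recv 400 BadRequest response from API")
	ErrNotFound   = errors.New("recv 404 NotFound response from API")
	ErrNotOK      = errors.New("did not receive 2xx response from API")
)

// fetchHeader stands in for a client call that returns a wrapped sentinel.
func fetchHeader() error {
	return fmt.Errorf("%w: validator not registered", ErrBadRequest)
}

func main() {
	err := fetchHeader()
	switch {
	case errors.Is(err, ErrBadRequest):
		// The server's message wrapped into the error is preserved.
		fmt.Println("bad request:", err)
	case errors.Is(err, ErrNotFound):
		fmt.Println("not found:", err)
	case errors.Is(err, ErrNotOK):
		fmt.Println("server error:", err)
	}
}
```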
@@ -144,6 +144,23 @@ func TestClient_GetHeader(t *testing.T) {
 	_, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
 	require.ErrorIs(t, err, ErrNotOK)

+	hc = &http.Client{
+		Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
+			require.Equal(t, expectedPath, r.URL.Path)
+			return &http.Response{
+				StatusCode: http.StatusNoContent,
+				Body:       io.NopCloser(bytes.NewBuffer([]byte("No header is available."))),
+				Request:    r.Clone(ctx),
+			}, nil
+		}),
+	}
+	c = &Client{
+		hc:      hc,
+		baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
+	}
+	_, err = c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
+	require.ErrorIs(t, err, ErrNoContent)
+
 	hc = &http.Client{
 		Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
 			require.Equal(t, expectedPath, r.URL.Path)
@@ -25,10 +25,8 @@ import (
 type ChainInfoFetcher interface {
 	HeadFetcher
 	FinalizationFetcher
-	GenesisFetcher
 	CanonicalFetcher
 	ForkFetcher
-	TimeFetcher
 	HeadDomainFetcher
 }

@@ -70,6 +68,8 @@ type HeadFetcher interface {
 type ForkFetcher interface {
 	ForkChoicer() forkchoice.ForkChoicer
 	CurrentFork() *ethpb.Fork
+	GenesisFetcher
+	TimeFetcher
 }

 // CanonicalFetcher retrieves the current chain's canonical information.
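The reshuffle above removes GenesisFetcher and TimeFetcher from ChainInfoFetcher and instead embeds them in ForkFetcher, so any implementation of ForkFetcher still transitively satisfies the composite interface. A minimal sketch of the embedding mechanics; all types here are illustrative stand-ins, not Prysm's:

```go
package main

import (
	"fmt"
	"time"
)

// Narrow capabilities, analogous to GenesisFetcher / TimeFetcher in the diff.
type GenesisFetcher interface{ GenesisTime() time.Time }
type TimeFetcher interface{ CurrentSlot() uint64 }

// ForkFetcher embeds both, so one value provides all three methods.
type ForkFetcher interface {
	GenesisFetcher
	TimeFetcher
	CurrentForkVersion() [4]byte
}

// chain is a toy implementation; defining the three methods satisfies
// every interface above, including the embedding one.
type chain struct{ genesis time.Time }

func (c chain) GenesisTime() time.Time { return c.genesis }
func (c chain) CurrentSlot() uint64 {
	return uint64(time.Since(c.genesis) / (12 * time.Second)) // 12s slots
}
func (c chain) CurrentForkVersion() [4]byte { return [4]byte{0, 0, 0, 2} }

func main() {
	var f ForkFetcher = chain{genesis: time.Now().Add(-24 * time.Hour)}
	fmt.Println(f.CurrentSlot(), f.GenesisTime().Unix())
}
```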
@@ -530,13 +530,13 @@ func TestService_IsOptimisticForRoot_DB_ProtoArray(t *testing.T) {

 	optimisticBlock := util.NewBeaconBlock()
 	optimisticBlock.Block.Slot = 97
-	optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
+	optimisticRoot, err := util.SaveBlock(t, ctx, beaconDB, optimisticBlock).Block().HashTreeRoot()
 	require.NoError(t, err)
-	util.SaveBlock(t, context.Background(), beaconDB, optimisticBlock)

 	validatedBlock := util.NewBeaconBlock()
 	validatedBlock.Block.Slot = 9
-	validatedRoot, err := validatedBlock.Block.HashTreeRoot()
+	validatedRoot, err := util.SaveBlock(t, ctx, beaconDB, validatedBlock).Block().HashTreeRoot()
 	require.NoError(t, err)
-	util.SaveBlock(t, context.Background(), beaconDB, validatedBlock)

@@ -563,10 +563,11 @@ func TestService_IsOptimisticForRoot_DB_ProtoArray(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, false, validated)

 	// Before the first finalized epoch, finalized root could be zeros.
-	validatedCheckpoint = &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+	r, err := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlock()).Block().HashTreeRoot()
+	require.NoError(t, err)
+	validatedCheckpoint = &ethpb.Checkpoint{Root: r[:]}
 	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, br))
-	require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:], Slot: 10}))
+	require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: r[:], Slot: 10}))
 	require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))

 	require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
@@ -621,10 +622,11 @@ func TestService_IsOptimisticForRoot_DB_DoublyLinkedTree(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, false, validated)

 	// Before the first finalized epoch, finalized root could be zeros.
-	validatedCheckpoint = &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+	r, err := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlock()).Block().HashTreeRoot()
+	require.NoError(t, err)
+	validatedCheckpoint = &ethpb.Checkpoint{Root: r[:]}
 	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, br))
-	require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:], Slot: 10}))
+	require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: r[:], Slot: 10}))
 	require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))

 	require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
@@ -23,6 +23,8 @@ var (
 	errNotOptimisticCandidate = errors.New("block is not suitable for optimistic sync")
 	// errBlockNotFoundInCacheOrDB is returned when a block is not found in the cache or DB.
 	errBlockNotFoundInCacheOrDB = errors.New("block not found in cache or db")
+	// ErrNilBlockInCache is returned when a nil block is returned from the cache.
+	ErrNilBlockInCache = errors.New("nil block returned from the cache")
 	// errNilStateFromStategen is returned when a nil state is returned from the state generator.
 	errNilStateFromStategen = errors.New("justified state can't be nil")
 	// errWSBlockNotFound is returned when a block is not found in the WS cache or DB.
@@ -89,7 +89,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
 		}
 		return payloadID, nil
 	case execution.ErrInvalidPayloadStatus:
-		newPayloadInvalidNodeCount.Inc()
+		forkchoiceUpdatedInvalidNodeCount.Inc()
 		headRoot := arg.headRoot
 		if len(lastValidHash) == 0 {
 			lastValidHash = defaultLatestValidHash
@@ -1083,14 +1083,14 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
 	require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
 	fcs.SetOriginRoot(genesisRoot)
 	genesisSummary := &ethpb.StateSummary{
-		Root: genesisStateRoot[:],
+		Root: genesisRoot[:],
 		Slot: 0,
 	}
 	require.NoError(t, beaconDB.SaveStateSummary(ctx, genesisSummary))

-	// Get last validated checkpoint
-	origCheckpoint, err := service.cfg.BeaconDB.LastValidatedCheckpoint(ctx)
-	require.NoError(t, err)
+	// Set last validated checkpoint to a junk root to prevent issues
+	// with saving it due to the state summary.
+	origCheckpoint := &ethpb.Checkpoint{Root: genesisRoot[:], Epoch: 0}
 	require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, origCheckpoint))

 	// Optimistic finalized checkpoint
@@ -1155,6 +1155,18 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
 	require.Equal(t, false, optimistic)
 	require.DeepEqual(t, validCheckpoint.Root, cp.Root)
 	require.Equal(t, validCheckpoint.Epoch, cp.Epoch)
+
+	// Checkpoint with a lower epoch
+	oldCp, err := service.cfg.BeaconDB.FinalizedCheckpoint(ctx)
+	require.NoError(t, err)
+	invalidCp := &ethpb.Checkpoint{
+		Epoch: oldCp.Epoch - 1,
+	}
+	// Nothing should happen as we no-op on an invalid checkpoint.
+	require.NoError(t, service.updateFinalized(ctx, invalidCp))
+	got, err := service.cfg.BeaconDB.FinalizedCheckpoint(ctx)
+	require.NoError(t, err)
+	require.DeepEqual(t, oldCp, got)
 }

 func TestService_removeInvalidBlockAndState(t *testing.T) {
@@ -17,6 +17,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
 	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
+	"github.com/prysmaticlabs/prysm/v3/math"
 	ethpbv1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
 	"github.com/prysmaticlabs/prysm/v3/time/slots"
 	"github.com/sirupsen/logrus"
@@ -87,7 +88,6 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
 		return nil
 	}

-	// A chain re-org occurred, so we fire an event notifying the rest of the services.
 	s.headLock.RLock()
 	oldHeadBlock, err := s.headBlock()
 	if err != nil {
@@ -98,12 +98,23 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
 	headSlot := s.HeadSlot()
 	newHeadSlot := headBlock.Block().Slot()
 	newStateRoot := headBlock.Block().StateRoot()

+	// A chain re-org occurred, so we fire an event notifying the rest of the services.
 	if bytesutil.ToBytes32(headBlock.Block().ParentRoot()) != oldHeadRoot {
+		commonRoot, forkSlot, err := s.ForkChoicer().CommonAncestor(ctx, oldHeadRoot, newHeadRoot)
+		if err != nil {
+			log.WithError(err).Error("Could not find common ancestor root")
+			commonRoot = params.BeaconConfig().ZeroHash
+		}
 		log.WithFields(logrus.Fields{
-			"newSlot": fmt.Sprintf("%d", newHeadSlot),
-			"oldSlot": fmt.Sprintf("%d", headSlot),
-		}).Debug("Chain reorg occurred")
-		absoluteSlotDifference := slots.AbsoluteValueSlotDifference(newHeadSlot, headSlot)
+			"newSlot":            fmt.Sprintf("%d", newHeadSlot),
+			"newRoot":            fmt.Sprintf("%#x", newHeadRoot),
+			"oldSlot":            fmt.Sprintf("%d", headSlot),
+			"oldRoot":            fmt.Sprintf("%#x", oldHeadRoot),
+			"commonAncestorRoot": fmt.Sprintf("%#x", commonRoot),
+			"distance":           headSlot + newHeadSlot - 2*forkSlot,
+			"depth":              math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot)),
+		}).Info("Chain reorg occurred")
 		isOptimistic, err := s.IsOptimistic(ctx)
 		if err != nil {
 			return errors.Wrap(err, "could not check if node is optimistically synced")
@@ -112,7 +123,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
 			Type: statefeed.Reorg,
 			Data: &ethpbv1.EventChainReorg{
 				Slot:  newHeadSlot,
-				Depth: absoluteSlotDifference,
+				Depth: math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot)),
 				OldHeadBlock: oldHeadRoot[:],
 				NewHeadBlock: newHeadRoot[:],
 				OldHeadState: oldStateRoot,
@@ -333,7 +344,7 @@ func (s *Service) notifyNewHeadEvent(
 // This saves the attestations between `orphanedRoot` and the common ancestor root that is derived using `newHeadRoot`.
 // It also filters out the attestations that is one epoch older as a defense so invalid attestations don't flow into the attestation pool.
 func (s *Service) saveOrphanedAtts(ctx context.Context, orphanedRoot [32]byte, newHeadRoot [32]byte) error {
-	commonAncestorRoot, err := s.ForkChoicer().CommonAncestorRoot(ctx, newHeadRoot, orphanedRoot)
+	commonAncestorRoot, _, err := s.ForkChoicer().CommonAncestor(ctx, newHeadRoot, orphanedRoot)
 	switch {
 	// Exit early if there's no common ancestor and root doesn't exist, there would be nothing to save.
 	case errors.Is(err, forkchoice.ErrUnknownCommonAncestor):
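The new log fields derive from the fork (common-ancestor) slot: distance is the total number of slots walked on both branches since the fork, headSlot + newHeadSlot - 2*forkSlot, and depth is the length of the longer branch. A small sketch of the arithmetic; the slot values are made up:

```go
package main

import "fmt"

func max(a, b uint64) uint64 {
	if a > b {
		return a
	}
	return b
}

func main() {
	// Hypothetical reorg: both branches descend from the block at forkSlot.
	oldHeadSlot, newHeadSlot, forkSlot := uint64(103), uint64(104), uint64(100)

	// Slots on the old branch plus slots on the new branch since the fork.
	distance := oldHeadSlot + newHeadSlot - 2*forkSlot // 3 + 4 = 7

	// Length of the longer branch, i.e. how deep the reorg cuts.
	depth := max(oldHeadSlot-forkSlot, newHeadSlot-forkSlot) // max(3, 4) = 4

	fmt.Println("distance:", distance, "depth:", depth)
}
```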
@@ -15,6 +15,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v3/config/features"
 	"github.com/prysmaticlabs/prysm/v3/config/params"
 	"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
+	consensusblocks "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
 	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
 	ethpbv1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
@@ -150,6 +151,8 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
 	assert.DeepEqual(t, newHeadSignedBlock, pb, "Head did not change")
 	assert.DeepSSZEqual(t, headState.CloneInnerState(), service.headState(ctx).CloneInnerState(), "Head did not change")
 	require.LogsContain(t, hook, "Chain reorg occurred")
+	require.LogsContain(t, hook, "distance=1")
+	require.LogsContain(t, hook, "depth=1")
 }

 func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
@@ -158,10 +161,16 @@ func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
 	ctx := context.Background()

 	state, _ := util.DeterministicGenesisState(t, 100)
-	r := [32]byte{'a'}
-	require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: r[:]}))
-	require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), state, r))
-	balances, err := service.justifiedBalances.get(ctx, r)
+	newBlock := util.NewBeaconBlock()
+	newBlock.Block.Slot = 20
+	rt, err := newBlock.Block.HashTreeRoot()
+	assert.NoError(t, err)
+	wrappedBlk, err := consensusblocks.NewSignedBeaconBlock(newBlock)
+	assert.NoError(t, err)
+	assert.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrappedBlk))
+	require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: rt[:]}))
+	require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), state, rt))
+	balances, err := service.justifiedBalances.get(ctx, rt)
 	require.NoError(t, err)
 	require.DeepEqual(t, balances, state.Balances(), "Incorrect justified balances")
 }
@@ -75,6 +75,17 @@ func (s *Service) getInitSyncBlocks() []interfaces.SignedBeaconBlock {
 	return blks
 }

+// getInitSyncBlockFromCache returns a block from the initial sync blocks cache.
+func (s *Service) getInitSyncBlockFromCache(r [32]byte) (interfaces.SignedBeaconBlock, error) {
+	s.initSyncBlocksLock.RLock()
+	defer s.initSyncBlocksLock.RUnlock()
+	b := s.initSyncBlocks[r]
+	if b.IsNil() {
+		return nil, ErrNilBlockInCache
+	}
+	return b, nil
+}
+
 // This clears out the initial sync blocks cache.
 func (s *Service) clearInitSyncBlocks() {
 	s.initSyncBlocksLock.Lock()
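The new helper is a read-locked map lookup over the init-sync block cache. A generic sketch of the same pattern with a plain map guarded by sync.RWMutex; the types and names here are illustrative:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errNotInCache = errors.New("value not found in cache")

// cache is a minimal read-mostly cache in the style of the block cache above.
type cache struct {
	mu    sync.RWMutex
	items map[[32]byte]string
}

// get takes the read lock, so many readers can proceed concurrently;
// only writers need the exclusive lock.
func (c *cache) get(key [32]byte) (string, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	v, ok := c.items[key]
	if !ok {
		return "", errNotInCache
	}
	return v, nil
}

func (c *cache) set(key [32]byte, v string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items[key] = v
}

func main() {
	c := &cache{items: make(map[[32]byte]string)}
	c.set([32]byte{1}, "block")
	v, err := c.get([32]byte{1})
	fmt.Println(v, err)
}
```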
@@ -8,6 +8,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
 	"github.com/prysmaticlabs/prysm/v3/config/params"
+	consensusBlocks "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
 	"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
 	"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
 	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
@@ -51,10 +52,15 @@ func logStateTransitionData(b interfaces.BeaconBlock) error {
 		}
 		log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
 		txs, err := p.Transactions()
-		if err != nil {
+		switch {
+		case errors.Is(err, consensusBlocks.ErrUnsupportedGetter):
+		case err != nil:
 			return err
+		default:
+			log = log.WithField("txCount", len(txs))
+			txsPerSlotCount.Set(float64(len(txs)))
 		}
-		log = log.WithField("txCount", len(txs))
 	}
 	log.Info("Finished applying state transition")
 	return nil
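The switch above tolerates one specific error (blocks without an execution payload have no transactions getter) while still failing on anything else. A sketch of that triage pattern with stdlib errors; the sentinel and the stand-in function are illustrative:

```go
package main

import (
	"errors"
	"fmt"
)

// errUnsupportedGetter stands in for consensusBlocks.ErrUnsupportedGetter.
var errUnsupportedGetter = errors.New("unsupported getter for this block version")

func transactions(version int) ([]string, error) {
	if version < 3 { // pre-payload block: no transactions to fetch
		return nil, fmt.Errorf("Transactions: %w", errUnsupportedGetter)
	}
	return []string{"tx1", "tx2"}, nil
}

func logTxCount(version int) error {
	txs, err := transactions(version)
	switch {
	case errors.Is(err, errUnsupportedGetter):
		// Expected for old block versions: skip the field, don't fail.
	case err != nil:
		return err // any other error is a real failure
	default:
		fmt.Println("txCount:", len(txs))
	}
	return nil
}

func main() {
	_ = logTxCount(1) // silently skips
	_ = logTxCount(3) // prints txCount: 2
}
```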
@@ -158,10 +158,47 @@ var (
 		Name: "forkchoice_updated_optimistic_node_count",
 		Help: "Count the number of optimistic nodes after forkchoiceUpdated EE call",
 	})
+	forkchoiceUpdatedInvalidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
+		Name: "forkchoice_updated_invalid_node_count",
+		Help: "Count the number of invalid nodes after forkchoiceUpdated EE call",
+	})
+	txsPerSlotCount = promauto.NewGauge(prometheus.GaugeOpts{
+		Name: "txs_per_slot_count",
+		Help: "Count the number of txs per slot",
+	})
 	missedPayloadIDFilledCount = promauto.NewCounter(prometheus.CounterOpts{
 		Name: "missed_payload_id_filled_count",
 		Help: "",
 	})
+	onBlockProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
+		Name: "on_block_processing_milliseconds",
+		Help: "Total time in milliseconds to complete a call to onBlock()",
+	})
+	stateTransitionProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
+		Name: "state_transition_processing_milliseconds",
+		Help: "Total time to call a state transition in onBlock()",
+	})
+	processAttsElapsedTime = promauto.NewHistogram(
+		prometheus.HistogramOpts{
+			Name:    "process_attestations_milliseconds",
+			Help:    "Captures latency for process attestations (forkchoice) in milliseconds",
+			Buckets: []float64{1, 5, 20, 100, 500, 1000},
+		},
+	)
+	newAttHeadElapsedTime = promauto.NewHistogram(
+		prometheus.HistogramOpts{
+			Name:    "new_att_head_milliseconds",
+			Help:    "Captures latency for new attestation head in milliseconds",
+			Buckets: []float64{1, 5, 20, 100, 500, 1000},
+		},
+	)
+	newBlockHeadElapsedTime = promauto.NewHistogram(
+		prometheus.HistogramOpts{
+			Name:    "new_block_head_milliseconds",
+			Help:    "Captures latency for new block head in milliseconds",
+			Buckets: []float64{1, 5, 20, 100, 500, 1000},
+		},
+	)
 )

 // reportSlotMetrics reports slot related metrics.
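These summaries and histograms are fed with time.Since(...).Milliseconds() around the instrumented call, as the onBlock and UpdateHead hunks below show. A self-contained sketch of the pattern with the same Prometheus Go client the diff uses; the metric name and the timed step are illustrative:

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// Histogram with millisecond buckets, mirroring the ones added in this diff.
var stepElapsed = promauto.NewHistogram(prometheus.HistogramOpts{
	Name:    "example_step_milliseconds",
	Help:    "Latency of an example step in milliseconds",
	Buckets: []float64{1, 5, 20, 100, 500, 1000},
})

func step() {
	time.Sleep(15 * time.Millisecond) // stand-in for real work
}

func main() {
	start := time.Now()
	step()
	// Observe elapsed wall time in milliseconds, exactly as the diff does.
	stepElapsed.Observe(float64(time.Since(start).Milliseconds()))
	fmt.Println("observed", time.Since(start))
}
```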
@@ -307,7 +307,11 @@ func TestStore_SaveCheckpointState(t *testing.T) {

 	s, err := util.NewBeaconState()
 	require.NoError(t, err)
-	err = s.SetFinalizedCheckpoint(&ethpb.Checkpoint{Root: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)})
+	b0 := util.NewBeaconBlock()
+	b0.Block.Slot = 0
+	r0, err := util.SaveBlock(t, ctx, beaconDB, b0).Block().HashTreeRoot()
+	require.NoError(t, err)
+	err = s.SetFinalizedCheckpoint(&ethpb.Checkpoint{Root: r0[:]})
 	require.NoError(t, err)
 	val := &ethpb.Validator{
 		PublicKey: bytesutil.PadTo([]byte("foo"), 48),
@@ -317,20 +321,27 @@ func TestStore_SaveCheckpointState(t *testing.T) {
 	require.NoError(t, err)
 	err = s.SetBalances([]uint64{0})
 	require.NoError(t, err)
-	r := [32]byte{'g'}
-	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, r))
+	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, r0))

-	cp1 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)}
-	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})))
-	require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)}))
+	b1 := util.NewBeaconBlock()
+	b1.Block.Slot = 1
+	r1, err := util.SaveBlock(t, ctx, beaconDB, b1).Block().HashTreeRoot()
+	require.NoError(t, err)
+	cp1 := &ethpb.Checkpoint{Epoch: 1, Root: r1[:]}
+	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, r1))
+	require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: r1[:]}))

 	s1, err := service.getAttPreState(ctx, cp1)
 	require.NoError(t, err)
 	assert.Equal(t, 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot(), "Unexpected state slot")

-	cp2 := &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength)}
-	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'B'})))
-	require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength)}))
+	b2 := util.NewBeaconBlock()
+	b2.Block.Slot = 2
+	r2, err := util.SaveBlock(t, ctx, beaconDB, b2).Block().HashTreeRoot()
+	require.NoError(t, err)
+	cp2 := &ethpb.Checkpoint{Epoch: 2, Root: r2[:]}
+	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, r2))
+	require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: r2[:]}))
 	s2, err := service.getAttPreState(ctx, cp2)
 	require.NoError(t, err)
 	assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")
@@ -348,9 +359,13 @@ func TestStore_SaveCheckpointState(t *testing.T) {
 	assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")

 	require.NoError(t, s.SetSlot(params.BeaconConfig().SlotsPerEpoch+1))
-	cp3 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}
-	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
-	require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}))
+	b3 := util.NewBeaconBlock()
+	b3.Block.Slot = 3
+	r3, err := util.SaveBlock(t, ctx, beaconDB, b3).Block().HashTreeRoot()
+	require.NoError(t, err)
+	cp3 := &ethpb.Checkpoint{Epoch: 1, Root: r3[:]}
+	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, r3))
+	require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: r3[:]}))
 	s3, err := service.getAttPreState(ctx, cp3)
 	require.NoError(t, err)
 	assert.Equal(t, s.Slot(), s3.Slot(), "Unexpected state slot")
@@ -98,6 +98,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
 	if err := consensusblocks.BeaconBlockIsNil(signed); err != nil {
 		return invalidBlock{error: err}
 	}
+	startTime := time.Now()
 	b := signed.Block()

 	preState, err := s.getBlockPreState(ctx, b)
@@ -115,10 +116,13 @@
 	if err != nil {
 		return err
 	}
+	stateTransitionStartTime := time.Now()
 	postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
 	if err != nil {
 		return invalidBlock{error: err}
 	}
+	stateTransitionProcessingTime.Observe(float64(time.Since(stateTransitionStartTime).Milliseconds()))

 	postStateVersion, postStateHeader, err := getStateVersionAndPayload(postState)
 	if err != nil {
 		return err
@@ -139,6 +143,9 @@
 	if err := s.insertBlockToForkchoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
 		return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
 	}
+	if err := s.handleBlockAttestations(ctx, signed.Block(), postState); err != nil {
+		return errors.Wrap(err, "could not handle block's attestations")
+	}
 	s.InsertSlashingsToForkChoiceStore(ctx, signed.Block().Body().AttesterSlashings())
 	if isValidPayload {
 		if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil {
@@ -179,10 +186,14 @@
 		msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", justified.Root)
 		return errors.Wrap(err, msg)
 	}
+
+	start := time.Now()
 	headRoot, err := s.cfg.ForkChoiceStore.Head(ctx, balances)
 	if err != nil {
 		log.WithError(err).Warn("Could not update head")
 	}
+	newBlockHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))

 	if err := s.notifyEngineIfChangedHead(ctx, headRoot); err != nil {
 		return err
 	}
@@ -259,7 +270,11 @@
 	}
 	defer reportAttestationInclusion(b)
-	return s.handleEpochBoundary(ctx, postState)
+	if err := s.handleEpochBoundary(ctx, postState); err != nil {
+		return err
+	}
+	onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
+	return nil
 }

 func getStateVersionAndPayload(st state.BeaconState) (int, *enginev1.ExecutionPayloadHeader, error) {
@@ -393,13 +408,15 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
 		tracing.AnnotateError(span, err)
 		return err
 	}
-	if err := s.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
+
+	if err := s.cfg.BeaconDB.SaveStateSummariesWithPendingBlocks(ctx, []*ethpb.StateSummary{{
 		Slot: b.Block().Slot(),
 		Root: blockRoots[i][:],
-	}); err != nil {
+	}}, s.getInitSyncBlockFromCache); err != nil {
 		tracing.AnnotateError(span, err)
 		return err
 	}

 	if i > 0 && jCheckpoints[i].Epoch > jCheckpoints[i-1].Epoch {
 		if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, jCheckpoints[i]); err != nil {
 			tracing.AnnotateError(span, err)
@@ -430,6 +447,17 @@
 	}

 	for r, st := range boundaries {
+		// Ensure the cached block is saved to db before saving state boundary.
+		if !s.cfg.BeaconDB.HasBlock(ctx, r) {
+			b, err := s.getInitSyncBlockFromCache(r)
+			if err != nil || b.IsNil() {
+				log.WithField("block root", fmt.Sprintf("%#x", bytesutil.Trunc(r[:]))).Warn("Could not find block for boundary state root in cache")
+			} else {
+				if err := s.cfg.BeaconDB.SaveBlock(ctx, b); err != nil {
+					return err
+				}
+			}
+		}
 		if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
 			return err
 		}
@@ -516,6 +544,29 @@ func (s *Service) insertBlockToForkchoiceStore(ctx context.Context, blk interfac
 	return nil
 }

+// This feeds in the attestations included in the block to fork choice store. It allows fork choice store
+// to gain information on the most current chain.
+func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.BeaconBlock, st state.BeaconState) error {
+	// Feed in block's attestations to fork choice store.
+	for _, a := range blk.Body().Attestations() {
+		committee, err := helpers.BeaconCommitteeFromState(ctx, st, a.Data.Slot, a.Data.CommitteeIndex)
+		if err != nil {
+			return err
+		}
+		indices, err := attestation.AttestingIndices(a.AggregationBits, committee)
+		if err != nil {
+			return err
+		}
+		r := bytesutil.ToBytes32(a.Data.BeaconBlockRoot)
+		if s.cfg.ForkChoiceStore.HasNode(r) {
+			s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, r, a.Data.Target.Epoch)
+		} else if err := s.cfg.AttPool.SaveBlockAttestation(a); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
 // InsertSlashingsToForkChoiceStore inserts attester slashing indices to fork choice store.
 // To call this function, it's caller's responsibility to ensure the slashing object is valid.
 func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []*ethpb.AttesterSlashing) {
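handleBlockAttestations applies a vote immediately when fork choice already knows the target root, and otherwise parks the attestation in the pool for later. A toy sketch of that process-or-buffer shape; the store, vote weights, and names are illustrative, not Prysm's types:

```go
package main

import "fmt"

// A toy fork-choice store: it only knows about some block roots.
type store struct{ nodes map[[32]byte]bool }

func (s *store) hasNode(r [32]byte) bool { return s.nodes[r] }

// processOrBuffer mirrors the shape of handleBlockAttestations above:
// votes for known targets are applied immediately, the rest are buffered
// so they can be replayed once the target block arrives.
func processOrBuffer(s *store, votes map[[32]byte]int, buffer *[][32]byte) int {
	applied := 0
	for root, n := range votes {
		if s.hasNode(root) {
			applied += n // apply vote weight to the known node
		} else {
			*buffer = append(*buffer, root) // keep for later
		}
	}
	return applied
}

func main() {
	known, unknown := [32]byte{1}, [32]byte{2}
	s := &store{nodes: map[[32]byte]bool{known: true}}
	var buf [][32]byte
	applied := processOrBuffer(s, map[[32]byte]int{known: 3, unknown: 2}, &buf)
	fmt.Println("applied:", applied, "buffered:", len(buf)) // applied: 3 buffered: 1
}
```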
@@ -135,11 +135,21 @@ func (s *Service) verifyBlkFinalizedSlot(b interfaces.BeaconBlock) error {
 }

 // updateFinalized saves the init sync blocks, finalized checkpoint, migrates
-// to cold old states and saves the last validated checkpoint to DB
+// to cold old states and saves the last validated checkpoint to DB. It returns
+// early if the new checkpoint is older than the one on db.
 func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) error {
 	ctx, span := trace.StartSpan(ctx, "blockChain.updateFinalized")
 	defer span.End()

+	// return early if new checkpoint is not newer than the one in DB
+	currentFinalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(ctx)
+	if err != nil {
+		return err
+	}
+	if cp.Epoch <= currentFinalized.Epoch {
+		return nil
+	}
+
 	// Blocks need to be saved so that we can retrieve finalized block from
 	// DB when migrating states.
 	if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
@@ -267,7 +277,7 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
 	if len(pendingNodes) == 1 {
 		return nil
 	}
-	if root != s.ensureRootNotZeros(finalized.Root) {
+	if root != s.ensureRootNotZeros(finalized.Root) && !s.ForkChoicer().HasNode(root) {
 		return errNotDescendantOfFinalized
 	}
 	return s.cfg.ForkChoiceStore.InsertOptimisticChain(ctx, pendingNodes)
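updateFinalized now enforces monotonicity: a checkpoint at or below the stored finalized epoch is a no-op, which the lower-epoch assertions added to Test_UpdateLastValidatedCheckpoint above rely on. A tiny sketch of the guard; the store shape is illustrative:

```go
package main

import "fmt"

type checkpoint struct {
	Epoch uint64
	Root  [32]byte
}

type db struct{ finalized checkpoint }

// updateFinalized ignores checkpoints that do not advance the finalized
// epoch, mirroring the early return added in the diff.
func (d *db) updateFinalized(cp checkpoint) bool {
	if cp.Epoch <= d.finalized.Epoch {
		return false // no-op: not newer than what we already have
	}
	d.finalized = cp
	return true
}

func main() {
	d := &db{finalized: checkpoint{Epoch: 5}}
	fmt.Println(d.updateFinalized(checkpoint{Epoch: 4})) // false, ignored
	fmt.Println(d.updateFinalized(checkpoint{Epoch: 6})) // true, advanced
}
```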
@@ -391,6 +391,7 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
 	}
 	err = service.onBlockBatch(ctx, blks, blkRoots)
 	require.NoError(t, err)
+	require.NoError(t, service.Stop())
 }

 func TestCachedPreState_CanGetFromStateSummary_ProtoArray(t *testing.T) {
@@ -1350,7 +1351,14 @@ func TestInsertFinalizedDeposits(t *testing.T) {
 	gs = gs.Copy()
 	assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 10}))
 	assert.NoError(t, gs.SetEth1DepositIndex(8))
-	assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
+	newBlock := util.NewBeaconBlock()
+	newBlock.Block.Slot = 20
+	rt, err := newBlock.Block.HashTreeRoot()
+	assert.NoError(t, err)
+	wrappedBlk, err := consensusblocks.NewSignedBeaconBlock(newBlock)
+	assert.NoError(t, err)
+	assert.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrappedBlk))
+	assert.NoError(t, service.cfg.StateGen.SaveState(ctx, rt, gs))
 	zeroSig := [96]byte{}
 	for i := uint64(0); i < uint64(4*params.BeaconConfig().SlotsPerEpoch); i++ {
 		root := []byte(strconv.Itoa(int(i)))
@@ -1361,7 +1369,7 @@ func TestInsertFinalizedDeposits(t *testing.T) {
 			Signature: zeroSig[:],
 		}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root)))
 	}
-	assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
+	assert.NoError(t, service.insertFinalizedDeposits(ctx, rt))
 	fDeposits := depositCache.FinalizedDeposits(ctx)
 	assert.Equal(t, 7, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
 	deps := depositCache.AllDeposits(ctx, big.NewInt(107))
@@ -1384,11 +1392,27 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
 	gs = gs.Copy()
 	assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 7}))
 	assert.NoError(t, gs.SetEth1DepositIndex(6))
-	assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
+
+	newBlock := util.NewBeaconBlock()
+	newBlock.Block.Slot = 20
+	rt, err := newBlock.Block.HashTreeRoot()
+	assert.NoError(t, err)
+	wrappedBlk, err := consensusblocks.NewSignedBeaconBlock(newBlock)
+	assert.NoError(t, err)
+	assert.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrappedBlk))
+	assert.NoError(t, service.cfg.StateGen.SaveState(ctx, rt, gs))
 	gs2 := gs.Copy()
 	assert.NoError(t, gs2.SetEth1Data(&ethpb.Eth1Data{DepositCount: 15}))
 	assert.NoError(t, gs2.SetEth1DepositIndex(13))
-	assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k', '2'}, gs2))
+
+	newBlock2 := util.NewBeaconBlock()
+	newBlock2.Block.Slot = 30
+	rt2, err := newBlock2.Block.HashTreeRoot()
+	assert.NoError(t, err)
+	wrappedBlk2, err := consensusblocks.NewSignedBeaconBlock(newBlock2)
+	assert.NoError(t, err)
+	assert.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrappedBlk2))
+	assert.NoError(t, service.cfg.StateGen.SaveState(ctx, rt2, gs2))
 	zeroSig := [96]byte{}
 	for i := uint64(0); i < uint64(4*params.BeaconConfig().SlotsPerEpoch); i++ {
 		root := []byte(strconv.Itoa(int(i)))
@@ -1402,7 +1426,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
 	// Insert 3 deposits before hand.
 	depositCache.InsertFinalizedDeposits(ctx, 2)

-	assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
+	assert.NoError(t, service.insertFinalizedDeposits(ctx, rt))
 	fDeposits := depositCache.FinalizedDeposits(ctx)
 	assert.Equal(t, 5, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")

@@ -1412,7 +1436,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
 	}

 	// Insert New Finalized State with higher deposit count.
-	assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k', '2'}))
+	assert.NoError(t, service.insertFinalizedDeposits(ctx, rt2))
 	fDeposits = depositCache.FinalizedDeposits(ctx)
 	assert.Equal(t, 12, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
 	deps = depositCache.AllDeposits(ctx, big.NewInt(112))
@@ -1425,12 +1449,12 @@ func TestRemoveBlockAttestationsInPool_Canonical(t *testing.T) {
 	genesis, keys := util.DeterministicGenesisState(t, 64)
 	b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
 	assert.NoError(t, err)
-	r, err := b.Block.HashTreeRoot()
-	require.NoError(t, err)

 	ctx := context.Background()
 	beaconDB := testDB.SetupDB(t)
 	service := setupBeaconChain(t, beaconDB)
+	r, err := util.SaveBlock(t, ctx, beaconDB, b).Block().HashTreeRoot()
+	require.NoError(t, err)
 	require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: r[:]}))
 	require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, r))
@@ -3006,10 +3030,9 @@ func TestStore_NoViableHead_Reboot_DoublyLinkedTree(t *testing.T) {
 	headRoot, err := service.HeadRoot(ctx)
 	require.NoError(t, err)
 	require.Equal(t, genesisRoot, bytesutil.ToBytes32(headRoot))
-	// The node is optimistic now.
 	optimistic, err := service.IsOptimistic(ctx)
 	require.NoError(t, err)
-	require.Equal(t, true, optimistic)
+	require.Equal(t, false, optimistic)
 	require.Equal(t, false, service.ForkChoicer().AllTipsAreInvalid())

 	// Check that the node's justified checkpoint does not agree with the
@@ -3230,10 +3253,9 @@ func TestStore_NoViableHead_Reboot_Protoarray(t *testing.T) {
 	headRoot, err := service.HeadRoot(ctx)
 	require.NoError(t, err)
 	require.Equal(t, genesisRoot, bytesutil.ToBytes32(headRoot))
-	// The node is optimistic now
 	optimistic, err := service.IsOptimistic(ctx)
 	require.NoError(t, err)
-	require.Equal(t, true, optimistic)
+	require.Equal(t, false, optimistic)
 	require.Equal(t, false, service.ForkChoicer().AllTipsAreInvalid())

 	// Check that the node's justified checkpoint does not agree with the
@@ -3314,6 +3336,75 @@ func TestStore_NoViableHead_Reboot_Protoarray(t *testing.T) {
 	require.Equal(t, false, service.ForkChoicer().AllTipsAreInvalid())
 }

+func TestOnBlock_HandleBlockAttestations(t *testing.T) {
+	ctx := context.Background()
+	beaconDB := testDB.SetupDB(t)
+	opts := []Option{
+		WithDatabase(beaconDB),
+		WithAttestationPool(attestations.NewPool()),
+		WithStateGen(stategen.New(beaconDB)),
+		WithForkChoiceStore(doublylinkedtree.New()),
+		WithStateNotifier(&mock.MockStateNotifier{}),
+	}
+	service, err := NewService(ctx, opts...)
+	require.NoError(t, err)
+
+	st, keys := util.DeterministicGenesisState(t, 64)
+	stateRoot, err := st.HashTreeRoot(ctx)
+	require.NoError(t, err, "Could not hash genesis state")
+
+	require.NoError(t, service.saveGenesisData(ctx, st))
+
+	genesis := blocks.NewGenesisBlock(stateRoot[:])
+	wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
+	require.NoError(t, err)
+	require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
+	parentRoot, err := genesis.Block.HashTreeRoot()
+	require.NoError(t, err, "Could not get signing root")
+	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
+	require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
+
+	st, err = service.HeadState(ctx)
+	require.NoError(t, err)
+	b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
+	require.NoError(t, err)
+	wsb, err = consensusblocks.NewSignedBeaconBlock(b)
+	require.NoError(t, err)
+	root, err := b.Block.HashTreeRoot()
+	require.NoError(t, err)
+	require.NoError(t, service.onBlock(ctx, wsb, root))
+
+	st, err = service.HeadState(ctx)
+	require.NoError(t, err)
+	b, err = util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
+	require.NoError(t, err)
+	wsb, err = consensusblocks.NewSignedBeaconBlock(b)
+	require.NoError(t, err)
+
+	// prepare another block that is not inserted
+	st3, err := transition.ExecuteStateTransition(ctx, st, wsb)
+	require.NoError(t, err)
+	b3, err := util.GenerateFullBlock(st3, keys, util.DefaultBlockGenConfig(), 3)
+	require.NoError(t, err)
+	wsb3, err := consensusblocks.NewSignedBeaconBlock(b3)
+	require.NoError(t, err)
+
+	require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
+	a := wsb.Block().Body().Attestations()[0]
+	r := bytesutil.ToBytes32(a.Data.BeaconBlockRoot)
+	require.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r))
+
+	require.Equal(t, 1, len(wsb3.Block().Body().Attestations()))
+	a3 := wsb3.Block().Body().Attestations()[0]
+	r3 := bytesutil.ToBytes32(a3.Data.BeaconBlockRoot)
+	require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))
+
+	require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st))   // fine to use the same committee as st
+	require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
+	require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committee as st
+	require.Equal(t, 1, len(service.cfg.AttPool.BlockAttestations()))
+}
+
 // Helper function to simulate the block being on time or delayed for proposer
 // boost. It alters the genesisTime tracked by the store.
 func driftGenesisTime(s *Service, slot int64, delay int64) {
@@ -147,17 +147,22 @@ func (s *Service) UpdateHead(ctx context.Context) error {
 	s.processAttestationsLock.Lock()
 	defer s.processAttestationsLock.Unlock()

+	start := time.Now()
 	s.processAttestations(ctx)
+	processAttsElapsedTime.Observe(float64(time.Since(start).Milliseconds()))

 	justified := s.ForkChoicer().JustifiedCheckpoint()
 	balances, err := s.justifiedBalances.get(ctx, justified.Root)
 	if err != nil {
 		return err
 	}
+	start = time.Now()
 	newHeadRoot, err := s.cfg.ForkChoiceStore.Head(ctx, balances)
 	if err != nil {
 		log.WithError(err).Warn("Resolving fork due to new attestation")
 	}
+	newAttHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))

 	s.headLock.RLock()
 	if s.headRoot() != newHeadRoot {
 		log.WithFields(logrus.Fields{
@@ -150,11 +150,6 @@ func (s *Service) handlePostBlockOperations(b interfaces.BeaconBlock) error {
 		return err
 	}

-	// Add block attestations to the fork choice pool to compute head.
-	if err := s.cfg.AttPool.SaveBlockAttestations(b.Body().Attestations()); err != nil {
-		log.WithError(err).Error("Could not save block attestations for fork choice")
-		return nil
-	}
 	// Mark block exits as seen so we don't include same ones in future blocks.
 	for _, e := range b.Body().VoluntaryExits() {
 		s.cfg.ExitPool.MarkIncluded(e)
@@ -76,9 +76,9 @@ func TestService_ReceiveBlock(t *testing.T) {
 			),
 		},
 		check: func(t *testing.T, s *Service) {
-			if baCount := len(s.cfg.AttPool.BlockAttestations()); baCount != 2 {
+			if baCount := len(s.cfg.AttPool.BlockAttestations()); baCount != 0 {
 				t.Errorf("Did not get the correct number of block attestations saved to the pool. "+
-					"Got %d but wanted %d", baCount, 2)
+					"Got %d but wanted %d", baCount, 0)
 			}
 		},
 	},
@@ -231,14 +231,15 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
 	if err := forkChoicer.InsertNode(s.ctx, st, fRoot); err != nil {
 		return errors.Wrap(err, "could not insert finalized block to forkchoice")
 	}
-
-	lastValidatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(s.ctx)
-	if err != nil {
-		return errors.Wrap(err, "could not get last validated checkpoint")
-	}
-	if bytes.Equal(finalized.Root, lastValidatedCheckpoint.Root) {
-		if err := forkChoicer.SetOptimisticToValid(s.ctx, fRoot); err != nil {
-			return errors.Wrap(err, "could not set finalized block as validated")
+	if !features.Get().EnableStartOptimistic {
+		lastValidatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(s.ctx)
+		if err != nil {
+			return errors.Wrap(err, "could not get last validated checkpoint")
+		}
+		if bytes.Equal(finalized.Root, lastValidatedCheckpoint.Root) {
+			if err := forkChoicer.SetOptimisticToValid(s.ctx, fRoot); err != nil {
+				return errors.Wrap(err, "could not set finalized block as validated")
+			}
 		}
 	}
 	// not attempting to save initial sync blocks here, because there shouldn't be any until
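The EnableStartOptimistic flag now gates whether the finalized root is marked valid at startup, and the new TestChainService_EverythingOptimistic below toggles it through features.InitWithReset. A compact sketch of that package-level flags pattern; the Init/Get plumbing here is a simplified stand-in for Prysm's config/features:

```go
package main

import "fmt"

// Flags holds process-wide feature toggles.
type Flags struct {
	EnableStartOptimistic bool
}

var active = &Flags{}

// Init swaps in a new flag set and returns a reset func, handy in tests.
func Init(f *Flags) func() {
	prev := active
	active = f
	return func() { active = prev }
}

// Get returns the currently active flags.
func Get() *Flags { return active }

func main() {
	reset := Init(&Flags{EnableStartOptimistic: true})
	defer reset()

	if !Get().EnableStartOptimistic {
		fmt.Println("marking finalized root as fully validated")
	} else {
		fmt.Println("starting optimistically; finalized root stays optimistic")
	}
}
```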
@@ -26,6 +26,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
 	v1 "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/v1"
+	"github.com/prysmaticlabs/prysm/v3/config/features"
 	"github.com/prysmaticlabs/prysm/v3/config/params"
 	consensusblocks "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
 	"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
@@ -118,8 +119,11 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
 	require.NoError(t, err)

 	stateGen := stategen.New(beaconDB)
-	// Save a state in stategen for purposes of testing a service stop / shutdown.
-	require.NoError(t, stateGen.SaveState(ctx, bytesutil.ToBytes32(bState.FinalizedCheckpoint().Root), bState))
+
+	// Save a state and block in stategen for purposes of testing a service stop / shutdown.
+	r, err := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlock()).Block().HashTreeRoot()
+	require.NoError(t, err)
+	require.NoError(t, stateGen.SaveState(ctx, r, bState))

 	opts := []Option{
 		WithDatabase(beaconDB),
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
@@ -191,8 +195,6 @@ func TestChainStartStop_GenesisZeroHashes(t *testing.T) {
|
||||
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
||||
require.NoError(t, beaconDB.SaveJustifiedCheckpoint(ctx, ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}))
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, ðpb.Checkpoint{Root: blkRoot[:]}))
|
||||
chainService.cfg.FinalizedStateAtStartUp = s
|
||||
// Test the start function.
|
||||
chainService.Start()
|
||||
@@ -380,7 +382,7 @@ func TestChainService_SaveHeadNoDB(t *testing.T) {
 	}
 	blk := util.NewBeaconBlock()
 	blk.Block.Slot = 1
-	r, err := blk.HashTreeRoot()
+	r, err := util.SaveBlock(t, ctx, beaconDB, blk).Block().HashTreeRoot()
 	require.NoError(t, err)
 	newState, err := util.NewBeaconState()
 	require.NoError(t, err)
@@ -528,3 +530,45 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
 		require.Equal(b, true, s.cfg.ForkChoiceStore.HasNode(r), "Block is not in fork choice store")
 	}
 }
+
+func TestChainService_EverythingOptimistic(t *testing.T) {
+	resetFn := features.InitWithReset(&features.Flags{
+		EnableStartOptimistic: true,
+	})
+	defer resetFn()
+	beaconDB := testDB.SetupDB(t)
+	ctx := context.Background()
+
+	genesis := util.NewBeaconBlock()
+	genesisRoot, err := genesis.Block.HashTreeRoot()
+	require.NoError(t, err)
+	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
+	util.SaveBlock(t, ctx, beaconDB, genesis)
+
+	finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
+	headBlock := util.NewBeaconBlock()
+	headBlock.Block.Slot = finalizedSlot
+	headBlock.Block.ParentRoot = bytesutil.PadTo(genesisRoot[:], 32)
+	headState, err := util.NewBeaconState()
+	require.NoError(t, err)
+	require.NoError(t, headState.SetSlot(finalizedSlot))
+	require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
+	headRoot, err := headBlock.Block.HashTreeRoot()
+	require.NoError(t, err)
+	require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
+	require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
+	util.SaveBlock(t, ctx, beaconDB, headBlock)
+	require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
+	attSrv, err := attestations.NewService(ctx, &attestations.Config{})
+	require.NoError(t, err)
+	stateGen := stategen.New(beaconDB)
+	c, err := NewService(ctx, WithDatabase(beaconDB), WithStateGen(stateGen), WithAttestationService(attSrv), WithStateNotifier(&mock.MockStateNotifier{}), WithFinalizedStateAtStartUp(headState))
+	require.NoError(t, err)
+	require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))
+	require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
+	require.NoError(t, c.StartFromSavedState(headState))
+	require.Equal(t, true, c.cfg.ForkChoiceStore.HasNode(headRoot))
+	op, err := c.cfg.ForkChoiceStore.IsOptimistic(headRoot)
+	require.NoError(t, err)
+	require.Equal(t, true, op)
+}

@@ -30,8 +30,7 @@ type WeakSubjectivityVerifier struct {
// NewWeakSubjectivityVerifier validates a checkpoint, and if valid, uses it to initialize a weak subjectivity verifier.
func NewWeakSubjectivityVerifier(wsc *ethpb.Checkpoint, db weakSubjectivityDB) (*WeakSubjectivityVerifier, error) {
	if wsc == nil || len(wsc.Root) == 0 || wsc.Epoch == 0 {
-		log.Info("--weak-subjectivity-checkpoint not provided. Prysm recommends providing a weak subjectivity checkpoint " +
-			"for nodes synced from genesis, or manual verification of block and state roots for checkpoint sync nodes.")
+		log.Debug("--weak-subjectivity-checkpoint not provided")
		return &WeakSubjectivityVerifier{
			enabled: false,
		}, nil
@@ -2,7 +2,6 @@ package builder

import (
	"context"
-	"fmt"
	"time"

	"github.com/pkg/errors"
@@ -66,12 +65,12 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {

	// Is the builder up?
	if err := s.c.Status(ctx); err != nil {
-		return nil, fmt.Errorf("could not connect to builder: %v", err)
+		log.WithError(err).Error("Failed to check builder status")
+	} else {
+		log.WithField("endpoint", c.NodeURL()).Info("Builder has been configured")
+		log.Warn("Outsourcing block construction to external builders adds non-trivial delay to block propagation time. " +
+			"Builder-constructed blocks or fallback blocks may get orphaned. Use at your own risk!")
	}
-	log.WithField("endpoint", c.NodeURL()).Info("Builder has been configured")
-	log.Warn("Outsourcing block construction to external builders adds non-trivial delay to block propagation time. " +
-		"Builder-constructed blocks or fallback blocks may get orphaned. Use at your own risk!")
	return s, nil
}
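The hunk above swaps a hard failure for a logged warning: a builder that is unreachable at startup no longer prevents the beacon node from starting. A minimal sketch of the same degrade-gracefully pattern, with hypothetical checkHealth and start names standing in for the builder Status call:

package main

import "log"

// checkHealth is a hypothetical stand-in for a dependency health probe
// such as the builder Status call above.
func checkHealth() error { return nil }

func start() error {
	// Log and continue instead of returning an error: the service stays
	// usable even when the optional dependency is down at startup.
	if err := checkHealth(); err != nil {
		log.Printf("failed to check builder status: %v", err)
	} else {
		log.Println("builder has been configured")
	}
	return nil
}

func main() { _ = start() }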

@@ -185,7 +185,21 @@ func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*ethpb.Depos
	dc.depositsLock.RLock()
	defer dc.depositsLock.RUnlock()

-	return dc.deposits
+	// Make a shallow copy of the deposits and return that. This way, the
+	// caller can safely iterate over the returned list of deposits without
+	// the possibility of new deposits showing up. If we were to return the
+	// list without a copy, a new deposit added to the cache would also be
+	// present in the returned value. This could result in a race condition
+	// if the list is being iterated over.
+	//
+	// It's not necessary to make a deep copy of this list because the
+	// deposits in the cache should never be modified. It is still possible
+	// for the caller to modify one of the underlying deposits, and thereby
+	// the cache, but that's not a race condition. Also, a deep copy would
+	// take too long and use too much memory.
+	deposits := make([]*ethpb.DepositContainer, len(dc.deposits))
+	copy(deposits, dc.deposits)
+	return deposits
}

// AllDeposits returns a list of historical deposits until the given block number
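Why the copy matters: if the method returned dc.deposits directly, an append in another goroutine could grow or reallocate the slice while a caller iterates it. A self-contained sketch of the pattern, with hypothetical cache/Snapshot names:

package main

import (
	"fmt"
	"sync"
)

type cache struct {
	mu    sync.RWMutex
	items []int
}

// Snapshot returns a shallow copy: the caller's slice header is
// independent of the cache's, so later Adds cannot change what the
// caller iterates over. The elements themselves are shared.
func (c *cache) Snapshot() []int {
	c.mu.RLock()
	defer c.mu.RUnlock()
	out := make([]int, len(c.items))
	copy(out, c.items)
	return out
}

func (c *cache) Add(v int) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items = append(c.items, v)
}

func main() {
	c := &cache{}
	c.Add(1)
	snap := c.Snapshot()
	c.Add(2)
	fmt.Println(len(snap)) // 1: the snapshot is unaffected by the later Add
}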

@@ -55,7 +55,7 @@ func (dc *DepositCache) PendingDeposits(ctx context.Context, untilBlk *big.Int)

	depositCntrs := dc.PendingContainers(ctx, untilBlk)

-	var deposits []*ethpb.Deposit
+	deposits := make([]*ethpb.Deposit, 0, len(depositCntrs))
	for _, dep := range depositCntrs {
		deposits = append(deposits, dep.Deposit)
	}
@@ -71,7 +71,7 @@ func (dc *DepositCache) PendingContainers(ctx context.Context, untilBlk *big.Int
	dc.depositsLock.RLock()
	defer dc.depositsLock.RUnlock()

-	var depositCntrs []*ethpb.DepositContainer
+	depositCntrs := make([]*ethpb.DepositContainer, 0, len(dc.pendingDeposits))
	for _, ctnr := range dc.pendingDeposits {
		if untilBlk == nil || untilBlk.Uint64() >= ctnr.Eth1BlockHeight {
			depositCntrs = append(depositCntrs, ctnr)
@@ -139,7 +139,7 @@ func (dc *DepositCache) PrunePendingDeposits(ctx context.Context, merkleTreeInde
	dc.depositsLock.Lock()
	defer dc.depositsLock.Unlock()

-	var cleanDeposits []*ethpb.DepositContainer
+	cleanDeposits := make([]*ethpb.DepositContainer, 0, len(dc.pendingDeposits))
	for _, dp := range dc.pendingDeposits {
		if dp.Index >= merkleTreeIndex {
			cleanDeposits = append(cleanDeposits, dp)
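These three hunks share one micro-optimization: declaring the slice with make and a capacity hint instead of a nil var, so append never has to grow and re-copy the backing array. A minimal sketch:

package main

import "fmt"

func collect(src []int) []int {
	// Capacity is known up front: at most len(src) elements survive the
	// filter, so a single allocation suffices.
	dst := make([]int, 0, len(src))
	for _, v := range src {
		if v%2 == 0 {
			dst = append(dst, v)
		}
	}
	return dst
}

func main() {
	fmt.Println(collect([]int{1, 2, 3, 4})) // [2 4]
}

One behavioral difference worth noting: the make form yields an empty non-nil slice when nothing matches, while the var form yields nil; the two are usually interchangeable but not identical under reflect.DeepEqual.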

@@ -7,6 +7,7 @@ go_library(
        "beacon_committee.go",
        "block.go",
        "genesis.go",
+       "metrics.go",
        "randao.go",
        "rewards_penalties.go",
        "shuffle.go",

@@ -51,10 +51,11 @@ func ValidateSlotTargetEpoch(data *ethpb.AttestationData) error {
// committee count as an argument allows cheaper computation at run time.
//
// Spec pseudocode definition:
//
//	def is_aggregator(state: BeaconState, slot: Slot, index: CommitteeIndex, slot_signature: BLSSignature) -> bool:
//	  committee = get_beacon_committee(state, slot, index)
//	  modulo = max(1, len(committee) // TARGET_AGGREGATORS_PER_COMMITTEE)
//	  return bytes_to_uint64(hash(slot_signature)[0:8]) % modulo == 0
func IsAggregator(committeeCount uint64, slotSig []byte) (bool, error) {
	modulo := uint64(1)
	if committeeCount/params.BeaconConfig().TargetAggregatorsPerCommittee > 1 {
@@ -68,9 +69,10 @@ func IsAggregator(committeeCount uint64, slotSig []byte) (bool, error) {
// AggregateSignature returns the aggregated signature of the input attestations.
//
// Spec pseudocode definition:
//
//	def get_aggregate_signature(attestations: Sequence[Attestation]) -> BLSSignature:
//	  signatures = [attestation.signature for attestation in attestations]
//	  return bls.Aggregate(signatures)
func AggregateSignature(attestations []*ethpb.Attestation) (bls.Signature, error) {
	sigs := make([]bls.Signature, len(attestations))
	var err error
@@ -95,14 +97,15 @@ func IsAggregated(attestation *ethpb.Attestation) bool {
//
// Spec pseudocode definition:
//
//	def compute_subnet_for_attestation(committees_per_slot: uint64, slot: Slot, committee_index: CommitteeIndex) -> uint64:
//	  """
//	  Compute the correct subnet for an attestation for Phase 0.
//	  Note, this mimics expected future behavior where attestations will be mapped to their shard subnet.
//	  """
//	  slots_since_epoch_start = uint64(slot % SLOTS_PER_EPOCH)
//	  committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
//
//	  return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
func ComputeSubnetForAttestation(activeValCount uint64, att *ethpb.Attestation) uint64 {
	return ComputeSubnetFromCommitteeAndSlot(activeValCount, att.Data.CommitteeIndex, att.Data.Slot)
}
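Worked through with mainnet constants (SLOTS_PER_EPOCH = 32, ATTESTATION_SUBNET_COUNT = 64) and assumed inputs committees_per_slot = 4, slot = 100, committee_index = 2: slots_since_epoch_start = 100 % 32 = 4, committees_since_epoch_start = 4 * 4 = 16, and the subnet is (16 + 2) % 64 = 18. The same arithmetic as a standalone sketch:

package main

import "fmt"

const (
	slotsPerEpoch          = 32
	attestationSubnetCount = 64
)

// computeSubnet mirrors the spec pseudocode quoted above.
func computeSubnet(committeesPerSlot, slot, committeeIndex uint64) uint64 {
	slotsSinceEpochStart := slot % slotsPerEpoch
	committeesSinceEpochStart := committeesPerSlot * slotsSinceEpochStart
	return (committeesSinceEpochStart + committeeIndex) % attestationSubnetCount
}

func main() {
	fmt.Println(computeSubnet(4, 100, 2)) // 18
}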

@@ -112,14 +115,15 @@ func ComputeSubnetForAttestation(activeValCount uint64, att *ethpb.Attestation)
//
// Spec pseudocode definition:
//
//	def compute_subnet_for_attestation(committees_per_slot: uint64, slot: Slot, committee_index: CommitteeIndex) -> uint64:
//	  """
//	  Compute the correct subnet for an attestation for Phase 0.
//	  Note, this mimics expected future behavior where attestations will be mapped to their shard subnet.
//	  """
//	  slots_since_epoch_start = uint64(slot % SLOTS_PER_EPOCH)
//	  committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
//
//	  return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
func ComputeSubnetFromCommitteeAndSlot(activeValCount uint64, comIdx types.CommitteeIndex, attSlot types.Slot) uint64 {
	slotSinceStart := slots.SinceEpochStarts(attSlot)
	comCount := SlotCommitteeCount(activeValCount)
@@ -133,13 +137,15 @@ func ComputeSubnetFromCommitteeAndSlot(activeValCount uint64, comIdx types.Commi
// slots.
//
// Example:
//
//	ATTESTATION_PROPAGATION_SLOT_RANGE = 5
//	clockDisparity = 24 seconds
//	current_slot = 100
//	invalid_attestation_slot = 92
//	invalid_attestation_slot = 103
//	valid_attestation_slot = 98
//	valid_attestation_slot = 101
//
// Thus the attestation must be within the range of 95 to 102 in the example above.
func ValidateAttestationTime(attSlot types.Slot, genesisTime time.Time, clockDisparity time.Duration) error {
	if err := slots.ValidateClock(attSlot, uint64(genesisTime.Unix())); err != nil {
@@ -170,13 +176,19 @@ func ValidateAttestationTime(attSlot types.Slot, genesisTime time.Time, clockDis
	lowerBounds := lowerTime.Add(-clockDisparity)

	// Verify attestation slot within the time range.
-	if attTime.Before(lowerBounds) || attTime.After(upperBounds) {
-		return fmt.Errorf(
-			"attestation slot %d not within attestation propagation range of %d to %d (current slot)",
-			attSlot,
-			lowerBoundsSlot,
-			currentSlot,
-		)
-	}
+	attError := fmt.Errorf(
+		"attestation slot %d not within attestation propagation range of %d to %d (current slot)",
+		attSlot,
+		lowerBoundsSlot,
+		currentSlot,
+	)
+	if attTime.Before(lowerBounds) {
+		attReceivedTooEarlyCount.Inc()
+		return attError
+	}
+	if attTime.After(upperBounds) {
+		attReceivedTooLateCount.Inc()
+		return attError
+	}
	return nil
}
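The refactor splits one combined bounds check into two so each failure direction can feed its own Prometheus counter (defined in the new metrics.go file below). A self-contained sketch of the same window check, assuming 12-second slots; the names are hypothetical:

package main

import (
	"fmt"
	"time"
)

const (
	slotDuration     = 12 * time.Second
	propagationRange = 5 // slots, per ATTESTATION_PROPAGATION_SLOT_RANGE
)

// validateAttTime reports whether an attestation's slot time falls inside
// [now - propagationRange*slotDuration - disparity, now + disparity].
func validateAttTime(attTime, now time.Time, disparity time.Duration) error {
	lower := now.Add(-propagationRange * slotDuration).Add(-disparity)
	upper := now.Add(disparity)
	switch {
	case attTime.Before(lower):
		return fmt.Errorf("attestation too early") // would bump attReceivedTooEarlyCount
	case attTime.After(upper):
		return fmt.Errorf("attestation too late") // would bump attReceivedTooLateCount
	}
	return nil
}

func main() {
	now := time.Now()
	fmt.Println(validateAttTime(now.Add(-2*slotDuration), now, 24*time.Second)) // <nil>
}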

@@ -16,7 +16,7 @@ func UpdateGenesisEth1Data(state state.BeaconState, deposits []*ethpb.Deposit, e
		return nil, errors.New("no eth1data provided for genesis state")
	}

-	var leaves [][]byte
+	leaves := make([][]byte, 0, len(deposits))
	for _, deposit := range deposits {
		if deposit == nil || deposit.Data == nil {
			return nil, fmt.Errorf("nil deposit or deposit with nil data cannot be processed: %v", deposit)

beacon-chain/core/helpers/metrics.go (new file, 17 lines)
@@ -0,0 +1,17 @@
+package helpers
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
+)
+
+var (
+	attReceivedTooEarlyCount = promauto.NewCounter(prometheus.CounterOpts{
+		Name: "attestation_too_early_total",
+		Help: "Increased when an attestation is considered too early",
+	})
+	attReceivedTooLateCount = promauto.NewCounter(prometheus.CounterOpts{
+		Name: "attestation_too_late_total",
+		Help: "Increased when an attestation is considered too late",
+	})
+)
@@ -9,8 +9,8 @@ import (
)

// NewDB initializes a new DB.
-func NewDB(ctx context.Context, dirPath string, config *kv.Config) (Database, error) {
-	return kv.NewKVStore(ctx, dirPath, config)
+func NewDB(ctx context.Context, dirPath string) (Database, error) {
+	return kv.NewKVStore(ctx, dirPath)
}

// NewDBFilename uses the KVStoreDatafilePath so that if this layer of

@@ -75,6 +75,7 @@ type NoHeadAccessDatabase interface {
	DeleteStates(ctx context.Context, blockRoots [][32]byte) error
	SaveStateSummary(ctx context.Context, summary *ethpb.StateSummary) error
	SaveStateSummaries(ctx context.Context, summaries []*ethpb.StateSummary) error
+	SaveStateSummariesWithPendingBlocks(ctx context.Context, summaries []*ethpb.StateSummary, blockCache func([32]byte) (interfaces.SignedBeaconBlock, error)) error
	// Checkpoint operations.
	SaveJustifiedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error
	SaveFinalizedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error

@@ -57,6 +57,7 @@ go_library(
        "//monitoring/tracing:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
+       "//time:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_dgraph_io_ristretto//:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",
@@ -72,6 +73,7 @@ go_library(
        "@io_etcd_go_bbolt//:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
+       "@org_uber_go_multierr//:go_default_library",
    ],
)

@@ -13,7 +13,7 @@ import (
)

func TestStore_Backup(t *testing.T) {
-	db, err := NewKVStore(context.Background(), t.TempDir(), &Config{})
+	db, err := NewKVStore(context.Background(), t.TempDir())
	require.NoError(t, err, "Failed to instantiate DB")
	ctx := context.Background()

@@ -44,7 +44,7 @@ func TestStore_Backup(t *testing.T) {
	// our NewKVStore function expects when opening a database.
	require.NoError(t, os.Rename(oldFilePath, newFilePath))

-	backedDB, err := NewKVStore(ctx, backupsPath, &Config{})
+	backedDB, err := NewKVStore(ctx, backupsPath)
	require.NoError(t, err, "Failed to instantiate DB")
	t.Cleanup(func() {
		require.NoError(t, backedDB.Close(), "Failed to close database")
@@ -53,7 +53,7 @@ func TestStore_Backup(t *testing.T) {
	}

func TestStore_BackupMultipleBuckets(t *testing.T) {
-	db, err := NewKVStore(context.Background(), t.TempDir(), &Config{})
+	db, err := NewKVStore(context.Background(), t.TempDir())
	require.NoError(t, err, "Failed to instantiate DB")
	ctx := context.Background()

@@ -88,7 +88,7 @@ func TestStore_BackupMultipleBuckets(t *testing.T) {
	// our NewKVStore function expects when opening a database.
	require.NoError(t, os.Rename(oldFilePath, newFilePath))

-	backedDB, err := NewKVStore(ctx, backupsPath, &Config{})
+	backedDB, err := NewKVStore(ctx, backupsPath)
	require.NoError(t, err, "Failed to instantiate DB")
	t.Cleanup(func() {
		require.NoError(t, backedDB.Close(), "Failed to close database")

@@ -296,32 +296,40 @@ func (s *Store) SaveBlocks(ctx context.Context, blks []interfaces.SignedBeaconBl
		indicesByBucket := createBlockIndicesFromBlock(ctx, blk.Block())
		indicesForBlocks[i] = indicesByBucket
	}
-	return s.db.Update(func(tx *bolt.Tx) error {
+
+	if err := s.db.Update(func(tx *bolt.Tx) error {
		bkt := tx.Bucket(blocksBucket)
-		for i, blk := range blks {
+		for i := range blks {
			if existingBlock := bkt.Get(blockRoots[i]); existingBlock != nil {
				continue
			}
			if err := updateValueForIndices(ctx, indicesForBlocks[i], blockRoots[i], tx); err != nil {
				return errors.Wrap(err, "could not update DB indices")
			}
-			if features.Get().EnableOnlyBlindedBeaconBlocks {
-				blindedBlock, err := blk.ToBlinded()
-				if err != nil {
-					if !errors.Is(err, blocks.ErrUnsupportedVersion) {
-						return err
-					}
-				} else {
-					blk = blindedBlock
-				}
-			}
-			s.blockCache.Set(string(blockRoots[i]), blk, int64(len(encodedBlocks[i])))
			if err := bkt.Put(blockRoots[i], encodedBlocks[i]); err != nil {
				return err
			}
		}
		return nil
-	})
+	}); err != nil {
+		return err
+	}
+
+	// Populate cache only after successful db update.
+	for i, blk := range blks {
+		if features.Get().EnableOnlyBlindedBeaconBlocks {
+			blindedBlock, err := blk.ToBlinded()
+			if err != nil {
+				if !errors.Is(err, blocks.ErrUnsupportedVersion) {
+					return err
+				}
+			} else {
+				blk = blindedBlock
+			}
+		}
+		s.blockCache.Set(string(blockRoots[i]), blk, int64(len(encodedBlocks[i])))
+	}
+	return nil
}
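The reordering above fixes a subtle consistency issue: previously the block cache was populated inside the bolt transaction, so a failed Update could leave blocks in the cache that never reached disk. A generic sketch of the commit-then-cache ordering, with hypothetical store types:

package main

import "fmt"

type store struct {
	disk  map[string][]byte
	cache map[string][]byte
}

func (s *store) save(key string, val []byte) error {
	// 1. Commit to durable storage first.
	if err := s.writeDisk(key, val); err != nil {
		return err // cache untouched: no phantom entries on failure
	}
	// 2. Only then populate the cache.
	s.cache[key] = val
	return nil
}

func (s *store) writeDisk(key string, val []byte) error {
	s.disk[key] = val
	return nil
}

func main() {
	s := &store{disk: map[string][]byte{}, cache: map[string][]byte{}}
	fmt.Println(s.save("root", []byte{1}))
}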

// SaveHeadBlockRoot to the db.

@@ -9,6 +9,7 @@ var ErrDeleteJustifiedAndFinalized = errors.New("cannot delete finalized block o
// indicate that a value couldn't be found.
var ErrNotFound = errors.New("not found in db")
var ErrNotFoundState = errors.Wrap(ErrNotFound, "state not found")
+var ErrNotFoundBlock = errors.Wrap(ErrNotFound, "block not found")

// ErrNotFoundOriginBlockRoot is an error specifically for the origin block root getter
var ErrNotFoundOriginBlockRoot = errors.Wrap(ErrNotFound, "OriginBlockRoot")
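Because ErrNotFoundBlock wraps ErrNotFound, callers can match either the specific or the generic sentinel with errors.Is, which is what the state summary test further below relies on via require.ErrorIs. A minimal sketch, assuming github.com/pkg/errors v0.9+ (whose Wrap result implements Unwrap):

package main

import (
	stderrors "errors"
	"fmt"

	"github.com/pkg/errors"
)

var (
	errNotFound      = stderrors.New("not found in db")
	errNotFoundBlock = errors.Wrap(errNotFound, "block not found")
)

func main() {
	// Matching the wrapped sentinel also matches its underlying cause.
	fmt.Println(stderrors.Is(errNotFoundBlock, errNotFound))      // true
	fmt.Println(stderrors.Is(errNotFoundBlock, errNotFoundBlock)) // true
}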

@@ -37,6 +37,8 @@ const (
	boltAllocSize = 8 * 1024 * 1024
	// The size of hash length in bytes
	hashLength = 32
+	// Specifies the initial mmap size of bolt.
+	mmapSize = 536870912
)

var (
@@ -53,6 +55,14 @@ var (
		Name: "validator_entry_cache_delete_total",
		Help: "The total number of cache deletes on the validator entry cache.",
	})
+	stateReadingTime = promauto.NewSummary(prometheus.SummaryOpts{
+		Name: "db_beacon_state_reading_milliseconds",
+		Help: "Milliseconds it takes to read a beacon state from the DB",
+	})
+	stateSavingTime = promauto.NewSummary(prometheus.SummaryOpts{
+		Name: "db_beacon_state_saving_milliseconds",
+		Help: "Milliseconds it takes to save a beacon state to the DB",
+	})
)

// BlockCacheSize specifies 1000 slots worth of blocks cached, which
@@ -70,11 +80,6 @@ var blockedBuckets = [][]byte{
	finalizedBlockRootsIndexBucket,
}

-// Config for the bolt db kv store.
-type Config struct {
-	InitialMMapSize int
-}

// Store defines an implementation of the Prysm Database interface
// using BoltDB as the underlying persistent kv-store for Ethereum Beacon Nodes.
type Store struct {
@@ -96,7 +101,7 @@ func KVStoreDatafilePath(dirPath string) string {
// NewKVStore initializes a new boltDB key-value store at the directory
// path specified, creates the kv-buckets based on the schema, and stores
// an open connection db object as a property of the Store struct.
-func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, error) {
+func NewKVStore(ctx context.Context, dirPath string) (*Store, error) {
	hasDir, err := file.HasDir(dirPath)
	if err != nil {
		return nil, err
@@ -113,7 +118,7 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
	params.BeaconIoConfig().ReadWritePermissions,
	&bolt.Options{
		Timeout: 1 * time.Second,
-		InitialMmapSize: config.InitialMMapSize,
+		InitialMmapSize: mmapSize,
	},
)
if err != nil {

@@ -11,7 +11,7 @@ import (

// setupDB instantiates and returns a Store instance.
func setupDB(t testing.TB) *Store {
-	db, err := NewKVStore(context.Background(), t.TempDir(), &Config{})
+	db, err := NewKVStore(context.Background(), t.TempDir())
	require.NoError(t, err, "Failed to instantiate DB")
	t.Cleanup(func() {
		require.NoError(t, db.Close(), "Failed to close database")

@@ -20,6 +20,7 @@ import (
	"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
	"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
+	"github.com/prysmaticlabs/prysm/v3/time"
	"github.com/prysmaticlabs/prysm/v3/time/slots"
	bolt "go.etcd.io/bbolt"
	"go.opencensus.io/trace"
@@ -30,6 +31,7 @@ import (
func (s *Store) State(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.State")
	defer span.End()
+	startTime := time.Now()
	enc, err := s.stateBytes(ctx, blockRoot)
	if err != nil {
		return nil, err
@@ -44,7 +46,12 @@ func (s *Store) State(ctx context.Context, blockRoot [32]byte) (state.BeaconStat
		return nil, valErr
	}

-	return s.unmarshalState(ctx, enc, valEntries)
+	st, err := s.unmarshalState(ctx, enc, valEntries)
+	if err != nil {
+		return nil, err
+	}
+	stateReadingTime.Observe(float64(time.Since(startTime).Milliseconds()))
+	return st, err
}
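The instrumentation pattern added here records a start time and observes the elapsed milliseconds into a Summary on the success path only. A minimal self-contained sketch of the same pattern, with hypothetical read/load names:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var readTime = promauto.NewSummary(prometheus.SummaryOpts{
	Name: "example_read_milliseconds",
	Help: "Milliseconds a read takes",
})

func read() ([]byte, error) {
	start := time.Now()
	data, err := load()
	if err != nil {
		return nil, err // failures are not observed, matching the hunk above
	}
	readTime.Observe(float64(time.Since(start).Milliseconds()))
	return data, nil
}

func load() ([]byte, error) { return []byte{0x1}, nil }

func main() { _, _ = read() }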

// StateOrError is just like State(), except it only returns a non-error response
@@ -127,6 +134,7 @@ func (s *Store) SaveStates(ctx context.Context, states []state.ReadOnlyBeaconSta
	if states == nil {
		return errors.New("nil state")
	}
+	startTime := time.Now()
	multipleEncs := make([][]byte, len(states))
	for i, st := range states {
		stateBytes, err := marshalState(ctx, st)
@@ -136,7 +144,7 @@ func (s *Store) SaveStates(ctx context.Context, states []state.ReadOnlyBeaconSta
		multipleEncs[i] = stateBytes
	}

-	return s.db.Update(func(tx *bolt.Tx) error {
+	if err := s.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(stateBucket)
		for i, rt := range blockRoots {
			indicesByBucket := createStateIndicesFromStateSlot(ctx, states[i].Slot())
@@ -148,7 +156,11 @@ func (s *Store) SaveStates(ctx context.Context, states []state.ReadOnlyBeaconSta
			}
		}
		return nil
-	})
+	}); err != nil {
+		return err
+	}
+	stateSavingTime.Observe(float64(time.Since(startTime).Milliseconds()))
+	return nil
}

type withValidators interface {
@@ -760,8 +772,10 @@ func createStateIndicesFromStateSlot(ctx context.Context, slot types.Slot) map[s
// Only following states would be kept:
// 1.) state_slot % archived_interval == 0. (e.g. archived_interval=2048, states with slot 2048, 4096... etc)
// 2.) archived_interval - archived_interval/3 < state_slot % archived_interval
//	(e.g. archived_interval=2048, states with slots after 1365).
//	This is to tolerate skip slots. Not every state lies on the boundary.
//
// 3.) state with current finalized root
// 4.) unfinalized States
func (s *Store) CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint types.Slot) error {
@@ -2,11 +2,15 @@ package kv

import (
	"context"
	"fmt"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
	"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
	bolt "go.etcd.io/bbolt"
	"go.opencensus.io/trace"
	"go.uber.org/multierr"
)

// SaveStateSummary saves a state summary object to the DB.
@@ -21,10 +25,20 @@ func (s *Store) SaveStateSummary(ctx context.Context, summary *ethpb.StateSummar
func (s *Store) SaveStateSummaries(ctx context.Context, summaries []*ethpb.StateSummary) error {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveStateSummaries")
	defer span.End()
+	return s.SaveStateSummariesWithPendingBlocks(ctx, summaries, nil)
+}
+
+// SaveStateSummariesWithPendingBlocks saves state summary objects to the DB.
+func (s *Store) SaveStateSummariesWithPendingBlocks(ctx context.Context, summaries []*ethpb.StateSummary, blockCache func([32]byte) (interfaces.SignedBeaconBlock, error)) error {
+	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveStateSummariesWithPendingBlocks")
+	defer span.End()

	// When we reach the state summary cache prune count,
	// dump the cached state summaries to the DB.
	if s.stateSummaryCache.len() >= stateSummaryCachePruneCount {
+		if err := s.ensureBlocksSaved(ctx, blockCache); err != nil {
+			return err
+		}
		if err := s.saveCachedStateSummariesDB(ctx); err != nil {
			return err
		}
@@ -37,6 +51,25 @@ func (s *Store) SaveStateSummaries(ctx context.Context, summaries []*ethpb.State
	return nil
}

+func (s *Store) ensureBlocksSaved(ctx context.Context, blockCache func([32]byte) (interfaces.SignedBeaconBlock, error)) error {
+	if blockCache == nil {
+		return nil
+	}
+	summaries := s.stateSummaryCache.getAll()
+	blocks := make([]interfaces.SignedBeaconBlock, 0, len(summaries))
+	for _, ss := range summaries {
+		if s.HasBlock(ctx, bytesutil.ToBytes32(ss.Root)) {
+			log.WithField("blockRoot", fmt.Sprintf("%#x", ss.Root)).Trace("Block already saved")
+			continue
+		}
+		if b, err := blockCache(bytesutil.ToBytes32(ss.Root)); err == nil && !b.IsNil() {
+			blocks = append(blocks, b)
+		}
+	}
+	log.WithField("blocks", len(blocks)).WithField("summaries", len(summaries)).Debug("Saving blocks from state summary cache")
+	return s.SaveBlocks(ctx, blocks)
+}
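The blockCache parameter lets the caller (initial sync, which holds not-yet-persisted blocks in memory) hand the DB a lookup function so that every flushed state summary has its block saved first. A sketch of the callback shape, with hypothetical types:

package main

import (
	"errors"
	"fmt"
)

var errNotFoundBlock = errors.New("block not found")

type block struct{ slot uint64 }

// lookup is the shape of the blockCache argument: resolve a root to a
// pending block, or report that the cache does not have it.
type lookup func(root [32]byte) (*block, error)

func ensureSaved(roots [][32]byte, cache lookup) []*block {
	var toSave []*block
	for _, r := range roots {
		if b, err := cache(r); err == nil {
			toSave = append(toSave, b)
		}
	}
	return toSave
}

func main() {
	pending := map[[32]byte]*block{{1}: {slot: 7}}
	fn := func(r [32]byte) (*block, error) {
		b, ok := pending[r]
		if !ok {
			return nil, errNotFoundBlock
		}
		return b, nil
	}
	fmt.Println(len(ensureSaved([][32]byte{{1}, {2}}, fn))) // 1
}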

// StateSummary returns the state summary object from the db using input block root.
func (s *Store) StateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.StateSummary")
@@ -96,10 +129,16 @@ func (s *Store) saveCachedStateSummariesDB(ctx context.Context) error {
		}
		encs[i] = enc
	}
+	var errs []error
	if err := s.db.Update(func(tx *bolt.Tx) error {
-		bucket := tx.Bucket(stateSummaryBucket)
+		bBkt := tx.Bucket(blocksBucket)
+		ssBkt := tx.Bucket(stateSummaryBucket)
		for i, s := range summaries {
-			if err := bucket.Put(s.Root, encs[i]); err != nil {
+			if bBkt.Get(s.Root) == nil {
+				errs = append(errs, errors.Wrapf(ErrNotFoundBlock, "failed to save state summary with block root %#x", s.Root))
+				continue
+			}
+			if err := ssBkt.Put(s.Root, encs[i]); err != nil {
				return err
			}
		}
@@ -108,7 +147,7 @@ func (s *Store) saveCachedStateSummariesDB(ctx context.Context) error {
		return err
	}
	s.stateSummaryCache.clear()
-	return nil
+	return multierr.Combine(errs...)
}
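Rather than aborting the whole flush on the first summary whose block is missing, the loop accumulates per-summary failures and combines them at the end; go.uber.org/multierr's Combine returns nil for an empty list, so the success path is unchanged. A small sketch:

package main

import (
	"errors"
	"fmt"

	"go.uber.org/multierr"
)

func flush(items []int) error {
	var errs []error
	for _, it := range items {
		if it < 0 {
			// Record and keep going; the other items still get flushed.
			errs = append(errs, fmt.Errorf("cannot flush %d: %w", it, errors.New("negative")))
			continue
		}
	}
	// nil when errs is empty, a combined error otherwise.
	return multierr.Combine(errs...)
}

func main() {
	fmt.Println(flush([]int{1, 2}))  // <nil>
	fmt.Println(flush([]int{1, -2})) // cannot flush -2: negative
}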

// deleteStateSummary deletes a state summary object from the db using input block root.

@@ -4,18 +4,27 @@ import (
	"context"
	"testing"

+	"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
+	"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v3/testing/assert"
	"github.com/prysmaticlabs/prysm/v3/testing/require"
+	"github.com/prysmaticlabs/prysm/v3/testing/util"
)

func TestStateSummary_CanSaveRetrieve(t *testing.T) {
	db := setupDB(t)
	ctx := context.Background()
-	r1 := bytesutil.ToBytes32([]byte{'A'})
-	r2 := bytesutil.ToBytes32([]byte{'B'})
+	b1 := util.NewBeaconBlock()
+	b1.Block.Slot = 1
+	r1, err := util.SaveBlock(t, ctx, db, b1).Block().HashTreeRoot()
+	require.NoError(t, err)
+	b2 := util.NewBeaconBlock()
+	b2.Block.Slot = 2
+	r2, err := util.SaveBlock(t, ctx, db, b2).Block().HashTreeRoot()
+	require.NoError(t, err)
	s1 := &ethpb.StateSummary{Slot: 1, Root: r1[:]}

	// State summary should not exist yet.
@@ -43,23 +52,130 @@ func TestStateSummary_CanSaveRetrieve(t *testing.T) {
func TestStateSummary_CacheToDB(t *testing.T) {
	db := setupDB(t)

+	ctx := context.Background()
+
	summaries := make([]*ethpb.StateSummary, stateSummaryCachePruneCount-1)
+	roots := make([][32]byte, stateSummaryCachePruneCount-1)
	for i := range summaries {
-		summaries[i] = &ethpb.StateSummary{Slot: types.Slot(i), Root: bytesutil.PadTo(bytesutil.Uint64ToBytesLittleEndian(uint64(i)), 32)}
+		b := util.NewBeaconBlock()
+		b.Block.Slot = types.Slot(i)
+		b.Block.Body.Graffiti = bytesutil.PadTo([]byte{byte(i)}, 32)
+		r, err := util.SaveBlock(t, ctx, db, b).Block().HashTreeRoot()
+		require.NoError(t, err)
+		summaries[i] = &ethpb.StateSummary{Slot: types.Slot(i), Root: r[:]}
+		roots[i] = r
	}

	require.NoError(t, db.SaveStateSummaries(context.Background(), summaries))
	require.Equal(t, db.stateSummaryCache.len(), stateSummaryCachePruneCount-1)

-	require.NoError(t, db.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1000, Root: []byte{'a', 'b'}}))
+	b := util.NewBeaconBlock()
+	b.Block.Slot = types.Slot(1000)
+	r, err := util.SaveBlock(t, ctx, db, b).Block().HashTreeRoot()
+	require.NoError(t, err)
+
+	require.NoError(t, db.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1000, Root: r[:]}))
	require.Equal(t, db.stateSummaryCache.len(), stateSummaryCachePruneCount)

-	require.NoError(t, db.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1001, Root: []byte{'c', 'd'}}))
+	b = util.NewBeaconBlock()
+	b.Block.Slot = 1001
+
+	r, err = util.SaveBlock(t, ctx, db, b).Block().HashTreeRoot()
+	require.NoError(t, err)
+
+	require.NoError(t, db.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1001, Root: r[:]}))
	require.Equal(t, db.stateSummaryCache.len(), 1)
+
+	for _, r := range roots {
+		require.Equal(t, true, db.HasStateSummary(context.Background(), r))
+	}
}

func TestStateSummary_CacheToDB_FailsIfMissingBlock(t *testing.T) {
	db := setupDB(t)

	ctx := context.Background()

	summaries := make([]*ethpb.StateSummary, stateSummaryCachePruneCount-1)
	for i := range summaries {
-		r := bytesutil.Uint64ToBytesLittleEndian(uint64(i))
-		require.Equal(t, true, db.HasStateSummary(context.Background(), bytesutil.ToBytes32(r)))
		b := util.NewBeaconBlock()
		b.Block.Slot = types.Slot(i)
		b.Block.Body.Graffiti = bytesutil.PadTo([]byte{byte(i)}, 32)
		r, err := util.SaveBlock(t, ctx, db, b).Block().HashTreeRoot()
		require.NoError(t, err)
		summaries[i] = &ethpb.StateSummary{Slot: types.Slot(i), Root: r[:]}
	}

	require.NoError(t, db.SaveStateSummaries(context.Background(), summaries))
	require.Equal(t, db.stateSummaryCache.len(), stateSummaryCachePruneCount-1)

	junkRoot := [32]byte{1, 2, 3}

	require.NoError(t, db.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1000, Root: junkRoot[:]}))
	require.Equal(t, db.stateSummaryCache.len(), stateSummaryCachePruneCount)

	// Next insertion causes the buffer to flush.
	b := util.NewBeaconBlock()
	b.Block.Slot = 1001
	r, err := util.SaveBlock(t, ctx, db, b).Block().HashTreeRoot()
	require.NoError(t, err)
	require.ErrorIs(t, db.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1001, Root: r[:]}), ErrNotFoundBlock)
	require.Equal(t, db.HasStateSummary(ctx, junkRoot), false)

	require.NoError(t, db.deleteStateSummary(junkRoot)) // Delete bad summary or db will throw an error on test cleanup.
}
func TestStateSummary_CacheToDB_UsesProvidedBlockCache(t *testing.T) {
	db := setupDB(t)

	ctx := context.Background()

	summaries := make([]*ethpb.StateSummary, stateSummaryCachePruneCount-1)
	bCache := make(map[[32]byte]interfaces.SignedBeaconBlock, stateSummaryCachePruneCount-1)
	for i := range summaries {
		b := util.NewBeaconBlock()
		b.Block.Slot = types.Slot(i)
		b.Block.Body.Graffiti = bytesutil.PadTo([]byte{byte(i)}, 32)
		wsb, err := blocks.NewSignedBeaconBlock(b)
		require.NoError(t, err)
		r, err := wsb.Block().HashTreeRoot()
		require.NoError(t, err)
		bCache[r] = wsb
		summaries[i] = &ethpb.StateSummary{Slot: types.Slot(i), Root: r[:]}
	}

	// Simulated initial sync block cache.
	pendingBlocksFn := func(r [32]byte) (interfaces.SignedBeaconBlock, error) {
		b, ok := bCache[r]
		if !ok {
			return nil, ErrNotFoundBlock
		}
		return b, nil
	}

	require.NoError(t, db.SaveStateSummariesWithPendingBlocks(context.Background(), summaries, pendingBlocksFn))
	require.Equal(t, db.stateSummaryCache.len(), stateSummaryCachePruneCount-1)

	b := util.NewBeaconBlock()
	b.Block.Slot = types.Slot(1000)
	r, err := util.SaveBlock(t, ctx, db, b).Block().HashTreeRoot()
	require.NoError(t, err)

	require.NoError(t, db.SaveStateSummariesWithPendingBlocks(context.Background(), []*ethpb.StateSummary{{Slot: 1000, Root: r[:]}}, pendingBlocksFn))
	require.Equal(t, db.stateSummaryCache.len(), stateSummaryCachePruneCount)

	// Next insertion causes the buffer to flush.
	b = util.NewBeaconBlock()
	b.Block.Slot = 1001
	r, err = util.SaveBlock(t, ctx, db, b).Block().HashTreeRoot()
	require.NoError(t, err)

	require.NoError(t, db.SaveStateSummariesWithPendingBlocks(context.Background(), []*ethpb.StateSummary{{Slot: 1001, Root: r[:]}}, pendingBlocksFn))
	require.Equal(t, db.stateSummaryCache.len(), 1)

	// All blocks in the bCache should have been saved.
	for r := range bCache {
		require.Equal(t, true, db.HasBlock(ctx, r), "Block %#x was not saved to db", r)
	}
}

@@ -22,7 +22,7 @@ func TestRestore(t *testing.T) {
	logHook := logTest.NewGlobal()
	ctx := context.Background()

-	backupDb, err := kv.NewKVStore(context.Background(), t.TempDir(), &kv.Config{})
+	backupDb, err := kv.NewKVStore(context.Background(), t.TempDir())
	require.NoError(t, err)
	head := util.NewBeaconBlock()
	head.Block.Slot = 5000
@@ -58,7 +58,7 @@ func TestRestore(t *testing.T) {
	require.NoError(t, err)
	assert.Equal(t, 1, len(files))
	assert.Equal(t, kv.DatabaseFileName, files[0].Name())
-	restoredDb, err := kv.NewKVStore(context.Background(), path.Join(restoreDir, kv.BeaconNodeDbDirName), &kv.Config{})
+	restoredDb, err := kv.NewKVStore(context.Background(), path.Join(restoreDir, kv.BeaconNodeDbDirName))
	defer func() {
		require.NoError(t, restoredDb.Close())
	}()

@@ -21,13 +21,10 @@ const (
	// DatabaseFileName is the name of the slasher database.
	DatabaseFileName = "slasher.db"
	boltAllocSize = 8 * 1024 * 1024
+	// Specifies the initial mmap size of bolt.
+	mmapSize = 536870912
)

-// Config for the bolt db kv store.
-type Config struct {
-	InitialMMapSize int
-}

// Store defines an implementation of the Prysm Database interface
// using BoltDB as the underlying persistent kv-store for Ethereum consensus.
type Store struct {
@@ -39,7 +36,7 @@ type Store struct {
// NewKVStore initializes a new boltDB key-value store at the directory
// path specified, creates the kv-buckets based on the schema, and stores
// an open connection db object as a property of the Store struct.
-func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, error) {
+func NewKVStore(ctx context.Context, dirPath string) (*Store, error) {
	hasDir, err := file.HasDir(dirPath)
	if err != nil {
		return nil, err
@@ -55,7 +52,7 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
	params.BeaconIoConfig().ReadWritePermissions,
	&bolt.Options{
		Timeout: 1 * time.Second,
-		InitialMmapSize: config.InitialMMapSize,
+		InitialMmapSize: mmapSize,
	},
)
if err != nil {

@@ -9,7 +9,7 @@ import (

// setupDB instantiates and returns a Store instance.
func setupDB(t testing.TB) *Store {
-	db, err := NewKVStore(context.Background(), t.TempDir(), &Config{})
+	db, err := NewKVStore(context.Background(), t.TempDir())
	require.NoError(t, err, "Failed to instantiate DB")
	t.Cleanup(func() {
		require.NoError(t, db.Close(), "Failed to close database")
@@ -14,7 +14,7 @@ import (

// SetupDB instantiates and returns database backed by key value store.
func SetupDB(t testing.TB) db.Database {
-	s, err := kv.NewKVStore(context.Background(), t.TempDir(), &kv.Config{})
+	s, err := kv.NewKVStore(context.Background(), t.TempDir())
	if err != nil {
		t.Fatal(err)
	}
@@ -28,7 +28,7 @@ func SetupDB(t testing.TB) db.Database {

// SetupSlasherDB --
func SetupSlasherDB(t testing.TB) iface.SlasherDatabase {
-	s, err := slasherkv.NewKVStore(context.Background(), t.TempDir(), &slasherkv.Config{})
+	s, err := slasherkv.NewKVStore(context.Background(), t.TempDir())
	if err != nil {
		t.Fatal(err)
	}

@@ -24,12 +24,18 @@ var (
	configMismatchLog = "Configuration mismatch between your execution client and Prysm. " +
		"Please check your execution client and restart it with the proper configuration. If this is not done, " +
		"your node will not be able to complete the proof-of-stake transition"
+	needsEnginePortLog = "Could not check execution client configuration. " +
+		"You are probably connecting to your execution client on the wrong port. For the Ethereum " +
+		"merge, you will need to connect to your " +
+		"execution client on port 8551 rather than 8545. This is known as the 'engine API' port and needs to be " +
+		"authenticated if connecting via HTTP. See our documentation on how to set this up here " +
+		"https://docs.prylabs.network/docs/execution-node/authentication"
)

// Checks the transition configuration between Prysm and the connected execution node to ensure
// there are no differences in terminal block difficulty and block hash.
// If there are any discrepancies, we must log errors to ensure users can resolve
-//the problem and be ready for the merge transition.
+// the problem and be ready for the merge transition.
func (s *Service) checkTransitionConfiguration(
	ctx context.Context, blockNotifications chan *feed.Event,
) {
@@ -48,10 +54,14 @@ func (s *Service) checkTransitionConfiguration(
	}
	err := s.ExchangeTransitionConfiguration(ctx, cfg)
	if err != nil {
-		if errors.Is(err, ErrConfigMismatch) {
+		switch {
+		case errors.Is(err, ErrConfigMismatch):
			log.WithError(err).Fatal(configMismatchLog)
+		case errors.Is(err, ErrMethodNotFound):
+			log.WithError(err).Error(needsEnginePortLog)
+		default:
+			log.WithError(err).Error("Could not check configuration values between execution and consensus client")
		}
-		log.WithError(err).Error("Could not check configuration values between execution and consensus client")
	}

// We poll the execution client to see if the transition configuration has changed.
@@ -115,6 +125,9 @@ func (s *Service) handleExchangeConfigurationError(err error) {
		s.runError = err
		log.WithError(err).Error(configMismatchLog)
		return
+	} else if errors.Is(err, ErrMethodNotFound) {
+		log.WithError(err).Error(needsEnginePortLog)
+		return
	}
	log.WithError(err).Error("Could not check configuration values between execution and consensus client")
}

@@ -537,22 +537,31 @@ func handleRPCError(err error) error {
	}
	switch e.ErrorCode() {
	case -32700:
+		errParseCount.Inc()
		return ErrParse
	case -32600:
+		errInvalidRequestCount.Inc()
		return ErrInvalidRequest
	case -32601:
+		errMethodNotFoundCount.Inc()
		return ErrMethodNotFound
	case -32602:
+		errInvalidParamsCount.Inc()
		return ErrInvalidParams
	case -32603:
+		errInternalCount.Inc()
		return ErrInternal
	case -38001:
+		errUnknownPayloadCount.Inc()
		return ErrUnknownPayload
	case -38002:
+		errInvalidForkchoiceStateCount.Inc()
		return ErrInvalidForkchoiceState
	case -38003:
+		errInvalidPayloadAttributesCount.Inc()
		return ErrInvalidPayloadAttributes
	case -32000:
+		errServerErrorCount.Inc()
		// Only -32000 status codes are data errors in the RPC specification.
		errWithData, ok := err.(rpc.DataError)
		if !ok {
@@ -403,6 +403,10 @@ func (s *Service) processBlockInBatch(ctx context.Context, currentBlockNum uint6
		}
	}

+	s.latestEth1DataLock.RLock()
+	lastReqBlock := s.latestEth1Data.LastRequestedBlock
+	s.latestEth1DataLock.RUnlock()
+
	for _, filterLog := range logs {
		if filterLog.BlockNumber > currentBlockNum {
			if err := s.checkHeaderRange(ctx, currentBlockNum, filterLog.BlockNumber-1, headersMap, requestHeaders); err != nil {
@@ -415,6 +419,13 @@ func (s *Service) processBlockInBatch(ctx context.Context, currentBlockNum uint6
			currentBlockNum = filterLog.BlockNumber
		}
		if err := s.ProcessLog(ctx, filterLog); err != nil {
+			// In the event the execution client gives us a garbled/bad log
+			// we reset the last requested block to the previous valid block range. This
+			// prevents the beacon from advancing processing of logs to another range
+			// in the event of an execution client failure.
+			s.latestEth1DataLock.Lock()
+			s.latestEth1Data.LastRequestedBlock = lastReqBlock
+			s.latestEth1DataLock.Unlock()
			return 0, 0, err
		}
	}
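The snapshot-then-restore pattern added here captures LastRequestedBlock under a read lock before processing, and restores it under a write lock if any log fails, so a bad batch is re-requested instead of skipped. A generic sketch with hypothetical names:

package main

import (
	"errors"
	"fmt"
	"sync"
)

type tracker struct {
	mu   sync.RWMutex
	last uint64
}

func (t *tracker) process(logs []int) error {
	t.mu.RLock()
	checkpoint := t.last // snapshot before any mutation
	t.mu.RUnlock()

	for _, l := range logs {
		if l < 0 {
			// Roll back so the whole range is retried on the next poll.
			t.mu.Lock()
			t.last = checkpoint
			t.mu.Unlock()
			return errors.New("garbled log")
		}
		t.mu.Lock()
		t.last++
		t.mu.Unlock()
	}
	return nil
}

func main() {
	tr := &tracker{last: 10}
	_ = tr.process([]int{1, -1})
	fmt.Println(tr.last) // 10: restored to the checkpoint
}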

@@ -31,6 +31,42 @@ var (
			Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
		},
	)
+	errParseCount = promauto.NewCounter(prometheus.CounterOpts{
+		Name: "execution_parse_error_count",
+		Help: "The number of errors that occurred while parsing execution payload",
+	})
+	errInvalidRequestCount = promauto.NewCounter(prometheus.CounterOpts{
+		Name: "execution_invalid_request_count",
+		Help: "The number of errors that occurred due to invalid request",
+	})
+	errMethodNotFoundCount = promauto.NewCounter(prometheus.CounterOpts{
+		Name: "execution_method_not_found_count",
+		Help: "The number of errors that occurred due to method not found",
+	})
+	errInvalidParamsCount = promauto.NewCounter(prometheus.CounterOpts{
+		Name: "execution_invalid_params_count",
+		Help: "The number of errors that occurred due to invalid params",
+	})
+	errInternalCount = promauto.NewCounter(prometheus.CounterOpts{
+		Name: "execution_internal_error_count",
+		Help: "The number of errors that occurred due to internal error",
+	})
+	errUnknownPayloadCount = promauto.NewCounter(prometheus.CounterOpts{
+		Name: "execution_unknown_payload_count",
+		Help: "The number of errors that occurred due to unknown payload",
+	})
+	errInvalidForkchoiceStateCount = promauto.NewCounter(prometheus.CounterOpts{
+		Name: "execution_invalid_forkchoice_state_count",
+		Help: "The number of errors that occurred due to invalid forkchoice state",
+	})
+	errInvalidPayloadAttributesCount = promauto.NewCounter(prometheus.CounterOpts{
+		Name: "execution_invalid_payload_attributes_count",
+		Help: "The number of errors that occurred due to invalid payload attributes",
+	})
+	errServerErrorCount = promauto.NewCounter(prometheus.CounterOpts{
+		Name: "execution_server_error_count",
+		Help: "The number of errors that occurred due to server error",
+	})
	reconstructedExecutionPayloadCount = promauto.NewCounter(prometheus.CounterOpts{
		Name: "reconstructed_execution_payload_count",
		Help: "Count the number of execution payloads that are reconstructed using JSON-RPC from payload headers",
@@ -36,6 +36,14 @@ func WithHttpEndpointAndJWTSecret(endpointString string, secret []byte) Option {
	}
}

+// WithHeaders adds headers to the execution node JSON-RPC requests.
+func WithHeaders(headers []string) Option {
+	return func(s *Service) error {
+		s.cfg.headers = headers
+		return nil
+	}
+}
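WithHeaders follows the functional-options pattern used throughout this service: an Option is a closure that mutates the config at construction time. A sketch of hypothetical usage, assuming a constructor that applies options in order:

package main

import "fmt"

type service struct{ headers []string }

type option func(*service) error

func withHeaders(h []string) option {
	return func(s *service) error {
		s.headers = h
		return nil
	}
}

func newService(opts ...option) (*service, error) {
	s := &service{}
	for _, o := range opts {
		if err := o(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	s, _ := newService(withHeaders([]string{"X-Trace=abc"}))
	fmt.Println(s.headers) // [X-Trace=abc]
}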

// WithDepositContractAddress for the deposit contract.
func WithDepositContractAddress(addr common.Address) Option {
	return func(s *Service) error {
@@ -4,6 +4,7 @@ import (
	"context"
	"fmt"
	"net/url"
+	"strings"
	"time"

	"github.com/ethereum/go-ethereum/ethclient"
@@ -37,7 +38,13 @@ func (s *Service) setupExecutionClientConnections(ctx context.Context, currEndpo
	// Ensure we have the correct chain and deposit IDs.
	if err := ensureCorrectExecutionChain(ctx, fetcher); err != nil {
		client.Close()
-		return errors.Wrap(err, "could not make initial request to verify execution chain ID")
+		errStr := err.Error()
+		if strings.Contains(errStr, "401 Unauthorized") {
+			errStr = "could not verify execution chain ID as your connection is not authenticated. " +
+				"If connecting to your execution client via HTTP, you will need to set up JWT authentication. " +
+				"See our documentation here https://docs.prylabs.network/docs/execution-node/authentication"
+		}
+		return errors.Wrap(err, errStr)
	}
	s.updateConnectedETH1(true)
	s.runError = nil
@@ -65,7 +72,6 @@ func (s *Service) pollConnectionStatus(ctx context.Context) {
	currClient := s.rpcClient
	if err := s.setupExecutionClientConnections(ctx, s.cfg.currHttpEndpoint); err != nil {
-		errorLogger(err, "Could not connect to execution client endpoint")
		s.retryExecutionClientConnection(ctx, err)
		continue
	}
	// Close previous client, if connection was successful.
@@ -114,7 +120,7 @@ func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.End
		if err != nil {
			return nil, err
		}
-	case "":
+	case "", "ipc":
		client, err = gethRPC.DialIPC(ctx, endpoint.Url)
		if err != nil {
			return nil, err
@@ -129,6 +135,16 @@ func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.End
		}
		client.SetHeader("Authorization", header)
	}
+	for _, h := range s.cfg.headers {
+		if h != "" {
+			keyValue := strings.Split(h, "=")
+			if len(keyValue) < 2 {
+				log.Warnf("Incorrect HTTP header flag format. Skipping %v", keyValue[0])
+				continue
+			}
+			client.SetHeader(keyValue[0], strings.Join(keyValue[1:], "="))
+		}
+	}
	return client, nil
}
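Note how the header parser tolerates '=' inside values: splitting on every '=' and re-joining everything after the first element preserves values such as tokens that themselves contain '='. A sketch of just that parsing step:

package main

import (
	"fmt"
	"strings"
)

// parseHeader splits a "Key=Value" flag, keeping any '=' inside the value.
func parseHeader(h string) (key, value string, ok bool) {
	parts := strings.Split(h, "=")
	if len(parts) < 2 {
		return "", "", false // matches the "Incorrect HTTP header flag format" warning path
	}
	return parts[0], strings.Join(parts[1:], "="), true
}

func main() {
	k, v, _ := parseHeader("Authorization=Bearer a=b")
	fmt.Println(k, "->", v) // Authorization -> Bearer a=b
}

strings.SplitN(h, "=", 2) would express the same intent in a single call; the split-and-rejoin form shown in the hunk is equivalent.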

@@ -66,10 +66,6 @@ var (
	logThreshold = 8
	// period to log chainstart related information
	logPeriod = 1 * time.Minute
-	// threshold of how old we will accept an eth1 node's head to be.
-	eth1Threshold = 20 * time.Minute
-	// error when eth1 node is too far behind.
-	errFarBehind = errors.Errorf("eth1 head is more than %s behind from current wall clock time", eth1Threshold.String())
)

// ChainStartFetcher retrieves information pertaining to the chain start event
@@ -128,6 +124,7 @@ type config struct {
	eth1HeaderReqLimit uint64
	beaconNodeStatsUpdater BeaconNodeStatsUpdater
	currHttpEndpoint network.Endpoint
+	headers []string
	finalizedStateAtStartup state.BeaconState
}

@@ -316,11 +313,6 @@ func (s *Service) updateBeaconNodeStats() {
	s.cfg.beaconNodeStatsUpdater.Update(bs)
}

-func (s *Service) updateCurrHttpEndpoint(endpoint network.Endpoint) {
-	s.cfg.currHttpEndpoint = endpoint
-	s.updateBeaconNodeStats()
-}

func (s *Service) updateConnectedETH1(state bool) {
	s.connectedETH1 = state
	s.updateBeaconNodeStats()
@@ -608,11 +600,6 @@ func (s *Service) run(done <-chan struct{}) {
			log.WithError(err).Debug("Could not fetch latest eth1 header")
			continue
		}
-		if eth1HeadIsBehind(head.Time) {
-			s.pollConnectionStatus(s.ctx)
-			log.WithError(errFarBehind).Debug("Could not get an up to date eth1 header")
-			continue
-		}
		s.processBlockHeader(head)
		s.handleETH1FollowDistance()
	case <-chainstartTicker.C:
@@ -838,11 +825,3 @@ func dedupEndpoints(endpoints []string) []string {
	}
	return newEndpoints
}

-// Checks if the provided timestamp is beyond the prescribed bound from
-// the current wall clock time.
-func eth1HeadIsBehind(timestamp uint64) bool {
-	timeout := prysmTime.Now().Add(-eth1Threshold)
-	// check that web3 client is syncing
-	return time.Unix(int64(timestamp), 0).Before(timeout) // lint:ignore uintcast -- timestamp will not exceed int64 in your lifetime.
-}
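For the staleness check this commit removes, the arithmetic was: with eth1Threshold = 20 minutes, a head whose timestamp is older than the current wall clock minus 20 minutes counts as behind. A standalone sketch of the same comparison:

package main

import (
	"fmt"
	"time"
)

const eth1Threshold = 20 * time.Minute

func headIsBehind(timestamp uint64) bool {
	cutoff := time.Now().Add(-eth1Threshold)
	// A head older than the cutoff means the eth1 node is still syncing.
	return time.Unix(int64(timestamp), 0).Before(cutoff)
}

func main() {
	fresh := uint64(time.Now().Unix())
	stale := uint64(time.Now().Add(-21 * time.Minute).Unix())
	fmt.Println(headIsBehind(fresh), headIsBehind(stale)) // false true
}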

@@ -146,7 +146,7 @@ func TestStart_OK(t *testing.T) {
		WithDepositContractAddress(testAcc.ContractAddr),
		WithDatabase(beaconDB),
	)
-	require.NoError(t, err, "unable to setup web3 ETH1.0 chain service")
+	require.NoError(t, err, "unable to setup execution service")
	web3Service = setDefaultMocks(web3Service)
	web3Service.rpcClient = &mockExecution.RPCClient{Backend: testAcc.Backend}
	web3Service.depositContractCaller, err = contracts.NewDepositContractCaller(testAcc.ContractAddr, testAcc.Backend)
@@ -156,7 +156,7 @@ func TestStart_OK(t *testing.T) {
	web3Service.Start()
	if len(hook.Entries) > 0 {
		msg := hook.LastEntry().Message
-		want := "Could not connect to ETH1.0 chain RPC client"
+		want := "Could not connect to execution endpoint"
		if strings.Contains(want, msg) {
			t.Errorf("incorrect log, expected %s, got %s", want, msg)
		}
@@ -473,6 +473,7 @@ func TestInitDepositCacheWithFinalization_OK(t *testing.T) {

	emptyState, err := util.NewBeaconState()
	require.NoError(t, err)
+	util.SaveBlock(t, context.Background(), s.cfg.beaconDB, headBlock)
	require.NoError(t, s.cfg.beaconDB.SaveGenesisBlockRoot(context.Background(), headRoot))
	require.NoError(t, s.cfg.beaconDB.SaveState(context.Background(), emptyState, headRoot))
	require.NoError(t, stateGen.SaveState(context.Background(), headRoot, emptyState))
@@ -752,15 +753,6 @@ func TestService_ValidateDepositContainers(t *testing.T) {
	}
}

-func TestTimestampIsChecked(t *testing.T) {
-	timestamp := uint64(time.Now().Unix())
-	assert.Equal(t, false, eth1HeadIsBehind(timestamp))
-
-	// Give an older timestamp beyond threshold.
-	timestamp = uint64(time.Now().Add(-eth1Threshold).Add(-1 * time.Minute).Unix())
-	assert.Equal(t, true, eth1HeadIsBehind(timestamp))
-}

func TestETH1Endpoints(t *testing.T) {
	server, firstEndpoint, err := mockExecution.SetupRPCServer()
	require.NoError(t, err)

@@ -10,6 +10,7 @@ go_library(
    importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice",
    visibility = [
        "//beacon-chain:__subpackages__",
+       "//cmd:__subpackages__",
        "//testing/spectest:__subpackages__",
    ],
    deps = [
@@ -17,6 +18,7 @@ go_library(
        "//beacon-chain/state:go_default_library",
        "//config/fieldparams:go_default_library",
        "//consensus-types/primitives:go_default_library",
+       "//proto/eth/v1:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
)

@@ -31,6 +31,7 @@ go_library(
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
+       "//proto/eth/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
@@ -62,12 +63,14 @@ go_test(
        "//beacon-chain/forkchoice/types:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/v3:go_default_library",
+       "//config/features:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//crypto/hash:go_default_library",
        "//encoding/bytesutil:go_default_library",
+       "//proto/engine/v1:go_default_library",
        "//proto/eth/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",

@@ -3,6 +3,7 @@ package doublylinkedtree
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
|
||||
@@ -14,6 +15,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
|
||||
v1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v3/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v3/time/slots"
|
||||
@@ -82,7 +84,8 @@ func (f *ForkChoice) Head(
|
||||
|
||||
jc := f.JustifiedCheckpoint()
|
||||
fc := f.FinalizedCheckpoint()
|
||||
if err := f.store.treeRootNode.updateBestDescendant(ctx, jc.Epoch, fc.Epoch); err != nil {
|
||||
currentEpoch := slots.EpochsSinceGenesis(time.Unix(int64(f.store.genesisTime), 0))
|
||||
if err := f.store.treeRootNode.updateBestDescendant(ctx, jc.Epoch, fc.Epoch, currentEpoch); err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not update best descendant")
|
||||
}
|
||||
return f.store.head(ctx)
|
||||
@@ -183,11 +186,15 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
|
||||
return err
|
||||
}
|
||||
jcRoot := bytesutil.ToBytes32(jc.Root)
|
||||
// Releasing here the checkpoints lock because
|
||||
// AncestorRoot acquires a lock on nodes and that can
|
||||
// cause a double lock.
|
||||
f.store.checkpointsLock.Unlock()
|
||||
root, err := f.AncestorRoot(ctx, jcRoot, jSlot)
|
||||
if err != nil {
|
||||
f.store.checkpointsLock.Unlock()
|
||||
return err
|
||||
}
|
||||
f.store.checkpointsLock.Lock()
|
||||
if root == currentRoot {
|
||||
f.store.prevJustifiedCheckpoint = f.store.justifiedCheckpoint
|
||||
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
|
||||
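
The hunk above releases the checkpoints lock before calling AncestorRoot, which takes the nodes lock, and re-acquires it afterwards so the two locks are never held at once. A minimal sketch of that release-call-reacquire pattern, with illustrative types rather than Prysm's:

package main

import (
    "fmt"
    "sync"
)

type store struct {
    checkpointsMu sync.Mutex
    nodesMu       sync.Mutex
    justified     [32]byte
    prevJustified [32]byte
}

// ancestorRoot takes the nodes lock; calling it while still holding
// checkpointsMu would create a nested-lock hazard.
func (s *store) ancestorRoot() [32]byte {
    s.nodesMu.Lock()
    defer s.nodesMu.Unlock()
    return [32]byte{'a'}
}

func (s *store) updateCheckpoint(newJustified [32]byte) {
    s.checkpointsMu.Lock()
    current := s.justified
    // Drop the checkpoints lock across the call that takes nodesMu.
    s.checkpointsMu.Unlock()
    root := s.ancestorRoot()
    s.checkpointsMu.Lock()
    defer s.checkpointsMu.Unlock()
    if root == current {
        s.prevJustified = s.justified
        s.justified = newJustified
    }
}

func main() {
    s := &store{justified: [32]byte{'a'}}
    s.updateCheckpoint([32]byte{'b'})
    fmt.Printf("%c %c\n", s.prevJustified[0], s.justified[0]) // a b
}
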
@@ -296,7 +303,8 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types
}

// updateBalances updates the balances that directly voted for each block taking into account the
// validators' latest votes. This function requires a lock in Store.nodesLock.
// validators' latest votes. This function requires a lock in Store.nodesLock
// and votesLock
func (f *ForkChoice) updateBalances(newBalances []uint64) error {
    for index, vote := range f.votes {
        // Skip if validator has been slashed
@@ -424,6 +432,9 @@ func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, parentRoo
// store-tracked list. Votes from these validators are not accounted for
// in forkchoice.
func (f *ForkChoice) InsertSlashedIndex(_ context.Context, index types.ValidatorIndex) {
    f.votesLock.RLock()
    defer f.votesLock.RUnlock()

    f.store.nodesLock.Lock()
    defer f.store.nodesLock.Unlock()
    // return early if the index was already included:
@@ -433,8 +444,6 @@ func (f *ForkChoice) InsertSlashedIndex(_ context.Context, index types.Validator
    f.store.slashedIndices[index] = true

    // Subtract last vote from this equivocating validator
    f.votesLock.RLock()
    defer f.votesLock.RUnlock()

    if index >= types.ValidatorIndex(len(f.balances)) {
        return
@@ -484,30 +493,31 @@ func (f *ForkChoice) UpdateFinalizedCheckpoint(fc *forkchoicetypes.Checkpoint) e
}

// CommonAncestorRoot returns the common ancestor root between the two block roots r1 and r2.
func (f *ForkChoice) CommonAncestorRoot(ctx context.Context, r1 [32]byte, r2 [32]byte) ([32]byte, error) {
func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byte) ([32]byte, types.Slot, error) {
    ctx, span := trace.StartSpan(ctx, "doublelinkedtree.CommonAncestorRoot")
    defer span.End()

    // Do nothing if the input roots are the same.
    if r1 == r2 {
        return r1, nil
    }

    f.store.nodesLock.RLock()
    defer f.store.nodesLock.RUnlock()

    n1, ok := f.store.nodeByRoot[r1]
    if !ok || n1 == nil {
        return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
        return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
    }

    // Do nothing if the input roots are the same.
    if r1 == r2 {
        return r1, n1.slot, nil
    }

    n2, ok := f.store.nodeByRoot[r2]
    if !ok || n2 == nil {
        return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
        return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
    }

    for {
        if ctx.Err() != nil {
            return [32]byte{}, ctx.Err()
            return [32]byte{}, 0, ctx.Err()
        }
        if n1.slot > n2.slot {
            n1 = n1.parent
@@ -515,17 +525,17 @@ func (f *ForkChoice) CommonAncestorRoot(ctx context.Context, r1 [32]byte, r2 [32
            // This should not happen at runtime as the finalized
            // node has to be a common ancestor
            if n1 == nil {
                return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
                return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
            }
        } else {
            n2 = n2.parent
            // Reaches the end of the tree and unable to find common ancestor.
            if n2 == nil {
                return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
                return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
            }
        }
        if n1 == n2 {
            return n1.root, nil
            return n1.root, n1.slot, nil
        }
    }
}
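
The walk above repeatedly steps whichever node sits at the higher slot up to its parent until the two pointers meet. A self-contained sketch of the same algorithm on a simplified node type (names here are illustrative):

package main

import "fmt"

type Node struct {
    slot   uint64
    root   string
    parent *Node
}

// commonAncestor walks parent pointers, always advancing the deeper node,
// until both pointers land on the same node or run off the tree.
func commonAncestor(n1, n2 *Node) (*Node, error) {
    for n1 != nil && n2 != nil {
        if n1 == n2 {
            return n1, nil
        }
        if n1.slot > n2.slot {
            n1 = n1.parent
        } else {
            n2 = n2.parent
        }
    }
    return nil, fmt.Errorf("unknown common ancestor")
}

func main() {
    a := &Node{slot: 0, root: "a"}
    b := &Node{slot: 1, root: "b", parent: a}
    c := &Node{slot: 2, root: "c", parent: a}
    anc, err := commonAncestor(b, c)
    if err == nil {
        fmt.Println(anc.root) // a
    }
}
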
@@ -606,3 +616,52 @@ func (f *ForkChoice) JustifiedPayloadBlockHash() [32]byte {
    }
    return node.payloadHash
}

// ForkChoiceDump returns a full dump of forkchoice.
func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*v1.ForkChoiceResponse, error) {
    jc := &v1.Checkpoint{
        Epoch: f.store.justifiedCheckpoint.Epoch,
        Root:  f.store.justifiedCheckpoint.Root[:],
    }
    bjc := &v1.Checkpoint{
        Epoch: f.store.bestJustifiedCheckpoint.Epoch,
        Root:  f.store.bestJustifiedCheckpoint.Root[:],
    }
    ujc := &v1.Checkpoint{
        Epoch: f.store.unrealizedJustifiedCheckpoint.Epoch,
        Root:  f.store.unrealizedJustifiedCheckpoint.Root[:],
    }
    fc := &v1.Checkpoint{
        Epoch: f.store.finalizedCheckpoint.Epoch,
        Root:  f.store.finalizedCheckpoint.Root[:],
    }
    ufc := &v1.Checkpoint{
        Epoch: f.store.unrealizedFinalizedCheckpoint.Epoch,
        Root:  f.store.unrealizedFinalizedCheckpoint.Root[:],
    }
    nodes := make([]*v1.ForkChoiceNode, 0, f.NodeCount())
    var err error
    if f.store.treeRootNode != nil {
        nodes, err = f.store.treeRootNode.nodeTreeDump(ctx, nodes)
        if err != nil {
            return nil, err
        }
    }
    var headRoot [32]byte
    if f.store.headNode != nil {
        headRoot = f.store.headNode.root
    }
    resp := &v1.ForkChoiceResponse{
        JustifiedCheckpoint:           jc,
        BestJustifiedCheckpoint:       bjc,
        UnrealizedJustifiedCheckpoint: ujc,
        FinalizedCheckpoint:           fc,
        UnrealizedFinalizedCheckpoint: ufc,
        ProposerBoostRoot:             f.store.proposerBoostRoot[:],
        PreviousProposerBoostRoot:     f.store.previousProposerBoostRoot[:],
        HeadRoot:                      headRoot[:],
        ForkchoiceNodes:               nodes,
    }
    return resp, nil

}
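
A sketch of how a caller (for example, a debug endpoint) might serialize a dump like the one assembled above. The response and node shapes below are simplified stand-ins for the v1 protobuf types, not their real definitions:

package main

import (
    "encoding/json"
    "fmt"
)

// ForkChoiceNode and ForkChoiceResponse model a subset of the fields the dump
// above populates; the JSON tags are assumptions for illustration.
type ForkChoiceNode struct {
    Slot       uint64 `json:"slot"`
    Root       string `json:"root"`
    ParentRoot string `json:"parent_root"`
    Weight     uint64 `json:"weight"`
}

type ForkChoiceResponse struct {
    HeadRoot        string           `json:"head_root"`
    ForkchoiceNodes []ForkChoiceNode `json:"forkchoice_nodes"`
}

func main() {
    resp := ForkChoiceResponse{
        HeadRoot: "0xab",
        ForkchoiceNodes: []ForkChoiceNode{
            {Slot: 1, Root: "0xaa", ParentRoot: "0x00", Weight: 10},
            {Slot: 2, Root: "0xab", ParentRoot: "0xaa", Weight: 10},
        },
    }
    out, _ := json.MarshalIndent(resp, "", "  ")
    fmt.Println(string(out))
}
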

@@ -208,7 +208,7 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
    require.Equal(t, uint64(10), f.store.nodeByRoot[[32]byte{'1'}].weight)
    require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'2'}].weight)

    require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, 1, 1))
    require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
    require.DeepEqual(t, [32]byte{'3'}, f.store.treeRootNode.bestDescendant.root)
    f.store.nodesLock.Unlock()

@@ -408,73 +408,85 @@ func TestStore_CommonAncestor(t *testing.T) {
        r1       [32]byte
        r2       [32]byte
        wantRoot [32]byte
        wantSlot types.Slot
    }{
        {
            name:     "Common ancestor between c and b is a",
            r1:       [32]byte{'c'},
            r2:       [32]byte{'b'},
            wantRoot: [32]byte{'a'},
            wantSlot: 0,
        },
        {
            name:     "Common ancestor between c and d is a",
            r1:       [32]byte{'c'},
            r2:       [32]byte{'d'},
            wantRoot: [32]byte{'a'},
            wantSlot: 0,
        },
        {
            name:     "Common ancestor between c and e is a",
            r1:       [32]byte{'c'},
            r2:       [32]byte{'e'},
            wantRoot: [32]byte{'a'},
            wantSlot: 0,
        },
        {
            name:     "Common ancestor between g and f is c",
            r1:       [32]byte{'g'},
            r2:       [32]byte{'f'},
            wantRoot: [32]byte{'c'},
            wantSlot: 2,
        },
        {
            name:     "Common ancestor between f and h is c",
            r1:       [32]byte{'f'},
            r2:       [32]byte{'h'},
            wantRoot: [32]byte{'c'},
            wantSlot: 2,
        },
        {
            name:     "Common ancestor between g and h is c",
            r1:       [32]byte{'g'},
            r2:       [32]byte{'h'},
            wantRoot: [32]byte{'c'},
            wantSlot: 2,
        },
        {
            name:     "Common ancestor between b and h is a",
            r1:       [32]byte{'b'},
            r2:       [32]byte{'h'},
            wantRoot: [32]byte{'a'},
            wantSlot: 0,
        },
        {
            name:     "Common ancestor between e and h is a",
            r1:       [32]byte{'e'},
            r2:       [32]byte{'h'},
            wantRoot: [32]byte{'a'},
            wantSlot: 0,
        },
        {
            name:     "Common ancestor between i and f is c",
            r1:       [32]byte{'i'},
            r2:       [32]byte{'f'},
            wantRoot: [32]byte{'c'},
            wantSlot: 2,
        },
        {
name: "Common ancestor between e and h is a",
|
||||
            r1:       [32]byte{'j'},
            r2:       [32]byte{'g'},
            wantRoot: [32]byte{'c'},
            wantSlot: 2,
        },
    }
    for _, tc := range tests {
        t.Run(tc.name, func(t *testing.T) {
            gotRoot, err := f.CommonAncestorRoot(ctx, tc.r1, tc.r2)
            gotRoot, gotSlot, err := f.CommonAncestor(ctx, tc.r1, tc.r2)
            require.NoError(t, err)
            require.Equal(t, tc.wantRoot, gotRoot)
            require.Equal(t, tc.wantSlot, gotSlot)
        })
    }

@@ -497,46 +509,53 @@ func TestStore_CommonAncestor(t *testing.T) {
        r1       [32]byte
        r2       [32]byte
        wantRoot [32]byte
        wantSlot types.Slot
    }{
        {
            name:     "Common ancestor between a and b is a",
            r1:       [32]byte{'a'},
            r2:       [32]byte{'b'},
            wantRoot: [32]byte{'a'},
            wantSlot: 0,
        },
        {
            name:     "Common ancestor between b and d is b",
            r1:       [32]byte{'d'},
            r2:       [32]byte{'b'},
            wantRoot: [32]byte{'b'},
            wantSlot: 1,
        },
        {
            name:     "Common ancestor between d and a is a",
            r1:       [32]byte{'d'},
            r2:       [32]byte{'a'},
            wantRoot: [32]byte{'a'},
            wantSlot: 0,
        },
    }
    for _, tc := range tests {
        t.Run(tc.name, func(t *testing.T) {
            gotRoot, err := f.CommonAncestorRoot(ctx, tc.r1, tc.r2)
            gotRoot, gotSlot, err := f.CommonAncestor(ctx, tc.r1, tc.r2)
            require.NoError(t, err)
            require.Equal(t, tc.wantRoot, gotRoot)
            require.Equal(t, tc.wantSlot, gotSlot)
        })
    }

    // Equal inputs should return the same root.
    r, err := f.CommonAncestorRoot(ctx, [32]byte{'b'}, [32]byte{'b'})
    r, s, err := f.CommonAncestor(ctx, [32]byte{'b'}, [32]byte{'b'})
    require.NoError(t, err)
    require.Equal(t, [32]byte{'b'}, r)
    require.Equal(t, types.Slot(1), s)
    // Requesting finalized root (last node) should return the same root.
    r, err = f.CommonAncestorRoot(ctx, [32]byte{'a'}, [32]byte{'a'})
    r, s, err = f.CommonAncestor(ctx, [32]byte{'a'}, [32]byte{'a'})
    require.NoError(t, err)
    require.Equal(t, [32]byte{'a'}, r)
    require.Equal(t, types.Slot(0), s)
    // Requesting unknown root
    _, err = f.CommonAncestorRoot(ctx, [32]byte{'a'}, [32]byte{'z'})
    _, _, err = f.CommonAncestor(ctx, [32]byte{'a'}, [32]byte{'z'})
    require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
    _, err = f.CommonAncestorRoot(ctx, [32]byte{'z'}, [32]byte{'a'})
    _, _, err = f.CommonAncestor(ctx, [32]byte{'z'}, [32]byte{'a'})
    require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
    n := &Node{
        slot: 100,
@@ -550,7 +569,7 @@ func TestStore_CommonAncestor(t *testing.T) {

    f.store.nodeByRoot[[32]byte{'y'}] = n
    // broken link
    _, err = f.CommonAncestorRoot(ctx, [32]byte{'y'}, [32]byte{'a'})
    _, _, err = f.CommonAncestor(ctx, [32]byte{'y'}, [32]byte{'a'})
    require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
}


@@ -5,8 +5,10 @@ import (
    "context"

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v3/config/features"
    "github.com/prysmaticlabs/prysm/v3/config/params"
    types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
    v1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
)

// depth returns the length of the path to the root of Fork Choice
@@ -40,8 +42,9 @@ func (n *Node) applyWeightChanges(ctx context.Context) error {
    return nil
}

// updateBestDescendant updates the best descendant of this node and its children.
func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch types.Epoch) error {
// updateBestDescendant updates the best descendant of this node and its
// children. This function assumes the caller has a lock on Store.nodesLock
func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch, currentEpoch types.Epoch) error {
    if ctx.Err() != nil {
        return ctx.Err()
    }
@@ -57,10 +60,10 @@ func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finaliz
        if child == nil {
            return errors.Wrap(ErrNilNode, "could not update best descendant")
        }
        if err := child.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch); err != nil {
        if err := child.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
            return err
        }
        childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, finalizedEpoch)
        childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, finalizedEpoch, currentEpoch)
        if childLeadsToViableHead && !hasViableDescendant {
            // The child leads to a viable head, but the current
            // parent's best child doesn't.
@@ -95,18 +98,24 @@ func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finaliz
// viableForHead returns true if the node is viable to head.
// Any node with different finalized or justified epoch than
// the ones in fork choice store should not be viable to head.
func (n *Node) viableForHead(justifiedEpoch, finalizedEpoch types.Epoch) bool {
func (n *Node) viableForHead(justifiedEpoch, finalizedEpoch, currentEpoch types.Epoch) bool {
    justified := justifiedEpoch == n.justifiedEpoch || justifiedEpoch == 0
    finalized := finalizedEpoch == n.finalizedEpoch || finalizedEpoch == 0
    if features.Get().EnableDefensivePull && !justified && justifiedEpoch+1 == currentEpoch {
        if n.unrealizedJustifiedEpoch+1 >= currentEpoch {
            justified = true
            finalized = true
        }
    }

    return justified && finalized
}

func (n *Node) leadsToViableHead(justifiedEpoch, finalizedEpoch types.Epoch) bool {
func (n *Node) leadsToViableHead(justifiedEpoch, finalizedEpoch, currentEpoch types.Epoch) bool {
    if n.bestDescendant == nil {
        return n.viableForHead(justifiedEpoch, finalizedEpoch)
        return n.viableForHead(justifiedEpoch, finalizedEpoch, currentEpoch)
    }
    return n.bestDescendant.viableForHead(justifiedEpoch, finalizedEpoch)
    return n.bestDescendant.viableForHead(justifiedEpoch, finalizedEpoch, currentEpoch)
}
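
A worked sketch of the defensive-pull rule introduced above: one epoch after store justification, a node whose stored epochs lag is still viable if its unrealized justified epoch would catch up at the epoch boundary. Types are simplified for illustration; the boolean logic mirrors the diff:

package main

import "fmt"

type node struct {
    justifiedEpoch           uint64
    finalizedEpoch           uint64
    unrealizedJustifiedEpoch uint64
}

func viableForHead(n node, justifiedEpoch, finalizedEpoch, currentEpoch uint64) bool {
    justified := justifiedEpoch == n.justifiedEpoch || justifiedEpoch == 0
    finalized := finalizedEpoch == n.finalizedEpoch || finalizedEpoch == 0
    // Defensive pull: one epoch after justification, accept nodes whose
    // unrealized justification catches up at the boundary.
    if !justified && justifiedEpoch+1 == currentEpoch {
        if n.unrealizedJustifiedEpoch+1 >= currentEpoch {
            justified = true
            finalized = true
        }
    }
    return justified && finalized
}

func main() {
    // Store justified epoch 3, current epoch 4: the node's stored epochs lag,
    // but its unrealized justified epoch (3) rescues it, as in the test table
    // later in this diff.
    n := node{justifiedEpoch: 2, finalizedEpoch: 2, unrealizedJustifiedEpoch: 3}
    fmt.Println(viableForHead(n, 3, 2, 4)) // true
}
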

// setNodeAndParentValidated sets the current node and all the ancestors as validated (i.e. non-optimistic).
@@ -115,10 +124,48 @@ func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
        return ctx.Err()
    }

    if !n.optimistic || n.parent == nil {
    if !n.optimistic {
        return nil
    }

    n.optimistic = false

    if n.parent == nil {
        return nil
    }
    return n.parent.setNodeAndParentValidated(ctx)
}

// nodeTreeDump appends to the given list all the nodes descending from this one
func (n *Node) nodeTreeDump(ctx context.Context, nodes []*v1.ForkChoiceNode) ([]*v1.ForkChoiceNode, error) {
    if ctx.Err() != nil {
        return nil, ctx.Err()
    }
    var parentRoot [32]byte
    if n.parent != nil {
        parentRoot = n.parent.root
    }
    thisNode := &v1.ForkChoiceNode{
        Slot:                     n.slot,
        Root:                     n.root[:],
        ParentRoot:               parentRoot[:],
        JustifiedEpoch:           n.justifiedEpoch,
        FinalizedEpoch:           n.finalizedEpoch,
        UnrealizedJustifiedEpoch: n.unrealizedJustifiedEpoch,
        UnrealizedFinalizedEpoch: n.unrealizedFinalizedEpoch,
        Balance:                  n.balance,
        Weight:                   n.weight,
        ExecutionOptimistic:      n.optimistic,
        ExecutionPayload:         n.payloadHash[:],
        Timestamp:                n.timestamp,
    }

    nodes = append(nodes, thisNode)
    var err error
    for _, child := range n.children {
        nodes, err = child.nodeTreeDump(ctx, nodes)
        if err != nil {
            return nil, err
        }
    }
    return nodes, nil
}

@@ -6,6 +6,7 @@ import (

    "github.com/prysmaticlabs/prysm/v3/config/params"
    types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
    v1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
    "github.com/prysmaticlabs/prysm/v3/testing/assert"
    "github.com/prysmaticlabs/prysm/v3/testing/require"
)
@@ -113,7 +114,7 @@ func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
    s := f.store
    s.nodeByRoot[indexToHash(1)].weight = 100
    s.nodeByRoot[indexToHash(2)].weight = 200
    assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1))
    assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))

    assert.Equal(t, 2, len(s.treeRootNode.children))
    assert.Equal(t, s.treeRootNode.children[1], s.treeRootNode.bestDescendant)
@@ -133,7 +134,7 @@ func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
    s := f.store
    s.nodeByRoot[indexToHash(1)].weight = 200
    s.nodeByRoot[indexToHash(2)].weight = 100
    assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1))
    assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))

    assert.Equal(t, 2, len(s.treeRootNode.children))
    assert.Equal(t, s.treeRootNode.children[0], s.treeRootNode.bestDescendant)
@@ -173,7 +174,7 @@ func TestNode_ViableForHead(t *testing.T) {
        {&Node{finalizedEpoch: 3, justifiedEpoch: 4}, 4, 3, true},
    }
    for _, tc := range tests {
        got := tc.n.viableForHead(tc.justifiedEpoch, tc.finalizedEpoch)
        got := tc.n.viableForHead(tc.justifiedEpoch, tc.finalizedEpoch, 5)
        assert.Equal(t, tc.want, got)
    }
}
@@ -197,15 +198,17 @@ func TestNode_LeadsToViableHead(t *testing.T) {
    require.NoError(t, err)
    require.NoError(t, f.InsertNode(ctx, state, blkRoot))

    require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 3))
    require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 3))
    require.Equal(t, false, f.store.nodeByRoot[indexToHash(2)].leadsToViableHead(4, 3))
    require.Equal(t, false, f.store.nodeByRoot[indexToHash(4)].leadsToViableHead(4, 3))
    require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 3, 5))
    require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 3, 5))
    require.Equal(t, false, f.store.nodeByRoot[indexToHash(2)].leadsToViableHead(4, 3, 5))
    require.Equal(t, false, f.store.nodeByRoot[indexToHash(4)].leadsToViableHead(4, 3, 5))
}

func TestNode_SetFullyValidated(t *testing.T) {
    f := setup(1, 1)
    ctx := context.Background()
    storeNodes := make([]*Node, 6)
    storeNodes[0] = f.store.treeRootNode
    // insert blocks in the fork pattern (optimistic status in parenthesis)
    //
    // 0 (false) -- 1 (false) -- 2 (false) -- 3 (true) -- 4 (true)
@@ -215,20 +218,25 @@ func TestNode_SetFullyValidated(t *testing.T) {
    state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
    require.NoError(t, err)
    require.NoError(t, f.InsertNode(ctx, state, blkRoot))
    storeNodes[1] = f.store.nodeByRoot[blkRoot]
    require.NoError(t, f.SetOptimisticToValid(ctx, params.BeaconConfig().ZeroHash))
    state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
    require.NoError(t, err)
    require.NoError(t, f.InsertNode(ctx, state, blkRoot))
    storeNodes[2] = f.store.nodeByRoot[blkRoot]
    require.NoError(t, f.SetOptimisticToValid(ctx, indexToHash(1)))
    state, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
    require.NoError(t, err)
    require.NoError(t, f.InsertNode(ctx, state, blkRoot))
    storeNodes[3] = f.store.nodeByRoot[blkRoot]
    state, blkRoot, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1)
    require.NoError(t, err)
    require.NoError(t, f.InsertNode(ctx, state, blkRoot))
    storeNodes[4] = f.store.nodeByRoot[blkRoot]
    state, blkRoot, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
    require.NoError(t, err)
    require.NoError(t, f.InsertNode(ctx, state, blkRoot))
    storeNodes[5] = f.store.nodeByRoot[blkRoot]

    opt, err := f.IsOptimistic(indexToHash(5))
    require.NoError(t, err)
@@ -253,4 +261,22 @@ func TestNode_SetFullyValidated(t *testing.T) {
    opt, err = f.IsOptimistic(indexToHash(3))
    require.NoError(t, err)
    require.Equal(t, false, opt)

    respNodes := make([]*v1.ForkChoiceNode, 0)
    respNodes, err = f.store.treeRootNode.nodeTreeDump(ctx, respNodes)
    require.NoError(t, err)
    require.Equal(t, len(respNodes), f.NodeCount())

    for i, respNode := range respNodes {
        require.Equal(t, storeNodes[i].slot, respNode.Slot)
        require.DeepEqual(t, storeNodes[i].root[:], respNode.Root)
        require.Equal(t, storeNodes[i].balance, respNode.Balance)
        require.Equal(t, storeNodes[i].weight, respNode.Weight)
        require.Equal(t, storeNodes[i].optimistic, respNode.ExecutionOptimistic)
        require.Equal(t, storeNodes[i].justifiedEpoch, respNode.JustifiedEpoch)
        require.Equal(t, storeNodes[i].unrealizedJustifiedEpoch, respNode.UnrealizedJustifiedEpoch)
        require.Equal(t, storeNodes[i].finalizedEpoch, respNode.FinalizedEpoch)
        require.Equal(t, storeNodes[i].unrealizedFinalizedEpoch, respNode.UnrealizedFinalizedEpoch)
        require.Equal(t, storeNodes[i].timestamp, respNode.Timestamp)
    }
}

@@ -41,15 +41,14 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot types.Slot) error {
    }

    // Update store.justified_checkpoint if a better checkpoint on the store.finalized_checkpoint chain
    f.store.checkpointsLock.Lock()

    f.store.checkpointsLock.RLock()
    bjcp := f.store.bestJustifiedCheckpoint
    jcp := f.store.justifiedCheckpoint
    fcp := f.store.finalizedCheckpoint
    f.store.checkpointsLock.RUnlock()
    if bjcp.Epoch > jcp.Epoch {
        finalizedSlot, err := slots.EpochStart(fcp.Epoch)
        if err != nil {
            f.store.checkpointsLock.Unlock()
            return err
        }

@@ -59,15 +58,15 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot types.Slot) error {
        // loop call here.
        r, err := f.AncestorRoot(ctx, bjcp.Root, finalizedSlot)
        if err != nil {
            f.store.checkpointsLock.Unlock()
            return err
        }
        if r == fcp.Root {
            f.store.checkpointsLock.Lock()
            f.store.prevJustifiedCheckpoint = jcp
            f.store.justifiedCheckpoint = bjcp
            f.store.checkpointsLock.Unlock()
        }
    }
    f.store.checkpointsLock.Unlock()
    if !features.Get().DisablePullTips {
        f.updateUnrealizedCheckpoints()
    }

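The NewSlot change above narrows the critical section: a short read lock snapshots the checkpoint values, expensive work runs with no lock held, and the write lock is taken only for the actual mutation. A minimal sketch of that pattern with illustrative types:

package main

import (
    "fmt"
    "sync"
)

type checkpoints struct {
    mu            sync.RWMutex
    bestJustified int
    justified     int
}

func (c *checkpoints) newSlot() {
    c.mu.RLock()
    bj, j := c.bestJustified, c.justified // snapshot under the read lock
    c.mu.RUnlock()
    if bj > j {
        // Any expensive checks run here with no lock held; the write lock
        // guards only the final update.
        c.mu.Lock()
        c.justified = bj
        c.mu.Unlock()
    }
}

func main() {
    c := &checkpoints{bestJustified: 2, justified: 1}
    c.newSlot()
    fmt.Println(c.justified) // 2
}
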
@@ -389,3 +389,14 @@ func TestSetOptimisticToInvalid_ForkAtMerge_bis(t *testing.T) {
    })
    require.DeepEqual(t, roots, [][32]byte{{'b'}, {'c'}, {'d'}, {'e'}})
}

func TestSetOptimisticToValid(t *testing.T) {
    f := setup(1, 1)
    op, err := f.IsOptimistic([32]byte{})
    require.NoError(t, err)
    require.Equal(t, true, op)
    require.NoError(t, f.SetOptimisticToValid(context.Background(), [32]byte{}))
    op, err = f.IsOptimistic([32]byte{})
    require.NoError(t, err)
    require.Equal(t, false, op)
}

@@ -95,8 +95,8 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
    if bestDescendant == nil {
        bestDescendant = justifiedNode
    }

    if !bestDescendant.viableForHead(s.justifiedCheckpoint.Epoch, s.finalizedCheckpoint.Epoch) {
    currentEpoch := slots.EpochsSinceGenesis(time.Unix(int64(s.genesisTime), 0))
    if !bestDescendant.viableForHead(s.justifiedCheckpoint.Epoch, s.finalizedCheckpoint.Epoch, currentEpoch) {
        s.allTipsAreInvalid = true
        return [32]byte{}, fmt.Errorf("head at slot %d with weight %d is not eligible, finalizedEpoch, justified Epoch %d, %d != %d, %d",
            bestDescendant.slot, bestDescendant.weight/10e9, bestDescendant.finalizedEpoch, bestDescendant.justifiedEpoch, s.finalizedCheckpoint.Epoch, s.justifiedCheckpoint.Epoch)
@@ -142,6 +142,7 @@ func (s *Store) insert(ctx context.Context,
        unrealizedFinalizedEpoch: finalizedEpoch,
        optimistic:               true,
        payloadHash:              payloadHash,
        timestamp:                uint64(time.Now().Unix()),
    }

    s.nodeByPayload[payloadHash] = n
@@ -170,8 +171,11 @@ func (s *Store) insert(ctx context.Context,
    }

    // Update best descendants
    if err := s.treeRootNode.updateBestDescendant(ctx,
        s.justifiedCheckpoint.Epoch, s.finalizedCheckpoint.Epoch); err != nil {
    s.checkpointsLock.RLock()
    jEpoch := s.justifiedCheckpoint.Epoch
    fEpoch := s.finalizedCheckpoint.Epoch
    s.checkpointsLock.RUnlock()
    if err := s.treeRootNode.updateBestDescendant(ctx, jEpoch, fEpoch, slots.ToEpoch(currentSlot)); err != nil {
        return n, err
    }
}

@@ -59,6 +59,7 @@ type Node struct {
    weight         uint64 // weight of this node: the total balance including children
    bestDescendant *Node  // bestDescendant node of this node.
    optimistic     bool   // whether the block has been fully validated or not
    timestamp      uint64 // The timestamp when the node was inserted.
}

// Vote defines an individual validator's vote.

@@ -5,6 +5,7 @@ import (
    "testing"

    forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
    "github.com/prysmaticlabs/prysm/v3/config/features"
    "github.com/prysmaticlabs/prysm/v3/config/params"
    types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v3/testing/require"
@@ -198,31 +199,36 @@ func TestStore_NoDeadLock(t *testing.T) {
// D justifies and comes late.
//
func TestStore_ForkNextEpoch(t *testing.T) {
    resetCfg := features.InitWithReset(&features.Flags{
        EnableDefensivePull: true,
    })
    defer resetCfg()

    f := setup(0, 0)
    ctx := context.Background()

    // Epoch 1 blocks (D does not arrive)
    state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
    state, blkRoot, err := prepareForkchoiceState(ctx, 92, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
    require.NoError(t, err)
    require.NoError(t, f.InsertNode(ctx, state, blkRoot))
    state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
    state, blkRoot, err = prepareForkchoiceState(ctx, 93, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
    require.NoError(t, err)
    require.NoError(t, f.InsertNode(ctx, state, blkRoot))
    state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 0, 0)
    state, blkRoot, err = prepareForkchoiceState(ctx, 94, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 0, 0)
    require.NoError(t, err)
    require.NoError(t, f.InsertNode(ctx, state, blkRoot))

    // Epoch 2 blocks
    state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'c'}, [32]byte{'E'}, 0, 0)
    state, blkRoot, err = prepareForkchoiceState(ctx, 96, [32]byte{'e'}, [32]byte{'c'}, [32]byte{'E'}, 0, 0)
    require.NoError(t, err)
    require.NoError(t, f.InsertNode(ctx, state, blkRoot))
    state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 0, 0)
    state, blkRoot, err = prepareForkchoiceState(ctx, 97, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 0, 0)
    require.NoError(t, err)
    require.NoError(t, f.InsertNode(ctx, state, blkRoot))
    state, blkRoot, err = prepareForkchoiceState(ctx, 106, [32]byte{'g'}, [32]byte{'f'}, [32]byte{'G'}, 0, 0)
    state, blkRoot, err = prepareForkchoiceState(ctx, 98, [32]byte{'g'}, [32]byte{'f'}, [32]byte{'G'}, 0, 0)
    require.NoError(t, err)
    require.NoError(t, f.InsertNode(ctx, state, blkRoot))
    state, blkRoot, err = prepareForkchoiceState(ctx, 107, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 0, 0)
    state, blkRoot, err = prepareForkchoiceState(ctx, 99, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 0, 0)
    require.NoError(t, err)
    require.NoError(t, f.InsertNode(ctx, state, blkRoot))

@@ -234,16 +240,25 @@ func TestStore_ForkNextEpoch(t *testing.T) {
    require.Equal(t, types.Epoch(0), f.JustifiedCheckpoint().Epoch)

    // D arrives late, D is head
    state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 0, 0)
    state, blkRoot, err = prepareForkchoiceState(ctx, 95, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 0, 0)
    require.NoError(t, err)
    require.NoError(t, f.InsertNode(ctx, state, blkRoot))
    require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'d'}, 1))
    f.store.unrealizedJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 1}
    require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'d'}, 2))
    f.store.unrealizedJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 2}
    f.updateUnrealizedCheckpoints()
    headRoot, err = f.Head(ctx, []uint64{100})
    require.NoError(t, err)
    require.Equal(t, [32]byte{'d'}, headRoot)
    require.Equal(t, types.Epoch(1), f.JustifiedCheckpoint().Epoch)
    require.Equal(t, types.Epoch(2), f.JustifiedCheckpoint().Epoch)
    require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
    require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'h'}].weight)
    // Set current epoch to 3, and H's unrealized checkpoint. Check it's head
    driftGenesisTime(f, 99, 0)
    require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'h'}, 2))
    headRoot, err = f.Head(ctx, []uint64{100})
    require.NoError(t, err)
    require.Equal(t, [32]byte{'h'}, headRoot)
    require.Equal(t, types.Epoch(2), f.JustifiedCheckpoint().Epoch)
    require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
    require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'h'}].weight)
}

@@ -7,6 +7,7 @@ import (
    "github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
    fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
    types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
    v1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
)

// ForkChoicer represents the full fork choice interface composed of all the sub-interfaces.
@@ -51,7 +52,7 @@ type Getter interface {
    ProposerBoost() [fieldparams.RootLength]byte
    HasParent(root [32]byte) bool
    AncestorRoot(ctx context.Context, root [32]byte, slot types.Slot) ([32]byte, error)
    CommonAncestorRoot(ctx context.Context, root1 [32]byte, root2 [32]byte) ([32]byte, error)
    CommonAncestor(ctx context.Context, root1 [32]byte, root2 [32]byte) ([32]byte, types.Slot, error)
    IsCanonical(root [32]byte) bool
    FinalizedCheckpoint() *forkchoicetypes.Checkpoint
    FinalizedPayloadBlockHash() [32]byte
@@ -62,6 +63,7 @@ type Getter interface {
    NodeCount() int
    HighestReceivedBlockSlot() types.Slot
    ReceivedBlocksLastEpoch() (uint64, error)
    ForkChoiceDump(context.Context) (*v1.ForkChoiceResponse, error)
}

// Setter allows to set forkchoice information

@@ -32,6 +32,7 @@ go_library(
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//math:go_default_library",
        "//proto/eth/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
@@ -63,6 +64,7 @@ go_test(
        "//beacon-chain/forkchoice/types:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/v3:go_default_library",
        "//config/features:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/primitives:go_default_library",

@@ -10,7 +10,9 @@ import (
)

// This computes validator balance delta from validator votes.
// It returns a list of deltas that represents the difference between old balances and new balances.
// It returns a list of deltas that represents the difference between old
// balances and new balances. This function assumes the caller holds a lock in
// Store.nodesLock and Store.votesLock
func computeDeltas(
    ctx context.Context,
    count int,

@@ -41,15 +41,14 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot types.Slot) error {
    }

    // Update store.justified_checkpoint if a better checkpoint on the store.finalized_checkpoint chain
    f.store.checkpointsLock.Lock()

    f.store.checkpointsLock.RLock()
    bjcp := f.store.bestJustifiedCheckpoint
    jcp := f.store.justifiedCheckpoint
    fcp := f.store.finalizedCheckpoint
    f.store.checkpointsLock.RUnlock()
    if bjcp.Epoch > jcp.Epoch {
        finalizedSlot, err := slots.EpochStart(fcp.Epoch)
        if err != nil {
            f.store.checkpointsLock.Unlock()
            return err
        }

@@ -59,15 +58,15 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot types.Slot) error {
        // loop call here.
        r, err := f.AncestorRoot(ctx, bjcp.Root, finalizedSlot)
        if err != nil {
            f.store.checkpointsLock.Unlock()
            return err
        }
        if r == fcp.Root {
            f.store.checkpointsLock.Lock()
            f.store.prevJustifiedCheckpoint = jcp
            f.store.justifiedCheckpoint = bjcp
            f.store.checkpointsLock.Unlock()
        }
    }
    f.store.checkpointsLock.Unlock()
    if !features.Get().DisablePullTips {
        f.updateUnrealizedCheckpoints()
    }

@@ -17,6 +17,7 @@ import (
    types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
    pmath "github.com/prysmaticlabs/prysm/v3/math"
    v1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
    ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v3/runtime/version"
    "github.com/prysmaticlabs/prysm/v3/time/slots"
@@ -188,11 +189,15 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
        return err
    }
    jcRoot := bytesutil.ToBytes32(jc.Root)
    // release the checkpoints lock here because
    // AncestorRoot takes a lock on nodes and that can lead
    // to double locks
    f.store.checkpointsLock.Unlock()
    root, err := f.AncestorRoot(ctx, jcRoot, jSlot)
    if err != nil {
        f.store.checkpointsLock.Unlock()
        return err
    }
    f.store.checkpointsLock.Lock()
    if root == currentRoot {
        f.store.prevJustifiedCheckpoint = f.store.justifiedCheckpoint
        f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
@@ -277,47 +282,51 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types
}

// CommonAncestorRoot returns the common ancestor root between the two block roots r1 and r2.
func (f *ForkChoice) CommonAncestorRoot(ctx context.Context, r1 [32]byte, r2 [32]byte) ([32]byte, error) {
func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byte) ([32]byte, types.Slot, error) {
    ctx, span := trace.StartSpan(ctx, "protoArray.CommonAncestorRoot")
    defer span.End()

    // Do nothing if the two input roots are the same.
    if r1 == r2 {
        return r1, nil
    }
    f.store.nodesLock.RLock()
    defer f.store.nodesLock.RUnlock()

    i1, ok := f.store.nodesIndices[r1]
    if !ok || i1 >= uint64(len(f.store.nodes)) {
        return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
        return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
    }

    // Do nothing if the two input roots are the same.
    if r1 == r2 {
        n1 := f.store.nodes[i1]
        return r1, n1.slot, nil
    }

    i2, ok := f.store.nodesIndices[r2]
    if !ok || i2 >= uint64(len(f.store.nodes)) {
        return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
        return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
    }

    for {
        if ctx.Err() != nil {
            return [32]byte{}, ctx.Err()
            return [32]byte{}, 0, ctx.Err()
        }
        if i1 > i2 {
            n1 := f.store.nodes[i1]
            i1 = n1.parent
            // Reaches the end of the tree and unable to find common ancestor.
            if i1 >= uint64(len(f.store.nodes)) {
                return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
                return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
            }
        } else {
            n2 := f.store.nodes[i2]
            i2 = n2.parent
            // Reaches the end of the tree and unable to find common ancestor.
            if i2 >= uint64(len(f.store.nodes)) {
                return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
                return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
            }
        }
        if i1 == i2 {
            n1 := f.store.nodes[i1]
            return n1.root, nil
            return n1.root, n1.slot, nil
        }
    }
}
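
A sketch of what the extra slot return from CommonAncestor enables, such as measuring the depth of a reorg between an old and a new head. The interface and stub below are illustrative, not Prysm's API:

package main

import (
    "context"
    "fmt"
)

type Slot uint64

// forkChoicer models just the method this diff introduces.
type forkChoicer interface {
    CommonAncestor(ctx context.Context, r1, r2 [32]byte) ([32]byte, Slot, error)
}

// reorgDistance returns how many slots the new head sits past the fork point.
func reorgDistance(ctx context.Context, f forkChoicer, oldHead, newHead [32]byte, newHeadSlot Slot) (Slot, error) {
    _, forkSlot, err := f.CommonAncestor(ctx, oldHead, newHead)
    if err != nil {
        return 0, err
    }
    return newHeadSlot - forkSlot, nil
}

// stub pretends the fork point is at slot 3.
type stub struct{}

func (stub) CommonAncestor(_ context.Context, r1, _ [32]byte) ([32]byte, Slot, error) {
    return r1, 3, nil
}

func main() {
    d, err := reorgDistance(context.Background(), stub{}, [32]byte{'a'}, [32]byte{'b'}, 5)
    fmt.Println(d, err) // 2 <nil>
}
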
@@ -406,8 +415,12 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {

    if !s.viableForHead(bestNode) {
        s.allTipsAreInvalid = true
        s.checkpointsLock.RLock()
        jEpoch := s.justifiedCheckpoint.Epoch
        fEpoch := s.finalizedCheckpoint.Epoch
        s.checkpointsLock.RUnlock()
        return [32]byte{}, fmt.Errorf("head at slot %d with weight %d is not eligible, finalizedEpoch %d != %d, justifiedEpoch %d != %d",
            bestNode.slot, bestNode.weight/10e9, bestNode.finalizedEpoch, s.finalizedCheckpoint.Epoch, bestNode.justifiedEpoch, s.justifiedCheckpoint.Epoch)
            bestNode.slot, bestNode.weight/10e9, bestNode.finalizedEpoch, fEpoch, bestNode.justifiedEpoch, jEpoch)
    }
    s.allTipsAreInvalid = false

@@ -426,7 +439,8 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
    return bestNode.root, nil
}

// updateCanonicalNodes updates the canonical nodes mapping given the input block root.
// updateCanonicalNodes updates the canonical nodes mapping given the input
// block root. This function assumes the caller holds a lock in Store.nodesLock
func (s *Store) updateCanonicalNodes(ctx context.Context, root [32]byte) error {
    ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.updateCanonicalNodes")
    defer span.End()
@@ -548,14 +562,14 @@ func (s *Store) insert(ctx context.Context,
    if slot > s.highestReceivedSlot {
        s.highestReceivedSlot = slot
    }

    return n, nil
}

// applyWeightChanges iterates backwards through the nodes in store. It checks all nodes parent
// and its best child. For each node, it updates the weight with input delta and
// back propagate the nodes' delta to its parents' delta. After scoring changes,
// the best child is then updated along with the best descendant.
// the best child is then updated along with the best descendant. This function
// assumes the caller holds a lock in Store.nodesLock
func (s *Store) applyWeightChanges(
    ctx context.Context, newBalances []uint64, delta []int,
) error {
@@ -871,6 +885,15 @@ func (s *Store) viableForHead(node *Node) bool {
    // It's also viable if we are in genesis epoch.
    justified := s.justifiedCheckpoint.Epoch == node.justifiedEpoch || s.justifiedCheckpoint.Epoch == 0
    finalized := s.finalizedCheckpoint.Epoch == node.finalizedEpoch || s.finalizedCheckpoint.Epoch == 0
    if features.Get().EnableDefensivePull {
        currentEpoch := slots.EpochsSinceGenesis(time.Unix(int64(s.genesisTime), 0))
        if !justified && s.justifiedCheckpoint.Epoch+1 == currentEpoch {
            if node.unrealizedJustifiedEpoch+1 >= currentEpoch {
                justified = true
                finalized = true
            }
        }
    }

    return justified && finalized
}
@@ -900,6 +923,8 @@ func (f *ForkChoice) Tips() ([][32]byte, []types.Slot) {
// store-tracked list. Votes from these validators are not accounted for
// in forkchoice.
func (f *ForkChoice) InsertSlashedIndex(ctx context.Context, index types.ValidatorIndex) {
    f.votesLock.RLock()
    defer f.votesLock.RUnlock()
    f.store.nodesLock.Lock()
    defer f.store.nodesLock.Unlock()
    // return early if the index was already included:
@@ -909,9 +934,6 @@ func (f *ForkChoice) InsertSlashedIndex(ctx context.Context, index types.Validat
    f.store.slashedIndices[index] = true

    // Subtract last vote from this equivocating validator
    f.votesLock.RLock()
    defer f.votesLock.RUnlock()

    if index >= types.ValidatorIndex(len(f.balances)) {
        return
    }
@@ -1069,3 +1091,7 @@ func (f *ForkChoice) ReceivedBlocksLastEpoch() (uint64, error) {
    }
    return count, nil
}

func (*ForkChoice) ForkChoiceDump(_ context.Context) (*v1.ForkChoiceResponse, error) {
    return nil, errors.New("ForkChoiceDump is not supported by protoarray")
}

@@ -7,6 +7,7 @@ import (

    "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice"
    forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
    "github.com/prysmaticlabs/prysm/v3/config/features"
    "github.com/prysmaticlabs/prysm/v3/config/params"
    "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
    types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
@@ -677,73 +678,85 @@ func TestStore_CommonAncestor(t *testing.T) {
        r1       [32]byte
        r2       [32]byte
        wantRoot [32]byte
        wantSlot types.Slot
    }{
        {
            name:     "Common ancestor between c and b is a",
            r1:       [32]byte{'c'},
            r2:       [32]byte{'b'},
            wantRoot: [32]byte{'a'},
            wantSlot: 0,
        },
        {
            name:     "Common ancestor between c and d is a",
            r1:       [32]byte{'c'},
            r2:       [32]byte{'d'},
            wantRoot: [32]byte{'a'},
            wantSlot: 0,
        },
        {
            name:     "Common ancestor between c and e is a",
            r1:       [32]byte{'c'},
            r2:       [32]byte{'e'},
            wantRoot: [32]byte{'a'},
            wantSlot: 0,
        },
        {
            name:     "Common ancestor between g and f is c",
            r1:       [32]byte{'g'},
            r2:       [32]byte{'f'},
            wantRoot: [32]byte{'c'},
            wantSlot: 2,
        },
        {
            name:     "Common ancestor between f and h is c",
            r1:       [32]byte{'f'},
            r2:       [32]byte{'h'},
            wantRoot: [32]byte{'c'},
            wantSlot: 2,
        },
        {
            name:     "Common ancestor between g and h is c",
            r1:       [32]byte{'g'},
            r2:       [32]byte{'h'},
            wantRoot: [32]byte{'c'},
            wantSlot: 2,
        },
        {
            name:     "Common ancestor between b and h is a",
            r1:       [32]byte{'b'},
            r2:       [32]byte{'h'},
            wantRoot: [32]byte{'a'},
            wantSlot: 0,
        },
        {
            name:     "Common ancestor between e and h is a",
            r1:       [32]byte{'e'},
            r2:       [32]byte{'h'},
            wantRoot: [32]byte{'a'},
            wantSlot: 0,
        },
        {
            name:     "Common ancestor between i and f is c",
            r1:       [32]byte{'i'},
            r2:       [32]byte{'f'},
            wantRoot: [32]byte{'c'},
            wantSlot: 2,
        },
        {
name: "Common ancestor between e and h is a",
|
||||
            r1:       [32]byte{'j'},
            r2:       [32]byte{'g'},
            wantRoot: [32]byte{'c'},
            wantSlot: 2,
        },
    }
    for _, tc := range tests {
        t.Run(tc.name, func(t *testing.T) {
            gotRoot, err := f.CommonAncestorRoot(ctx, tc.r1, tc.r2)
            gotRoot, gotSlot, err := f.CommonAncestor(ctx, tc.r1, tc.r2)
            require.NoError(t, err)
            require.Equal(t, tc.wantRoot, gotRoot)
            require.Equal(t, tc.wantSlot, gotSlot)
        })
    }

@@ -766,52 +779,59 @@ func TestStore_CommonAncestor(t *testing.T) {
        r1       [32]byte
        r2       [32]byte
        wantRoot [32]byte
        wantSlot types.Slot
    }{
        {
            name:     "Common ancestor between a and b is a",
            r1:       [32]byte{'a'},
            r2:       [32]byte{'b'},
            wantRoot: [32]byte{'a'},
            wantSlot: 0,
        },
        {
            name:     "Common ancestor between b and d is b",
            r1:       [32]byte{'d'},
            r2:       [32]byte{'b'},
            wantRoot: [32]byte{'b'},
            wantSlot: 1,
        },
        {
            name:     "Common ancestor between d and a is a",
            r1:       [32]byte{'d'},
            r2:       [32]byte{'a'},
            wantRoot: [32]byte{'a'},
            wantSlot: 0,
        },
    }
    for _, tc := range tests {
        t.Run(tc.name, func(t *testing.T) {
            gotRoot, err := f.CommonAncestorRoot(ctx, tc.r1, tc.r2)
            gotRoot, gotSlot, err := f.CommonAncestor(ctx, tc.r1, tc.r2)
            require.NoError(t, err)
            require.Equal(t, tc.wantRoot, gotRoot)
            require.Equal(t, tc.wantSlot, gotSlot)
        })
    }

    // Equal inputs should return the same root.
    r, err := f.CommonAncestorRoot(ctx, [32]byte{'b'}, [32]byte{'b'})
    r, s, err := f.CommonAncestor(ctx, [32]byte{'b'}, [32]byte{'b'})
    require.NoError(t, err)
    require.Equal(t, [32]byte{'b'}, r)
    require.Equal(t, types.Slot(1), s)
    // Requesting finalized root (last node) should return the same root.
    r, err = f.CommonAncestorRoot(ctx, [32]byte{'a'}, [32]byte{'a'})
    r, s, err = f.CommonAncestor(ctx, [32]byte{'a'}, [32]byte{'a'})
    require.NoError(t, err)
    require.Equal(t, [32]byte{'a'}, r)
    require.Equal(t, types.Slot(0), s)
    // Requesting unknown root
    _, err = f.CommonAncestorRoot(ctx, [32]byte{'a'}, [32]byte{'z'})
    _, _, err = f.CommonAncestor(ctx, [32]byte{'a'}, [32]byte{'z'})
    require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
    _, err = f.CommonAncestorRoot(ctx, [32]byte{'z'}, [32]byte{'a'})
    _, _, err = f.CommonAncestor(ctx, [32]byte{'z'}, [32]byte{'a'})
    require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
    state, blkRoot, err = prepareForkchoiceState(ctx, 100, [32]byte{'y'}, [32]byte{'z'}, [32]byte{}, 1, 1)
    require.NoError(t, err)
    require.NoError(t, f.InsertNode(ctx, state, blkRoot))
    // broken link
    _, err = f.CommonAncestorRoot(ctx, [32]byte{'y'}, [32]byte{'a'})
    _, _, err = f.CommonAncestor(ctx, [32]byte{'y'}, [32]byte{'a'})
    require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
}

@@ -868,6 +888,43 @@ func TestStore_ViableForHead(t *testing.T) {
    }
}

func TestStore_ViableForHead_DefensivePull(t *testing.T) {
    resetCfg := features.InitWithReset(&features.Flags{
        EnableDefensivePull: true,
    })
    defer resetCfg()

    tests := []struct {
        n              *Node
        justifiedEpoch types.Epoch
        finalizedEpoch types.Epoch
        currentEpoch   types.Epoch
        want           bool
    }{
        {&Node{}, 0, 0, 0, true},
        {&Node{}, 1, 0, 1, false},
        {&Node{}, 0, 1, 1, false},
        {&Node{finalizedEpoch: 1, justifiedEpoch: 1}, 1, 1, 1, true},
        {&Node{finalizedEpoch: 1, justifiedEpoch: 1}, 2, 2, 2, false},
        {&Node{finalizedEpoch: 3, justifiedEpoch: 4}, 4, 3, 3, true},
        {&Node{unrealizedFinalizedEpoch: 3, unrealizedJustifiedEpoch: 4}, 3, 2, 4, true},
        {&Node{unrealizedFinalizedEpoch: 2, unrealizedJustifiedEpoch: 3}, 3, 2, 4, true},
        {&Node{unrealizedFinalizedEpoch: 1, unrealizedJustifiedEpoch: 2}, 3, 2, 4, false},
    }
    for _, tc := range tests {
        jc := &forkchoicetypes.Checkpoint{Epoch: tc.justifiedEpoch}
        fc := &forkchoicetypes.Checkpoint{Epoch: tc.finalizedEpoch}
        currentTime := uint64(time.Now().Unix())
        driftSeconds := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
        s := &Store{
            justifiedCheckpoint: jc,
            finalizedCheckpoint: fc,
            genesisTime:         currentTime - driftSeconds*uint64(tc.currentEpoch),
        }
        assert.Equal(t, tc.want, s.viableForHead(tc.n))
    }
}

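TestStore_ViableForHead_DefensivePull above derives the "current epoch" by back-dating genesisTime. A sketch of the underlying arithmetic, assuming mainnet's 12-second slots and 32-slot epochs:

package main

import (
    "fmt"
    "time"
)

const (
    secondsPerSlot = 12
    slotsPerEpoch  = 32
)

// epochsSinceGenesis mirrors the computation the test relies on:
// floor(elapsed seconds / (secondsPerSlot * slotsPerEpoch)).
func epochsSinceGenesis(genesis time.Time) uint64 {
    elapsed := time.Since(genesis).Seconds()
    if elapsed < 0 {
        return 0
    }
    return uint64(elapsed) / (secondsPerSlot * slotsPerEpoch)
}

func main() {
    // Back-date genesis by exactly 4 epochs, as the test does with
    // currentTime - driftSeconds*currentEpoch.
    genesis := time.Now().Add(-4 * secondsPerSlot * slotsPerEpoch * time.Second)
    fmt.Println(epochsSinceGenesis(genesis)) // 4
}
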
func TestStore_HasParent(t *testing.T) {
    tests := []struct {
        m map[[32]byte]uint64

@@ -53,6 +53,7 @@ go_test(
        "//beacon-chain/core/altair:go_default_library",
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/db/iface:go_default_library",
        "//beacon-chain/db/testing:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",
        "//config/params:go_default_library",

@@ -3,6 +3,7 @@ package monitor
import (
    "bytes"
    "context"
    "fmt"
    "testing"

    "github.com/prysmaticlabs/go-bitfield"
@@ -33,7 +34,7 @@ func TestGetAttestingIndices(t *testing.T) {

func TestProcessIncludedAttestationTwoTracked(t *testing.T) {
    hook := logTest.NewGlobal()
    s := setupService(t)
    s, _ := setupService(t)
    state, _ := util.DeterministicGenesisStateAltair(t, 256)
    require.NoError(t, state.SetSlot(2))
    require.NoError(t, state.SetCurrentParticipationBits(bytes.Repeat([]byte{0xff}, 13)))
@@ -66,7 +67,7 @@ func TestProcessUnaggregatedAttestationStateNotCached(t *testing.T) {
    hook := logTest.NewGlobal()
    ctx := context.Background()

    s := setupService(t)
    s, _ := setupService(t)
    state, _ := util.DeterministicGenesisStateAltair(t, 256)
    require.NoError(t, state.SetSlot(2))
    header := state.LatestBlockHeader()
@@ -98,13 +99,13 @@ func TestProcessUnaggregatedAttestationStateCached(t *testing.T) {
    ctx := context.Background()
    hook := logTest.NewGlobal()

    s := setupService(t)
    s, db := setupService(t)
    state, _ := util.DeterministicGenesisStateAltair(t, 256)
    participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
    require.NoError(t, state.SetCurrentParticipationBits(participation))

    root := [32]byte{}
    copy(root[:], "hello-world")
    root, err := util.SaveBlock(t, ctx, db, util.NewBeaconBlock()).Block().HashTreeRoot()
    require.NoError(t, err)

    att := &ethpb.Attestation{
        Data: &ethpb.AttestationData{
@@ -124,8 +125,9 @@ func TestProcessUnaggregatedAttestationStateCached(t *testing.T) {
    }
    require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
    s.processUnaggregatedAttestation(context.Background(), att)
    wanted1 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor"
    wanted2 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor"
    rootStr := fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))
    wanted1 := fmt.Sprintf("\"Processed unaggregated attestation\" Head=%s Slot=1 Source=%s Target=%s ValidatorIndex=2 prefix=monitor", rootStr, rootStr, rootStr)
    wanted2 := fmt.Sprintf("\"Processed unaggregated attestation\" Head=%s Slot=1 Source=%s Target=%s ValidatorIndex=12 prefix=monitor", rootStr, rootStr, rootStr)
    require.LogsContain(t, hook, wanted1)
    require.LogsContain(t, hook, wanted2)
}
@@ -135,7 +137,7 @@ func TestProcessAggregatedAttestationStateNotCached(t *testing.T) {
    hook := logTest.NewGlobal()
    ctx := context.Background()

    s := setupService(t)
    s, _ := setupService(t)
    state, _ := util.DeterministicGenesisStateAltair(t, 256)
    require.NoError(t, state.SetSlot(2))
    header := state.LatestBlockHeader()
@@ -170,13 +172,13 @@ func TestProcessAggregatedAttestationStateNotCached(t *testing.T) {
func TestProcessAggregatedAttestationStateCached(t *testing.T) {
    hook := logTest.NewGlobal()
    ctx := context.Background()
    s := setupService(t)
    s, db := setupService(t)
    state, _ := util.DeterministicGenesisStateAltair(t, 256)
    participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
    require.NoError(t, state.SetCurrentParticipationBits(participation))

    root := [32]byte{}
    copy(root[:], "hello-world")
    root, err := util.SaveBlock(t, ctx, db, util.NewBeaconBlock()).Block().HashTreeRoot()
    require.NoError(t, err)

    att := &ethpb.AggregateAttestationAndProof{
        AggregatorIndex: 2,
@@ -200,14 +202,15 @@ func TestProcessAggregatedAttestationStateCached(t *testing.T) {

    require.NoError(t, s.config.StateGen.SaveState(ctx, root, state))
    s.processAggregatedAttestation(ctx, att)
    require.LogsContain(t, hook, "\"Processed attestation aggregation\" AggregatorIndex=2 BeaconBlockRoot=0x68656c6c6f2d Slot=1 SourceRoot=0x68656c6c6f2d TargetRoot=0x68656c6c6f2d prefix=monitor")
    require.LogsContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor")
    require.LogsDoNotContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor")
    rootStr := fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))
    require.LogsContain(t, hook, fmt.Sprintf("\"Processed attestation aggregation\" AggregatorIndex=2 BeaconBlockRoot=%s Slot=1 SourceRoot=%s TargetRoot=%s prefix=monitor", rootStr, rootStr, rootStr))
    require.LogsContain(t, hook, fmt.Sprintf("\"Processed aggregated attestation\" Head=%s Slot=1 Source=%s Target=%s ValidatorIndex=2 prefix=monitor", rootStr, rootStr, rootStr))
    require.LogsDoNotContain(t, hook, fmt.Sprintf("\"Processed aggregated attestation\" Head=%s Slot=1 Source=%s Target=%s ValidatorIndex=12 prefix=monitor", rootStr, rootStr, rootStr))
}

func TestProcessAttestations(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
s := setupService(t)
|
||||
s, _ := setupService(t)
|
||||
ctx := context.Background()
|
||||
state, _ := util.DeterministicGenesisStateAltair(t, 256)
|
||||
require.NoError(t, state.SetSlot(2))
|
||||
|
||||
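The hard-coded 0x68656c6c6f2d strings dropped above are simply the first six bytes of "hello-world" in hex; the updated assertions derive the expected value from a real block root instead. A minimal, runnable sketch of that formatting, assuming bytesutil.Trunc keeps the leading six bytes (an assumption inferred from the old hard-coded logs):

package main

import "fmt"

// trunc mimics what bytesutil.Trunc appears to do in the assertions above:
// keep the first six bytes of a 32-byte root so log fields stay short.
func trunc(b []byte) []byte {
	if len(b) > 6 {
		return b[:6]
	}
	return b
}

func main() {
	var root [32]byte
	copy(root[:], "hello-world")
	// "hello-" is 0x68 0x65 0x6c 0x6c 0x6f 0x2d, which is exactly the
	// 0x68656c6c6f2d literal the old expectations hard-coded.
	fmt.Printf("%#x\n", trunc(root[:]))
}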
@@ -168,7 +168,7 @@ func TestProcessProposedBlock(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			hook := logTest.NewGlobal()
-			s := setupService(t)
+			s, _ := setupService(t)
 			beaconState, _ := util.DeterministicGenesisState(t, 256)
 			root := [32]byte{}
 			copy(root[:], "hello-world")
@@ -199,7 +199,7 @@ func TestProcessBlock_AllEventsTrackedVals(t *testing.T) {
 	genConfig.FullSyncAggregate = true
 	b, err := util.GenerateFullBlockAltair(genesis, keys, genConfig, 1)
 	require.NoError(t, err)
-	s := setupService(t)
+	s, db := setupService(t)

 	pubKeys := make([][]byte, 3)
 	pubKeys[0] = genesis.Validators()[0].PublicKey
@@ -223,7 +223,7 @@ func TestProcessBlock_AllEventsTrackedVals(t *testing.T) {
 	s.RUnlock()
 	s.updateSyncCommitteeTrackedVals(genesis)

-	root, err := b.GetBlock().HashTreeRoot()
+	root, err := util.SaveBlock(t, ctx, db, b).Block().HashTreeRoot()
 	require.NoError(t, err)
 	require.NoError(t, s.config.StateGen.SaveState(ctx, root, genesis))
 	wanted1 := fmt.Sprintf("\"Proposed beacon block was included\" BalanceChange=100000000 BlockRoot=%#x NewBalance=32000000000 ParentRoot=0xf732eaeb7fae ProposerIndex=15 Slot=1 Version=1 prefix=monitor", bytesutil.Trunc(root[:]))
@@ -241,7 +241,7 @@ func TestProcessBlock_AllEventsTrackedVals(t *testing.T) {

 func TestLogAggregatedPerformance(t *testing.T) {
 	hook := logTest.NewGlobal()
-	s := setupService(t)
+	s, _ := setupService(t)

 	s.logAggregatedPerformance()
 	time.Sleep(3000 * time.Millisecond)

@@ -13,7 +13,7 @@ import (

 func TestProcessSyncCommitteeContribution(t *testing.T) {
 	hook := logTest.NewGlobal()
-	s := setupService(t)
+	s, _ := setupService(t)

 	contrib := &ethpb.SignedContributionAndProof{
 		Message: &ethpb.ContributionAndProof{
@@ -28,7 +28,7 @@ func TestProcessSyncCommitteeContribution(t *testing.T) {

 func TestProcessSyncAggregate(t *testing.T) {
 	hook := logTest.NewGlobal()
-	s := setupService(t)
+	s, _ := setupService(t)
 	beaconState, _ := util.DeterministicGenesisStateAltair(t, 256)

 	block := &ethpb.BeaconBlockAltair{
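The recurring change in these tests is the save-then-hash pattern: the generated block is persisted to the test database before its root is computed, so code under test that later resolves the root against the DB actually finds a block. A self-contained sketch of the idea, using stand-in types rather than Prysm's (the digest below is sha256, standing in for HashTreeRoot):

package main

import (
	"context"
	"crypto/sha256"
	"fmt"
)

// block and store are stand-ins (assumptions, not Prysm types) for a beacon
// block and the beacon DB used by the tests above.
type block struct{ slot uint64 }

type store struct{ blocks map[[32]byte]block }

// root stands in for HashTreeRoot: any deterministic 32-byte digest.
func (b block) root() [32]byte {
	return sha256.Sum256([]byte(fmt.Sprintf("block-%d", b.slot)))
}

// saveAndRoot mirrors the util.SaveBlock(...).Block().HashTreeRoot() pattern:
// persist the block first, then hand its root to the code under test, so a
// later lookup by root succeeds.
func saveAndRoot(_ context.Context, s *store, b block) [32]byte {
	r := b.root()
	s.blocks[r] = b
	return r
}

func main() {
	s := &store{blocks: make(map[[32]byte]block)}
	r := saveAndRoot(context.Background(), s, block{slot: 1})
	_, ok := s.blocks[r]
	fmt.Println("block retrievable by root:", ok)
}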
@@ -11,6 +11,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/altair"
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed"
 	statefeed "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed/state"
+	"github.com/prysmaticlabs/prysm/v3/beacon-chain/db/iface"
 	testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
 	"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
@@ -22,7 +23,7 @@ import (
 	logTest "github.com/sirupsen/logrus/hooks/test"
 )

-func setupService(t *testing.T) *Service {
+func setupService(t *testing.T) (*Service, iface.Database) {
 	beaconDB := testDB.SetupDB(t)
 	state, _ := util.DeterministicGenesisStateAltair(t, 256)

@@ -100,7 +101,7 @@ func setupService(t *testing.T) *Service {
 		aggregatedPerformance:       aggregatedPerformance,
 		trackedSyncCommitteeIndices: trackedSyncCommitteeIndices,
 		lastSyncedEpoch:             0,
-	}
+	}, beaconDB
 }

 func TestTrackedIndex(t *testing.T) {
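setupService already created the test database internally; the change above makes it hand that database back to callers, so tests that need to persist blocks can do so and tests that do not simply discard the second return value. A runnable illustration of the multi-return helper pattern, with stand-in types:

package main

import "fmt"

// service and database are stand-ins (assumptions) for *Service and
// iface.Database in the diff above.
type service struct{ name string }
type database struct{ path string }

// setup mirrors the new setupService shape: a helper that owns a resource
// returns it alongside the main value instead of hiding it.
func setup() (*service, *database) {
	db := &database{path: "tmp/beacon.db"}
	return &service{name: "monitor"}, db
}

func main() {
	// Callers that need the DB take both return values...
	s, db := setup()
	fmt.Println(s.name, db.path)
	// ...and callers that don't discard it with the blank identifier.
	s2, _ := setup()
	fmt.Println(s2.name)
}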
@@ -116,7 +117,7 @@ func TestTrackedIndex(t *testing.T) {

 func TestUpdateSyncCommitteeTrackedVals(t *testing.T) {
 	hook := logTest.NewGlobal()
-	s := setupService(t)
+	s, _ := setupService(t)
 	state, _ := util.DeterministicGenesisStateAltair(t, 1024)

 	s.updateSyncCommitteeTrackedVals(state)
@@ -138,7 +139,7 @@ func TestNewService(t *testing.T) {

 func TestStart(t *testing.T) {
 	hook := logTest.NewGlobal()
-	s := setupService(t)
+	s, _ := setupService(t)
 	stateChannel := make(chan *feed.Event, 1)
 	stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
 	defer stateSub.Unsubscribe()
@@ -180,7 +181,7 @@ func TestStart(t *testing.T) {
 func TestInitializePerformanceStructures(t *testing.T) {
 	hook := logTest.NewGlobal()
 	ctx := context.Background()
-	s := setupService(t)
+	s, _ := setupService(t)
 	state, err := s.config.HeadFetcher.HeadState(ctx)
 	require.NoError(t, err)
 	epoch := slots.ToEpoch(state.Slot())
@@ -222,7 +223,7 @@ func TestInitializePerformanceStructures(t *testing.T) {
 func TestMonitorRoutine(t *testing.T) {
 	ctx := context.Background()
 	hook := logTest.NewGlobal()
-	s := setupService(t)
+	s, db := setupService(t)
 	stateChannel := make(chan *feed.Event, 1)
 	stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)

@@ -242,7 +243,7 @@ func TestMonitorRoutine(t *testing.T) {
 	genConfig := util.DefaultBlockGenConfig()
 	block, err := util.GenerateFullBlockAltair(genesis, keys, genConfig, 1)
 	require.NoError(t, err)
-	root, err := block.GetBlock().HashTreeRoot()
+	root, err := util.SaveBlock(t, ctx, db, block).Block().HashTreeRoot()
 	require.NoError(t, err)
 	require.NoError(t, s.config.StateGen.SaveState(ctx, root, genesis))

@@ -266,7 +267,7 @@ func TestMonitorRoutine(t *testing.T) {
 }

 func TestWaitForSync(t *testing.T) {
-	s := setupService(t)
+	s, _ := setupService(t)
 	stateChannel := make(chan *feed.Event, 1)
 	stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
 	defer stateSub.Unsubscribe()
@@ -290,7 +291,7 @@ func TestWaitForSync(t *testing.T) {

 func TestRun(t *testing.T) {
 	hook := logTest.NewGlobal()
-	s := setupService(t)
+	s, _ := setupService(t)
 	stateChannel := make(chan *feed.Event, 1)
 	stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)

@@ -26,7 +26,6 @@ go_library(
        "//beacon-chain/db/slasherkv:go_default_library",
        "//beacon-chain/deterministic-genesis:go_default_library",
        "//beacon-chain/execution:go_default_library",
-       "//beacon-chain/forkchoice:go_default_library",
        "//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
        "//beacon-chain/forkchoice/protoarray:go_default_library",
        "//beacon-chain/gateway:go_default_library",

@@ -28,7 +28,6 @@ import (
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/db/slasherkv"
 	interopcoldstart "github.com/prysmaticlabs/prysm/v3/beacon-chain/deterministic-genesis"
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/execution"
-	"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice"
 	doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/protoarray"
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/gateway"
@@ -100,14 +99,12 @@ type BeaconNode struct {
 	stateFeed               *event.Feed
 	blockFeed               *event.Feed
 	opFeed                  *event.Feed
-	forkChoiceStore         forkchoice.ForkChoicer
 	stateGen                *stategen.State
 	collector               *bcnodeCollector
 	slasherBlockHeadersFeed *event.Feed
 	slasherAttestationsFeed *event.Feed
 	finalizedStateAtStartUp state.BeaconState
 	serviceFlagOpts         *serviceFlagOpts
-	blockchainFlagOpts      []blockchain.Option
 	GenesisInitializer      genesis.Initializer
 	CheckpointInitializer   checkpoint.Initializer
 }
@@ -229,9 +226,6 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
 		return nil, err
 	}

-	log.Debugln("Starting Fork Choice")
-	beacon.startForkChoice()
-
 	log.Debugln("Registering Blockchain Service")
 	if err := beacon.registerBlockchainService(); err != nil {
 		return nil, err
@@ -355,14 +349,6 @@ func (b *BeaconNode) Close() {
 	close(b.stop)
 }

-func (b *BeaconNode) startForkChoice() {
-	if !features.Get().DisableForkchoiceDoublyLinkedTree {
-		b.forkChoiceStore = doublylinkedtree.New()
-	} else {
-		b.forkChoiceStore = protoarray.New()
-	}
-}
-
 func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
 	baseDir := cliCtx.String(cmd.DataDirFlag.Name)
 	dbPath := filepath.Join(baseDir, kv.BeaconNodeDbDirName)
@@ -371,9 +357,7 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {

 	log.WithField("database-path", dbPath).Info("Checking DB")

-	d, err := db.NewDB(b.ctx, dbPath, &kv.Config{
-		InitialMMapSize: cliCtx.Int(cmd.BoltMMapInitialSizeFlag.Name),
-	})
+	d, err := db.NewDB(b.ctx, dbPath)
 	if err != nil {
 		return err
 	}
@@ -395,9 +379,7 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
 	if err := d.ClearDB(); err != nil {
 		return errors.Wrap(err, "could not clear database")
 	}
-	d, err = db.NewDB(b.ctx, dbPath, &kv.Config{
-		InitialMMapSize: cliCtx.Int(cmd.BoltMMapInitialSizeFlag.Name),
-	})
+	d, err = db.NewDB(b.ctx, dbPath)
 	if err != nil {
 		return errors.Wrap(err, "could not create new database")
 	}
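db.NewDB (and, below, slasherkv.NewKVStore) drops its trailing config parameter here, so the Bolt mmap sizing knob no longer threads through every call site. One common way to keep such knobs available without a required config struct is variadic functional options; the sketch below is illustrative only (these names are not Prysm's API):

package main

import "fmt"

// config holds tuning knobs that used to be a required struct parameter.
type config struct{ initialMMapSize int }

// Option is the usual functional-option shape.
type Option func(*config)

// WithInitialMMapSize is an illustrative option, not a real Prysm API.
func WithInitialMMapSize(n int) Option {
	return func(c *config) { c.initialMMapSize = n }
}

// newDB shows the simplified call shape: required args only, options last.
func newDB(path string, opts ...Option) (*config, error) {
	c := &config{initialMMapSize: 0} // default when no option is given
	for _, o := range opts {
		o(c)
	}
	fmt.Println("opening", path, "mmap:", c.initialMMapSize)
	return c, nil
}

func main() {
	// Most callers, like the diff above, now pass nothing extra.
	_, _ = newDB("/var/lib/prysm/beacon.db")
	// Tuning stays possible without widening every call site.
	_, _ = newDB("/var/lib/prysm/beacon.db", WithInitialMMapSize(1<<20))
}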
@@ -467,9 +449,7 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {

 	log.WithField("database-path", dbPath).Info("Checking DB")

-	d, err := slasherkv.NewKVStore(b.ctx, dbPath, &slasherkv.Config{
-		InitialMMapSize: cliCtx.Int(cmd.BoltMMapInitialSizeFlag.Name),
-	})
+	d, err := slasherkv.NewKVStore(b.ctx, dbPath)
 	if err != nil {
 		return err
 	}
@@ -491,9 +471,7 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
 	if err := d.ClearDB(); err != nil {
 		return errors.Wrap(err, "could not clear database")
 	}
-	d, err = slasherkv.NewKVStore(b.ctx, dbPath, &slasherkv.Config{
-		InitialMMapSize: cliCtx.Int(cmd.BoltMMapInitialSizeFlag.Name),
-	})
+	d, err = slasherkv.NewKVStore(b.ctx, dbPath)
 	if err != nil {
 		return errors.Wrap(err, "could not create new database")
 	}
@@ -559,7 +537,6 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
 		AllowListCIDR: cliCtx.String(cmd.P2PAllowList.Name),
 		DenyListCIDR:  slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
 		EnableUPnP:    cliCtx.Bool(cmd.EnableUPnPFlag.Name),
-		DisableDiscv5: cliCtx.Bool(flags.DisableDiscv5.Name),
 		StateNotifier: b,
 		DB:            b.db,
 	})
@@ -618,13 +595,19 @@ func (b *BeaconNode) registerBlockchainService() error {
 		blockchain.WithSlashingPool(b.slashingsPool),
 		blockchain.WithP2PBroadcaster(b.fetchP2P()),
 		blockchain.WithStateNotifier(b),
-		blockchain.WithForkChoiceStore(b.forkChoiceStore),
 		blockchain.WithAttestationService(attService),
 		blockchain.WithStateGen(b.stateGen),
 		blockchain.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
 		blockchain.WithFinalizedStateAtStartUp(b.finalizedStateAtStartUp),
+		blockchain.WithProposerIdsCache(b.proposerIdsCache),
 	)

+	if features.Get().DisableForkchoiceDoublyLinkedTree {
+		opts = append(opts, blockchain.WithForkChoiceStore(protoarray.New()))
+	} else {
+		opts = append(opts, blockchain.WithForkChoiceStore(doublylinkedtree.New()))
+	}
+
 	blockchainService, err := blockchain.NewService(b.ctx, opts...)
 	if err != nil {
 		return errors.Wrap(err, "could not register blockchain service")
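The net effect of the node.go hunks above: the fork-choice store is no longer a BeaconNode field initialized by a dedicated startForkChoice step; instead the node appends the store to the blockchain service's functional options, picking the doubly-linked-tree implementation unless the feature flag disables it. A self-contained sketch of that selection pattern (all types below are stand-ins, not Prysm's):

package main

import "fmt"

// forkChoicer stands in for the forkchoice.ForkChoicer interface.
type forkChoicer interface{ Name() string }

type doublyLinkedTree struct{}

func (doublyLinkedTree) Name() string { return "doubly-linked-tree" }

type protoArray struct{}

func (protoArray) Name() string { return "protoarray" }

// option mutates a service config, mirroring blockchain.With... options.
type serviceCfg struct{ fc forkChoicer }
type option func(*serviceCfg)

func withForkChoiceStore(fc forkChoicer) option {
	return func(c *serviceCfg) { c.fc = fc }
}

func main() {
	disableDoublyLinkedTree := false // stand-in for the feature flag
	var opts []option
	if disableDoublyLinkedTree {
		opts = append(opts, withForkChoiceStore(protoArray{}))
	} else {
		opts = append(opts, withForkChoiceStore(doublyLinkedTree{}))
	}
	cfg := &serviceCfg{}
	for _, o := range opts {
		o(cfg)
	}
	fmt.Println("fork choice:", cfg.fc.Name())
}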
@@ -852,7 +835,7 @@ func (b *BeaconNode) registerRPCService() error {
 	return b.services.RegisterService(rpcService)
 }

-func (b *BeaconNode) registerPrometheusService(cliCtx *cli.Context) error {
+func (b *BeaconNode) registerPrometheusService(_ *cli.Context) error {
 	var additionalHandlers []prometheus.Handler
 	var p *p2p.Service
 	if err := b.services.FetchService(&p); err != nil {

@@ -35,6 +35,7 @@ go_library(
    importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p",
    visibility = [
        "//beacon-chain:__subpackages__",
+       "//cmd:__subpackages__",
        "//testing/endtoend/evaluators:__pkg__",
        "//tools:__subpackages__",
    ],

@@ -10,7 +10,6 @@ import (
 type Config struct {
 	NoDiscovery         bool
 	EnableUPnP          bool
-	DisableDiscv5       bool
 	StaticPeers         []string
 	BootstrapNodeAddr   []string
 	Discv5BootStrapAddr []string

@@ -332,6 +332,31 @@ func (s *Service) isPeerAtLimit(inbound bool) bool {
 	return activePeers >= maxPeers || numOfConns >= maxPeers
 }

+// PeersFromStringAddrs converts peer raw ENRs into multiaddrs for p2p.
+func PeersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) {
+	var allAddrs []ma.Multiaddr
+	enodeString, multiAddrString := parseGenericAddrs(addrs)
+	for _, stringAddr := range multiAddrString {
+		addr, err := multiAddrFromString(stringAddr)
+		if err != nil {
+			return nil, errors.Wrapf(err, "Could not get multiaddr from string")
+		}
+		allAddrs = append(allAddrs, addr)
+	}
+	for _, stringAddr := range enodeString {
+		enodeAddr, err := enode.Parse(enode.ValidSchemes, stringAddr)
+		if err != nil {
+			return nil, errors.Wrapf(err, "Could not get enode from string")
+		}
+		addr, err := convertToSingleMultiAddr(enodeAddr)
+		if err != nil {
+			return nil, errors.Wrapf(err, "Could not get multiaddr")
+		}
+		allAddrs = append(allAddrs, addr)
+	}
+	return allAddrs, nil
+}
+
 func parseBootStrapAddrs(addrs []string) (discv5Nodes []string) {
 	discv5Nodes, _ = parseGenericAddrs(addrs)
 	if len(discv5Nodes) == 0 {
@@ -435,30 +460,6 @@ func convertToUdpMultiAddr(node *enode.Node) ([]ma.Multiaddr, error) {
 	return addresses, nil
 }

-func peersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) {
-	var allAddrs []ma.Multiaddr
-	enodeString, multiAddrString := parseGenericAddrs(addrs)
-	for _, stringAddr := range multiAddrString {
-		addr, err := multiAddrFromString(stringAddr)
-		if err != nil {
-			return nil, errors.Wrapf(err, "Could not get multiaddr from string")
-		}
-		allAddrs = append(allAddrs, addr)
-	}
-	for _, stringAddr := range enodeString {
-		enodeAddr, err := enode.Parse(enode.ValidSchemes, stringAddr)
-		if err != nil {
-			return nil, errors.Wrapf(err, "Could not get enode from string")
-		}
-		addr, err := convertToSingleMultiAddr(enodeAddr)
-		if err != nil {
-			return nil, errors.Wrapf(err, "Could not get multiaddr")
-		}
-		allAddrs = append(allAddrs, addr)
-	}
-	return allAddrs, nil
-}
-
 func multiAddrFromString(address string) (ma.Multiaddr, error) {
 	return ma.NewMultiaddr(address)
 }
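The function body is unchanged; it is simply promoted from peersFromStringAddrs to the exported PeersFromStringAddrs, which together with the //cmd:__subpackages__ visibility additions lets command-line tooling reuse it. parseGenericAddrs is not shown in this diff, but conceptually it splits raw strings into enode/ENR records versus multiaddrs. A stdlib-only guess at that split, for illustration only (this is an assumption about its behavior, not the real implementation):

package main

import (
	"fmt"
	"strings"
)

// splitGenericAddrs classifies each string as an enode/ENR record or a
// multiaddr by its prefix; the real parseGenericAddrs may differ.
func splitGenericAddrs(addrs []string) (enodes, multiAddrs []string) {
	for _, a := range addrs {
		if strings.HasPrefix(a, "enr:") || strings.HasPrefix(a, "enode://") {
			enodes = append(enodes, a)
		} else {
			multiAddrs = append(multiAddrs, a)
		}
	}
	return enodes, multiAddrs
}

func main() {
	// Both inputs are made-up examples, not real peers.
	enodes, mas := splitGenericAddrs([]string{
		"enr:-KG4QExampleRecordOnly",
		"/ip4/127.0.0.1/tcp/13000/p2p/16Uiu2HAmExamplePeerID",
	})
	fmt.Println(len(enodes), "enode records,", len(mas), "multiaddrs")
}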
@@ -11,6 +11,7 @@ go_library(
    importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/encoder",
    visibility = [
        "//beacon-chain:__subpackages__",
+       "//cmd:__subpackages__",
    ],
    deps = [
        "//config/params:go_default_library",

@@ -21,11 +21,10 @@ import (
 type P2P interface {
 	Broadcaster
 	SetStreamHandler
-	EncodingProvider
 	PubSubProvider
 	PubSubTopicUser
+	SenderEncoder
 	PeerManager
-	Sender
 	ConnectionHandler
 	PeersProvider
 	MetadataProvider
@@ -59,6 +58,12 @@ type ConnectionHandler interface {
 	connmgr.ConnectionGater
}

+// SenderEncoder allows sending functionality from libp2p as well as encoding for requests and responses.
+type SenderEncoder interface {
+	EncodingProvider
+	Sender
+}
+
 // EncodingProvider provides p2p network encoding.
 type EncodingProvider interface {
 	Encoding() encoder.NetworkEncoding
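SenderEncoder bundles EncodingProvider and Sender so callers that only need "send and encode" can depend on one small composite instead of the whole P2P surface. A runnable sketch of Go's interface-embedding pattern, with assumed stand-in shapes rather than Prysm's real interfaces:

package main

import "fmt"

// Stand-ins for the diff's interfaces.
type encodingProvider interface{ Encoding() string }
type sender interface{ Send(msg string) error }

// senderEncoder composes the two, exactly as SenderEncoder does above.
type senderEncoder interface {
	encodingProvider
	sender
}

// host satisfies both embedded interfaces, so it satisfies senderEncoder too.
type host struct{}

func (host) Encoding() string { return "ssz-snappy" }

func (host) Send(msg string) error {
	fmt.Println("sending:", msg)
	return nil
}

// request depends only on the narrow composite, not on a wide P2P interface.
func request(se senderEncoder) error {
	return se.Send("ping encoded as " + se.Encoding())
}

func main() {
	if err := request(host{}); err != nil {
		fmt.Println("error:", err)
	}
}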
@@ -29,7 +29,7 @@ func logIPAddr(id peer.ID, addrs ...ma.Multiaddr) {

 func logExternalIPAddr(id peer.ID, addr string, port uint) {
 	if addr != "" {
-		multiAddr, err := multiAddressBuilder(addr, port)
+		multiAddr, err := MultiAddressBuilder(addr, port)
 		if err != nil {
 			log.WithError(err).Error("Could not create multiaddress")
 			return

@@ -1,20 +1,39 @@
 package p2p

 import (
+	"strings"
+
+	"github.com/libp2p/go-libp2p-core/peer"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
 )

 var (
+	knownAgentVersions = []string{
+		"lighthouse",
+		"nimbus",
+		"prysm",
+		"teku",
+		"js-libp2p",
+		"rust-libp2p",
+	}
 	p2pPeerCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
 		Name: "p2p_peer_count",
 		Help: "The number of peers in a given state.",
 	},
 		[]string{"state"})
-	totalPeerCount = promauto.NewGauge(prometheus.GaugeOpts{
-		Name: "libp2p_peers",
-		Help: "Tracks the total number of libp2p peers",
-	})
+	connectedPeersCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
+		Name: "connected_libp2p_peers",
+		Help: "Tracks the total number of connected libp2p peers by agent string",
+	},
+		[]string{"agent"},
+	)
+	avgScoreConnectedClients = promauto.NewGaugeVec(prometheus.GaugeOpts{
+		Name: "connected_libp2p_peers_average_scores",
+		Help: "Tracks the overall p2p scores of connected libp2p peers by agent string",
+	},
+		[]string{"agent"},
+	)
 	repeatPeerConnections = promauto.NewCounter(prometheus.CounterOpts{
 		Name: "p2p_repeat_attempts",
 		Help: "The number of repeat attempts the connection handler is triggered for a peer.",
@@ -46,10 +65,60 @@ var (
 )

 func (s *Service) updateMetrics() {
-	totalPeerCount.Set(float64(len(s.peers.Connected())))
-	p2pPeerCount.WithLabelValues("Connected").Set(float64(len(s.peers.Connected())))
+	connectedPeers := s.peers.Connected()
+	p2pPeerCount.WithLabelValues("Connected").Set(float64(len(connectedPeers)))
 	p2pPeerCount.WithLabelValues("Disconnected").Set(float64(len(s.peers.Disconnected())))
 	p2pPeerCount.WithLabelValues("Connecting").Set(float64(len(s.peers.Connecting())))
 	p2pPeerCount.WithLabelValues("Disconnecting").Set(float64(len(s.peers.Disconnecting())))
 	p2pPeerCount.WithLabelValues("Bad").Set(float64(len(s.peers.Bad())))
+
+	store := s.Host().Peerstore()
+	numConnectedPeersByClient := make(map[string]float64)
+	peerScoresByClient := make(map[string][]float64)
+	for i := 0; i < len(connectedPeers); i++ {
+		p := connectedPeers[i]
+		pid, err := peer.Decode(p.String())
+		if err != nil {
+			log.WithError(err).Debug("Could not decode peer string")
+			continue
+		}
+
+		// Get the agent data.
+		rawAgent, err := store.Get(pid, "AgentVersion")
+		agent, ok := rawAgent.(string)
+		if err != nil || !ok {
+			agent = "unknown"
+		}
+		foundName := "unknown"
+		for _, knownAgent := range knownAgentVersions {
+			// If the agent string matches one of our known agents, we set
+			// the value to our own, sanitized string.
+			if strings.Contains(strings.ToLower(agent), knownAgent) {
+				foundName = knownAgent
+			}
+		}
+		numConnectedPeersByClient[foundName] += 1
+
+		// Get peer scoring data.
+		overallScore := s.peers.Scorers().Score(pid)
+		peerScoresByClient[foundName] = append(peerScoresByClient[foundName], overallScore)
+	}
+	for agent, total := range numConnectedPeersByClient {
+		connectedPeersCount.WithLabelValues(agent).Set(total)
+	}
+	for agent, scoringData := range peerScoresByClient {
+		avgScore := average(scoringData)
+		avgScoreConnectedClients.WithLabelValues(agent).Set(avgScore)
+	}
+}
+
+func average(xs []float64) float64 {
+	if len(xs) == 0 {
+		return 0
+	}
+	total := 0.0
+	for _, v := range xs {
+		total += v
+	}
+	return total / float64(len(xs))
 }
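The new metrics bucket each connected peer by client implementation by substring-matching the libp2p AgentVersion string against a known list, then export a per-client count and average score. A self-contained sketch of exactly that classification and averaging logic (the peer data below is made up):

package main

import (
	"fmt"
	"strings"
)

var knownAgents = []string{"lighthouse", "nimbus", "prysm", "teku", "js-libp2p", "rust-libp2p"}

// classify reduces a raw AgentVersion like "Prysm/v3.1.0/abcdef" to a known,
// sanitized label, defaulting to "unknown", the same idea as updateMetrics.
func classify(agentVersion string) string {
	found := "unknown"
	for _, known := range knownAgents {
		if strings.Contains(strings.ToLower(agentVersion), known) {
			found = known
		}
	}
	return found
}

func average(xs []float64) float64 {
	if len(xs) == 0 {
		return 0
	}
	total := 0.0
	for _, v := range xs {
		total += v
	}
	return total / float64(len(xs))
}

func main() {
	// Made-up peers: agent string plus an overall score.
	peers := map[string]float64{
		"Prysm/v3.1.0":      0.9,
		"teku/22.8.0":       0.7,
		"weird-client/0.1":  0.1,
		"Lighthouse/v3.0.0": 0.8,
	}
	scoresByClient := make(map[string][]float64)
	for agent, score := range peers {
		name := classify(agent)
		scoresByClient[name] = append(scoresByClient[name], score)
	}
	for client, scores := range scoresByClient {
		fmt.Printf("%s: %d peer(s), avg score %.2f\n", client, len(scores), average(scores))
	}
}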
@@ -16,10 +16,22 @@ import (
 	"github.com/prysmaticlabs/prysm/v3/runtime/version"
 )

+// MultiAddressBuilder takes in an ip address string and port to produce a go multiaddr format.
+func MultiAddressBuilder(ipAddr string, port uint) (ma.Multiaddr, error) {
+	parsedIP := net.ParseIP(ipAddr)
+	if parsedIP.To4() == nil && parsedIP.To16() == nil {
+		return nil, errors.Errorf("invalid ip address provided: %s", ipAddr)
+	}
+	if parsedIP.To4() != nil {
+		return ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ipAddr, port))
+	}
+	return ma.NewMultiaddr(fmt.Sprintf("/ip6/%s/tcp/%d", ipAddr, port))
+}
+
 // buildOptions for the libp2p host.
 func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Option {
 	cfg := s.cfg
-	listen, err := multiAddressBuilder(ip.String(), cfg.TCPPort)
+	listen, err := MultiAddressBuilder(ip.String(), cfg.TCPPort)
 	if err != nil {
 		log.WithError(err).Fatal("Failed to p2p listen")
 	}
@@ -27,7 +39,7 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Opt
 	if net.ParseIP(cfg.LocalIP) == nil {
 		log.Fatalf("Invalid local ip provided: %s", cfg.LocalIP)
 	}
-	listen, err = multiAddressBuilder(cfg.LocalIP, cfg.TCPPort)
+	listen, err = MultiAddressBuilder(cfg.LocalIP, cfg.TCPPort)
 	if err != nil {
 		log.WithError(err).Fatal("Failed to p2p listen")
 	}
@@ -65,7 +77,7 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Opt
 	}
 	if cfg.HostAddress != "" {
 		options = append(options, libp2p.AddrsFactory(func(addrs []ma.Multiaddr) []ma.Multiaddr {
-			external, err := multiAddressBuilder(cfg.HostAddress, cfg.TCPPort)
+			external, err := MultiAddressBuilder(cfg.HostAddress, cfg.TCPPort)
 			if err != nil {
 				log.WithError(err).Error("Unable to create external multiaddress")
 			} else {
@@ -90,17 +102,6 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Opt
 	return options
 }

-func multiAddressBuilder(ipAddr string, port uint) (ma.Multiaddr, error) {
-	parsedIP := net.ParseIP(ipAddr)
-	if parsedIP.To4() == nil && parsedIP.To16() == nil {
-		return nil, errors.Errorf("invalid ip address provided: %s", ipAddr)
-	}
-	if parsedIP.To4() != nil {
-		return ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ipAddr, port))
-	}
-	return ma.NewMultiaddr(fmt.Sprintf("/ip6/%s/tcp/%d", ipAddr, port))
-}
-
 func multiAddressBuilderWithID(ipAddr, protocol string, port uint, id peer.ID) (ma.Multiaddr, error) {
 	parsedIP := net.ParseIP(ipAddr)
 	if parsedIP.To4() == nil && parsedIP.To16() == nil {
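MultiAddressBuilder, now exported so log.go and other packages can call it, picks the /ip4 or /ip6 multiaddr prefix based on what net.ParseIP yields. A stdlib-only sketch of the same branching, returning the string form instead of a real ma.Multiaddr:

package main

import (
	"fmt"
	"net"
)

// multiAddrString mirrors MultiAddressBuilder's branching with the standard
// library only: To4() is non-nil for IPv4 addresses, so anything else that
// parses is treated as IPv6.
func multiAddrString(ipAddr string, port uint) (string, error) {
	parsedIP := net.ParseIP(ipAddr)
	if parsedIP == nil {
		return "", fmt.Errorf("invalid ip address provided: %s", ipAddr)
	}
	if parsedIP.To4() != nil {
		return fmt.Sprintf("/ip4/%s/tcp/%d", ipAddr, port), nil
	}
	return fmt.Sprintf("/ip6/%s/tcp/%d", ipAddr, port), nil
}

func main() {
	for _, ip := range []string{"192.168.0.10", "2001:db8::1"} {
		s, err := multiAddrString(ip, 13000)
		if err != nil {
			fmt.Println("error:", err)
			continue
		}
		fmt.Println(s) // /ip4/192.168.0.10/tcp/13000, then /ip6/2001:db8::1/tcp/13000
	}
}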
@@ -15,6 +15,7 @@ import (
 	mock "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain/testing"
 	"github.com/prysmaticlabs/prysm/v3/config/params"
 	ecdsaprysm "github.com/prysmaticlabs/prysm/v3/crypto/ecdsa"
+	"github.com/prysmaticlabs/prysm/v3/network"
 	"github.com/prysmaticlabs/prysm/v3/testing/assert"
 	"github.com/prysmaticlabs/prysm/v3/testing/require"
 )
@@ -89,7 +90,7 @@ func TestDefaultMultiplexers(t *testing.T) {
 	var err error
 	svc.privKey, err = privKey(svc.cfg)
 	assert.NoError(t, err)
-	ipAddr := ipAddr()
+	ipAddr := network.IPAddr()
 	opts := svc.buildOptions(ipAddr, svc.privKey)
 	err = cfg.Apply(append(opts, libp2p.FallbackDefaults)...)
 	assert.NoError(t, err)

@@ -7,7 +7,10 @@ go_library(
        "status.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/peers",
-   visibility = ["//beacon-chain:__subpackages__"],
+   visibility = [
+       "//beacon-chain:__subpackages__",
+       "//cmd:__subpackages__",
+   ],
    deps = [
        "//beacon-chain/p2p/peers/peerdata:go_default_library",
        "//beacon-chain/p2p/peers/scorers:go_default_library",

@@ -30,6 +30,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/peers/scorers"
 	"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/types"
 	"github.com/prysmaticlabs/prysm/v3/config/params"
+	prysmnetwork "github.com/prysmaticlabs/prysm/v3/network"
 	"github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1/metadata"
 	"github.com/prysmaticlabs/prysm/v3/runtime"
 	"github.com/prysmaticlabs/prysm/v3/time/slots"
@@ -107,7 +108,7 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {

 	cfg.Discv5BootStrapAddr = dv5Nodes

-	ipAddr := ipAddr()
+	ipAddr := prysmnetwork.IPAddr()
 	s.privKey, err = privKey(s.cfg)
 	if err != nil {
 		log.WithError(err).Error("Failed to generate p2p private key")
@@ -200,8 +201,8 @@ func (s *Service) Start() {
 		}
 	}

-	if !s.cfg.NoDiscovery && !s.cfg.DisableDiscv5 {
-		ipAddr := ipAddr()
+	if !s.cfg.NoDiscovery {
+		ipAddr := prysmnetwork.IPAddr()
 		listener, err := s.startDiscoveryV5(
 			ipAddr,
 			s.privKey,
@@ -224,7 +225,7 @@ func (s *Service) Start() {
 	s.started = true

 	if len(s.cfg.StaticPeers) > 0 {
-		addrs, err := peersFromStringAddrs(s.cfg.StaticPeers)
+		addrs, err := PeersFromStringAddrs(s.cfg.StaticPeers)
 		if err != nil {
 			log.WithError(err).Error("Could not connect to static peer")
 		}
@@ -244,9 +245,7 @@ func (s *Service) Start() {
 	})
 	async.RunEvery(s.ctx, 30*time.Minute, s.Peers().Prune)
 	async.RunEvery(s.ctx, params.BeaconNetworkConfig().RespTimeout, s.updateMetrics)
-	async.RunEvery(s.ctx, refreshRate, func() {
-		s.RefreshENR()
-	})
+	async.RunEvery(s.ctx, refreshRate, s.RefreshENR)
 	async.RunEvery(s.ctx, 1*time.Minute, func() {
 		log.WithFields(logrus.Fields{
 			"inbound": len(s.peers.InboundConnected()),
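Two small simplifications are worth spelling out here: the unexported ipAddr helper moves to the shared network package as network.IPAddr (imported as prysmnetwork inside the p2p service to avoid clashing with the local ipAddr variable), and RefreshENR is passed as a method value instead of being wrapped in a closure. The latter works because a method value in Go captures its receiver; a runnable illustration with stand-in types:

package main

import "fmt"

type service struct{ enr int }

// refreshENR has the same niladic shape as s.RefreshENR in the diff.
func (s *service) refreshENR() {
	s.enr++
	fmt.Println("refreshed, version", s.enr)
}

// runEvery is a toy stand-in for async.RunEvery: it invokes fn a few times
// instead of scheduling it on a ticker.
func runEvery(fn func()) {
	for i := 0; i < 3; i++ {
		fn()
	}
}

func main() {
	s := &service{}
	// The closure form and the method-value form are equivalent here:
	runEvery(func() { s.refreshENR() })
	runEvery(s.refreshENR) // the method value captures the receiver s
}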
@@ -11,6 +11,7 @@ go_library(
    importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/types",
    visibility = [
        "//beacon-chain:__subpackages__",
+       "//cmd:__subpackages__",
        "//slasher/rpc:__pkg__",
        "//testing/util:__pkg__",
        "//validator/client:__pkg__",

@@ -19,7 +19,6 @@ import (
 	"github.com/prysmaticlabs/prysm/v3/consensus-types/wrapper"
 	ecdsaprysm "github.com/prysmaticlabs/prysm/v3/crypto/ecdsa"
 	"github.com/prysmaticlabs/prysm/v3/io/file"
-	"github.com/prysmaticlabs/prysm/v3/network"
 	pb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1/metadata"
 	"github.com/sirupsen/logrus"
@@ -129,15 +128,6 @@ func metaDataFromConfig(cfg *Config) (metadata.Metadata, error) {
 	return wrapper.WrappedMetadataV0(metaData), nil
 }

-// Retrieves an external ipv4 address and converts into a libp2p formatted value.
-func ipAddr() net.IP {
-	ip, err := network.ExternalIP()
-	if err != nil {
-		log.WithError(err).Fatal("Could not get IPv4 address")
-	}
-	return net.ParseIP(ip)
-}
-
 // Attempt to dial an address to verify its connectivity
 func verifyConnectivity(addr string, port uint, protocol string) {
 	if addr != "" {

@@ -6,12 +6,6 @@ datadir: /var/lib/prysm/beacon
 # http-web3provider: ETH1 API endpoint, eg. http://localhost:8545 for a local geth service on the default port
 http-web3provider: http://localhost:8545

-# fallback-web3provider: List of backup ETH1 API endpoints, used if above is not working
-# For example:
-# fallback-web3provider:
-#   - https://mainnet.infura.io/v3/YOUR-PROJECT-ID
-#   - https://eth-mainnet.alchemyapi.io/v2/YOUR-PROJECT-ID
-

 # Optional tuning parameters
 # For full list, see https://docs.prylabs.network/docs/prysm-usage/parameters
@@ -41,6 +41,29 @@ func wrapFeeRecipientsArray(
 	return true, nil
 }

+// https://ethereum.github.io/beacon-APIs/#/Validator/registerValidator expects posting a top-level array.
+// We make it more proto-friendly by wrapping it in a struct.
+func wrapSignedValidatorRegistrationsArray(
+	endpoint *apimiddleware.Endpoint,
+	_ http.ResponseWriter,
+	req *http.Request,
+) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
+	if _, ok := endpoint.PostRequest.(*signedValidatorRegistrationsRequestJson); !ok {
+		return true, nil
+	}
+	registrations := make([]*signedValidatorRegistrationJson, 0)
+	if err := json.NewDecoder(req.Body).Decode(&registrations); err != nil {
+		return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
+	}
+	j := &signedValidatorRegistrationsRequestJson{Registrations: registrations}
+	b, err := json.Marshal(j)
+	if err != nil {
+		return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
+	}
+	req.Body = io.NopCloser(bytes.NewReader(b))
+	return true, nil
+}
+
 // https://ethereum.github.io/beacon-apis/#/Beacon/submitPoolAttestations expects posting a top-level array.
 // We make it more proto-friendly by wrapping it in a struct with a 'data' field.
 func wrapAttestationsArray(
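The hook above rewrites a request whose body is a bare JSON array ([...]) into the object shape ({"registrations": [...]}) that the proto-backed handler expects: decode, wrap, re-marshal, and swap req.Body. A self-contained sketch of the same rewrite, with simplified stand-in types:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strings"
)

// registration is a simplified stand-in for signedValidatorRegistrationJson.
type registration struct {
	Pubkey string `json:"pubkey"`
}

// wrapped mirrors signedValidatorRegistrationsRequestJson's shape.
type wrapped struct {
	Registrations []*registration `json:"registrations"`
}

// wrapArrayBody rewrites a top-level JSON array body into a wrapped object,
// the same decode -> wrap -> marshal -> swap-body dance as the middleware.
func wrapArrayBody(req *http.Request) error {
	regs := make([]*registration, 0)
	if err := json.NewDecoder(req.Body).Decode(&regs); err != nil {
		return fmt.Errorf("could not decode body: %w", err)
	}
	b, err := json.Marshal(&wrapped{Registrations: regs})
	if err != nil {
		return fmt.Errorf("could not marshal wrapped body: %w", err)
	}
	req.Body = io.NopCloser(bytes.NewReader(b))
	return nil
}

func main() {
	body := `[{"pubkey":"0xaa"},{"pubkey":"0xbb"}]`
	req, _ := http.NewRequest(http.MethodPost, "/eth/v1/validator/register_validator", strings.NewReader(body))
	if err := wrapArrayBody(req); err != nil {
		panic(err)
	}
	out, _ := io.ReadAll(req.Body)
	fmt.Println(string(out)) // {"registrations":[{"pubkey":"0xaa"},{"pubkey":"0xbb"}]}
}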
Some files were not shown because too many files have changed in this diff.