Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 05:47:59 -05:00)

Compare commits: revert-933...auth-kiln (319 commits)
The compare view lists the 319 commit SHAs in this range (77de59bf62 through 191e7767a6); author and date columns are empty in this mirror.
WORKSPACE (16 lines changed)

```diff
@@ -183,7 +183,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
 go_rules_dependencies()
 
 go_register_toolchains(
-    go_version = "1.16.4",
+    go_version = "1.17.6",
     nogo = "@//:nogo",
 )
 
@@ -222,7 +222,7 @@ filegroup(
     url = "https://github.com/eth-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
 )
 
-consensus_spec_version = "v1.1.8"
+consensus_spec_version = "v1.1.9"
 
 bls_test_version = "v0.1.1"
 
@@ -238,7 +238,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "e4d2b7830e85734442d7172887dcd4edc0985d6256bafedb3353ab477a1433c0",
+    sha256 = "207d9c326ba4fa1f34bab7b6169201c32f2611755db030909a3405873445e0ba",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
 )
 
@@ -254,7 +254,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "4a88d01ad12260220ab5c8efdeec6534bac48a47f29ba4f7977ea14c9d07b0fe",
+    sha256 = "a3995b39f412db236b2f1db909f288218da53cb53b9923b71dda9d144d68f40a",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
 )
 
@@ -270,7 +270,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "0033fe107d9d2adb8d4fcb60dfb1c43fc5a54f0af970525c962124221757c266",
+    sha256 = "76cea7a4c8e32d458ad456b54bfbb30bc772481a91954a4cd97e229aa3023b1d",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
 )
 
@@ -285,7 +285,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "3fc3b8809d140a1ab61350fbd410f33add2851a63829d874dcb620babba603de",
+    sha256 = "0fc429684775f943250dce1f9c485ac25e26c6395d7f585c8d1317becec2ace7",
     strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
     url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
 )
@@ -349,9 +349,9 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "f196fe4367c2d2d01d36565c0dc6eecfa4f03adba1fc03a61d62953fce606e1f",
+    sha256 = "4797a7e594a5b1f4c1c8080701613f3ee451b01ec0861499ea7d9b60877a6b23",
     urls = [
-        "https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.2/prysm-web-ui.tar.gz",
+        "https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.3/prysm-web-ui.tar.gz",
     ],
 )
 
```
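Each consensus_spec_version bump above is paired with fresh archive checksums. As an illustrative aside (not part of the diff), here is a minimal Go sketch of how such a sha256 value can be recomputed from a downloaded release archive before it is written into WORKSPACE; the file name is a placeholder:

```go
// checksum.go — hypothetical helper, not part of this change.
// Prints the sha256 of a downloaded consensus-spec-tests archive so it can be
// compared against the value recorded in WORKSPACE.
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	f, err := os.Open("general.tar.gz") // placeholder path for the downloaded archive
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		log.Fatal(err)
	}
	fmt.Println(hex.EncodeToString(h.Sum(nil)))
}
```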
@@ -12,7 +12,9 @@ go_library(
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"new_slot.go",
|
||||
"optimistic_sync.go",
|
||||
"options.go",
|
||||
"pow_block.go",
|
||||
"process_attestation.go",
|
||||
"process_attestation_helpers.go",
|
||||
"process_block.go",
|
||||
@@ -37,6 +39,7 @@ go_library(
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
@@ -53,6 +56,7 @@ go_library(
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/powchain:go_default_library",
|
||||
"//beacon-chain/powchain/engine-api-client/v1:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
@@ -62,6 +66,7 @@ go_library(
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//monitoring/tracing:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
@@ -70,6 +75,9 @@ go_library(
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_emicklei_dot//:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_holiman_uint256//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
@@ -100,7 +108,10 @@ go_test(
|
||||
"init_test.go",
|
||||
"log_test.go",
|
||||
"metrics_test.go",
|
||||
"mock_engine_test.go",
|
||||
"mock_test.go",
|
||||
"optimistic_sync_test.go",
|
||||
"pow_block_test.go",
|
||||
"process_attestation_test.go",
|
||||
"process_block_test.go",
|
||||
"receive_attestation_test.go",
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
@@ -36,7 +37,7 @@ type TimeFetcher interface {
|
||||
|
||||
// GenesisFetcher retrieves the Ethereum consensus data related to its genesis.
|
||||
type GenesisFetcher interface {
|
||||
GenesisValidatorRoot() [32]byte
|
||||
GenesisValidatorsRoot() [32]byte
|
||||
}
|
||||
|
||||
// HeadFetcher defines a common interface for methods in blockchain service which
|
||||
@@ -48,12 +49,14 @@ type HeadFetcher interface {
|
||||
HeadState(ctx context.Context) (state.BeaconState, error)
|
||||
HeadValidatorsIndices(ctx context.Context, epoch types.Epoch) ([]types.ValidatorIndex, error)
|
||||
HeadSeed(ctx context.Context, epoch types.Epoch) ([32]byte, error)
|
||||
HeadGenesisValidatorRoot() [32]byte
|
||||
HeadGenesisValidatorsRoot() [32]byte
|
||||
HeadETH1Data() *ethpb.Eth1Data
|
||||
HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (types.ValidatorIndex, bool)
|
||||
HeadValidatorIndexToPublicKey(ctx context.Context, index types.ValidatorIndex) ([fieldparams.BLSPubkeyLength]byte, error)
|
||||
ProtoArrayStore() *protoarray.Store
|
||||
ChainHeads() ([][32]byte, []types.Slot)
|
||||
IsOptimistic(ctx context.Context) (bool, error)
|
||||
IsOptimisticForRoot(ctx context.Context, root [32]byte, slot types.Slot) (bool, error)
|
||||
HeadSyncCommitteeFetcher
|
||||
HeadDomainFetcher
|
||||
}
|
||||
@@ -212,8 +215,8 @@ func (s *Service) HeadSeed(ctx context.Context, epoch types.Epoch) ([32]byte, er
|
||||
return helpers.Seed(s.headState(ctx), epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
}
|
||||
|
||||
// HeadGenesisValidatorRoot returns genesis validator root of the head state.
|
||||
func (s *Service) HeadGenesisValidatorRoot() [32]byte {
|
||||
// HeadGenesisValidatorsRoot returns genesis validators root of the head state.
|
||||
func (s *Service) HeadGenesisValidatorsRoot() [32]byte {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
@@ -221,7 +224,7 @@ func (s *Service) HeadGenesisValidatorRoot() [32]byte {
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
return s.headGenesisValidatorRoot()
|
||||
return s.headGenesisValidatorsRoot()
|
||||
}
|
||||
|
||||
// HeadETH1Data returns the eth1data of the current head state.
|
||||
@@ -245,16 +248,16 @@ func (s *Service) GenesisTime() time.Time {
|
||||
return s.genesisTime
|
||||
}
|
||||
|
||||
// GenesisValidatorRoot returns the genesis validator
|
||||
// GenesisValidatorsRoot returns the genesis validator
|
||||
// root of the chain.
|
||||
func (s *Service) GenesisValidatorRoot() [32]byte {
|
||||
func (s *Service) GenesisValidatorsRoot() [32]byte {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
if !s.hasHeadState() {
|
||||
return [32]byte{}
|
||||
}
|
||||
return bytesutil.ToBytes32(s.head.state.GenesisValidatorRoot())
|
||||
return bytesutil.ToBytes32(s.head.state.GenesisValidatorsRoot())
|
||||
}
|
||||
|
||||
// CurrentFork retrieves the latest fork information of the beacon chain.
|
||||
@@ -328,6 +331,23 @@ func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index types.V
|
||||
return v.PublicKey(), nil
|
||||
}
|
||||
|
||||
// IsOptimistic returns true if the current head is optimistic.
|
||||
func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
if slots.ToEpoch(s.CurrentSlot()) < params.BeaconConfig().BellatrixForkEpoch {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return s.cfg.ForkChoiceStore.Optimistic(ctx, s.head.root, s.head.slot)
|
||||
}
|
||||
|
||||
// IsOptimisticForRoot takes the root and slot as arguments instead of the current head
|
||||
// and returns true if it is optimistic.
|
||||
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte, slot types.Slot) (bool, error) {
|
||||
return s.cfg.ForkChoiceStore.Optimistic(ctx, root, slot)
|
||||
}
|
||||
|
||||
// SetGenesisTime sets the genesis time of beacon chain.
|
||||
func (s *Service) SetGenesisTime(t time.Time) {
|
||||
s.genesisTime = t
|
||||
|
||||
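The IsOptimistic and IsOptimisticForRoot methods added above extend HeadFetcher so callers can tell an optimistically imported head apart from a fully validated one. A hedged usage sketch follows (not part of the diff; the function name and error text are invented for illustration, only the interface signature comes from the change):

```go
package example // hypothetical consumer of the blockchain.HeadFetcher interface

import (
	"context"
	"errors"

	"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
)

// respondIfFullyVerified gates a response on the new optimistic-status check.
func respondIfFullyVerified(ctx context.Context, fetcher blockchain.HeadFetcher) error {
	optimistic, err := fetcher.IsOptimistic(ctx)
	if err != nil {
		return err
	}
	if optimistic {
		// An optimistically imported head has not been fully validated by the
		// execution engine yet, so a caller may choose to withhold or flag data.
		return errors.New("head is optimistic; execution payload not yet validated")
	}
	return nil
}
```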
@@ -185,15 +185,15 @@ func TestCurrentFork_NilHeadSTate(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenesisValidatorRoot_CanRetrieve(t *testing.T) {
|
||||
func TestGenesisValidatorsRoot_CanRetrieve(t *testing.T) {
|
||||
// Should not panic if head state is nil.
|
||||
c := &Service{}
|
||||
assert.Equal(t, [32]byte{}, c.GenesisValidatorRoot(), "Did not get correct genesis validator root")
|
||||
assert.Equal(t, [32]byte{}, c.GenesisValidatorsRoot(), "Did not get correct genesis validators root")
|
||||
|
||||
s, err := v1.InitializeFromProto(ðpb.BeaconState{GenesisValidatorsRoot: []byte{'a'}})
|
||||
require.NoError(t, err)
|
||||
c.head = &head{state: s}
|
||||
assert.Equal(t, [32]byte{'a'}, c.GenesisValidatorRoot(), "Did not get correct genesis validator root")
|
||||
assert.Equal(t, [32]byte{'a'}, c.GenesisValidatorsRoot(), "Did not get correct genesis validators root")
|
||||
}
|
||||
|
||||
func TestHeadETH1Data_Nil(t *testing.T) {
|
||||
@@ -265,17 +265,17 @@ func TestService_HeadSeed(t *testing.T) {
|
||||
require.DeepEqual(t, seed, root)
|
||||
}
|
||||
|
||||
func TestService_HeadGenesisValidatorRoot(t *testing.T) {
|
||||
func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
|
||||
s, _ := util.DeterministicGenesisState(t, 1)
|
||||
c := &Service{}
|
||||
|
||||
c.head = &head{}
|
||||
root := c.HeadGenesisValidatorRoot()
|
||||
root := c.HeadGenesisValidatorsRoot()
|
||||
require.Equal(t, [32]byte{}, root)
|
||||
|
||||
c.head = &head{state: s}
|
||||
root = c.HeadGenesisValidatorRoot()
|
||||
require.DeepEqual(t, root[:], s.GenesisValidatorRoot())
|
||||
root = c.HeadGenesisValidatorsRoot()
|
||||
require.DeepEqual(t, root[:], s.GenesisValidatorsRoot())
|
||||
}
|
||||
|
||||
func TestService_ProtoArrayStore(t *testing.T) {
|
||||
@@ -355,3 +355,38 @@ func TestService_HeadValidatorIndexToPublicKeyNil(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [fieldparams.BLSPubkeyLength]byte{}, p)
|
||||
}
|
||||
|
||||
func TestService_IsOptimistic(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.BellatrixForkEpoch = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 0, 0))
|
||||
|
||||
opt, err := c.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, opt)
|
||||
}
|
||||
|
||||
func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := &Service{genesisTime: time.Now()}
|
||||
opt, err := c.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, opt)
|
||||
}
|
||||
|
||||
func TestService_IsOptimisticForRoot(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}, head: &head{slot: 101, root: [32]byte{'b'}}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 0, 0))
|
||||
|
||||
opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'}, 100)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, opt)
|
||||
}
|
||||
|
||||
@@ -265,10 +265,10 @@ func (s *Service) headState(ctx context.Context) state.BeaconState {
|
||||
return s.head.state.Copy()
|
||||
}
|
||||
|
||||
// This returns the genesis validator root of the head state.
|
||||
// This returns the genesis validators root of the head state.
|
||||
// This is a lock free version.
|
||||
func (s *Service) headGenesisValidatorRoot() [32]byte {
|
||||
return bytesutil.ToBytes32(s.head.state.GenesisValidatorRoot())
|
||||
func (s *Service) headGenesisValidatorsRoot() [32]byte {
|
||||
return bytesutil.ToBytes32(s.head.state.GenesisValidatorsRoot())
|
||||
}
|
||||
|
||||
// This returns the validator referenced by the provided index in
|
||||
|
||||
@@ -130,7 +130,7 @@ func (s *Service) domainWithHeadState(ctx context.Context, slot types.Slot, doma
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return signing.Domain(headState.Fork(), slots.ToEpoch(headState.Slot()), domain, headState.GenesisValidatorRoot())
|
||||
return signing.Domain(headState.Fork(), slots.ToEpoch(headState.Slot()), domain, headState.GenesisValidatorsRoot())
|
||||
}
|
||||
|
||||
// returns the head state that is advanced up to `slot`. It utilizes the cache `syncCommitteeHeadState` by retrieving using `slot` as key.
|
||||
|
||||
@@ -122,7 +122,7 @@ func TestService_HeadSyncCommitteeDomain(t *testing.T) {
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
|
||||
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorRoot())
|
||||
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
|
||||
d, err := c.HeadSyncCommitteeDomain(context.Background(), 0)
|
||||
@@ -136,7 +136,7 @@ func TestService_HeadSyncContributionProofDomain(t *testing.T) {
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
|
||||
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainContributionAndProof, s.GenesisValidatorRoot())
|
||||
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainContributionAndProof, s.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
|
||||
d, err := c.HeadSyncContributionProofDomain(context.Background(), 0)
|
||||
@@ -150,7 +150,7 @@ func TestService_HeadSyncSelectionProofDomain(t *testing.T) {
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
|
||||
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommitteeSelectionProof, s.GenesisValidatorRoot())
|
||||
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommitteeSelectionProof, s.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
|
||||
d, err := c.HeadSyncSelectionProofDomain(context.Background(), 0)
|
||||
|
||||
beacon-chain/blockchain/mock_engine_test.go (new file, 48 lines)
@@ -0,0 +1,48 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/pkg/errors"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
)
|
||||
|
||||
type mockEngineService struct {
|
||||
blks map[[32]byte]*enginev1.ExecutionBlock
|
||||
}
|
||||
|
||||
func (*mockEngineService) NewPayload(context.Context, *enginev1.ExecutionPayload) (*enginev1.PayloadStatus, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (*mockEngineService) ForkchoiceUpdated(context.Context, *enginev1.ForkchoiceState, *enginev1.PayloadAttributes) (*v1.ForkchoiceUpdatedResponse, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (*mockEngineService) GetPayloadV1(
|
||||
_ context.Context, _ enginev1.PayloadIDBytes,
|
||||
) *enginev1.ExecutionPayload {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*mockEngineService) GetPayload(context.Context, [8]byte) (*enginev1.ExecutionPayload, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (*mockEngineService) ExchangeTransitionConfiguration(context.Context, *enginev1.TransitionConfiguration) (*enginev1.TransitionConfiguration, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (*mockEngineService) LatestExecutionBlock(context.Context) (*enginev1.ExecutionBlock, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *mockEngineService) ExecutionBlockByHash(_ context.Context, hash common.Hash) (*enginev1.ExecutionBlock, error) {
|
||||
blk, ok := m.blks[common.BytesToHash(hash.Bytes())]
|
||||
if !ok {
|
||||
return nil, errors.New("block not found")
|
||||
}
|
||||
return blk, nil
|
||||
}
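mockEngineService stands in for the engine-API client in tests. For readability, this is how the pow_block tests later in this diff wire it up and seed it with canned execution blocks (excerpted from Test_validateMergeBlock below, not new code):

```go
engine := &mockEngineService{blks: map[[32]byte]*enginev1.ExecutionBlock{}}
service.cfg.ExecutionEngineCaller = engine
engine.blks[[32]byte{'a'}] = &enginev1.ExecutionBlock{
	ParentHash:      bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
	TotalDifficulty: "0x2",
}
```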
beacon-chain/blockchain/optimistic_sync.go (new file, 53 lines)
@@ -0,0 +1,53 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
)
|
||||
|
||||
// optimisticCandidateBlock returns true if this block can be optimistically synced.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def is_optimistic_candidate_block(opt_store: OptimisticStore, current_slot: Slot, block: BeaconBlock) -> bool:
|
||||
// justified_root = opt_store.block_states[opt_store.head_block_root].current_justified_checkpoint.root
|
||||
// justified_is_execution_block = is_execution_block(opt_store.blocks[justified_root])
|
||||
// block_is_deep = block.slot + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY <= current_slot
|
||||
// return justified_is_execution_block or block_is_deep
|
||||
func (s *Service) optimisticCandidateBlock(ctx context.Context, blk block.BeaconBlock) (bool, error) {
|
||||
if blk.Slot()+params.BeaconConfig().SafeSlotsToImportOptimistically <= s.CurrentSlot() {
|
||||
return true, nil
|
||||
}
|
||||
j := s.store.JustifiedCheckpt()
|
||||
if j == nil {
|
||||
return false, errNilJustifiedInStore
|
||||
}
|
||||
jBlock, err := s.cfg.BeaconDB.Block(ctx, bytesutil.ToBytes32(j.Root))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return blocks.ExecutionBlock(jBlock.Block().Body())
|
||||
}
|
||||
|
||||
// loadSyncedTips loads a previously saved synced Tips from DB
|
||||
// if no synced tips are saved, then it creates one from the given
|
||||
// root and slot number.
|
||||
func (s *Service) loadSyncedTips(root [32]byte, slot types.Slot) error {
|
||||
// Initialize synced tips
|
||||
tips, err := s.cfg.BeaconDB.ValidatedTips(s.ctx)
|
||||
if err != nil || len(tips) == 0 {
|
||||
tips[root] = slot
|
||||
if err != nil {
|
||||
log.WithError(err).Warn("Could not read synced tips from DB, using finalized checkpoint as synced tip")
|
||||
}
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.SetSyncedTips(tips); err != nil {
|
||||
return errors.Wrap(err, "could not set synced tips")
|
||||
}
|
||||
return nil
|
||||
}
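A quick worked instance of the candidate rule above, using the numbers from the test that follows: with SafeSlotsToImportOptimistically overridden to 128 and the current slot at roughly 256, a block at slot 1 is accepted because 1 + 128 <= 256 (the "deep block" branch), while a block at slot 200 is not deep (200 + 128 > 256) and is therefore only a candidate if the justified checkpoint block already carries an execution payload.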
beacon-chain/blockchain/optimistic_sync_test.go (new file, 141 lines)
@@ -0,0 +1,141 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
|
||||
func Test_IsOptimisticCandidateBlock(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.OverrideBeaconConfig(params.MainnetConfig())
|
||||
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
params.BeaconConfig().SafeSlotsToImportOptimistically = 128
|
||||
service.genesisTime = time.Now().Add(-time.Second * 12 * 2 * 128)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
blk block.BeaconBlock
|
||||
justified block.SignedBeaconBlock
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "deep block",
|
||||
blk: func(tt *testing.T) block.BeaconBlock {
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Slot = 1
|
||||
wr, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
|
||||
require.NoError(tt, err)
|
||||
return wr
|
||||
}(t),
|
||||
justified: func(tt *testing.T) block.SignedBeaconBlock {
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Slot = 32
|
||||
wr, err := wrapper.WrappedBellatrixSignedBeaconBlock(blk)
|
||||
require.NoError(tt, err)
|
||||
return wr
|
||||
}(t),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "shallow block, Altair justified chkpt",
|
||||
blk: func(tt *testing.T) block.BeaconBlock {
|
||||
blk := util.NewBeaconBlockAltair()
|
||||
blk.Block.Slot = 200
|
||||
wr, err := wrapper.WrappedAltairBeaconBlock(blk.Block)
|
||||
require.NoError(tt, err)
|
||||
return wr
|
||||
}(t),
|
||||
justified: func(tt *testing.T) block.SignedBeaconBlock {
|
||||
blk := util.NewBeaconBlockAltair()
|
||||
blk.Block.Slot = 32
|
||||
wr, err := wrapper.WrappedAltairSignedBeaconBlock(blk)
|
||||
require.NoError(tt, err)
|
||||
return wr
|
||||
}(t),
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "shallow block, Bellatrix justified chkpt without execution",
|
||||
blk: func(tt *testing.T) block.BeaconBlock {
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Slot = 200
|
||||
wr, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
|
||||
require.NoError(tt, err)
|
||||
return wr
|
||||
}(t),
|
||||
justified: func(tt *testing.T) block.SignedBeaconBlock {
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Slot = 32
|
||||
wr, err := wrapper.WrappedBellatrixSignedBeaconBlock(blk)
|
||||
require.NoError(tt, err)
|
||||
return wr
|
||||
}(t),
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "shallow block, execution enabled justified chkpt",
|
||||
blk: func(tt *testing.T) block.BeaconBlock {
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Slot = 200
|
||||
wr, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
|
||||
require.NoError(tt, err)
|
||||
return wr
|
||||
}(t),
|
||||
justified: func(tt *testing.T) block.SignedBeaconBlock {
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Slot = 32
|
||||
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
blk.Block.Body.ExecutionPayload.FeeRecipient = bytesutil.PadTo([]byte{'a'}, fieldparams.FeeRecipientLength)
|
||||
blk.Block.Body.ExecutionPayload.StateRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
blk.Block.Body.ExecutionPayload.ReceiptsRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
blk.Block.Body.ExecutionPayload.LogsBloom = bytesutil.PadTo([]byte{'a'}, fieldparams.LogsBloomLength)
|
||||
blk.Block.Body.ExecutionPayload.Random = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
blk.Block.Body.ExecutionPayload.BaseFeePerGas = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
blk.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
wr, err := wrapper.WrappedBellatrixSignedBeaconBlock(blk)
|
||||
require.NoError(tt, err)
|
||||
return wr
|
||||
}(t),
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
jroot, err := tt.justified.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, tt.justified))
|
||||
service.store.SetJustifiedCheckpt(
|
||||
ðpb.Checkpoint{
|
||||
Root: jroot[:],
|
||||
Epoch: slots.ToEpoch(tt.justified.Block().Slot()),
|
||||
})
|
||||
candidate, err := service.optimisticCandidateBlock(ctx, tt.blk)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.want, candidate, tt.name)
|
||||
}
|
||||
}
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
@@ -50,6 +51,14 @@ func WithChainStartFetcher(f powchain.ChainStartFetcher) Option {
|
||||
}
|
||||
}
|
||||
|
||||
// WithExecutionEngineCaller to call execution engine.
|
||||
func WithExecutionEngineCaller(c v1.EngineCaller) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.ExecutionEngineCaller = c
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithDepositCache for deposit lifecycle after chain inclusion.
|
||||
func WithDepositCache(c *depositcache.DepositCache) Option {
|
||||
return func(s *Service) error {
|
||||
|
||||
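The new WithExecutionEngineCaller option above follows the same functional-option pattern the tests in this diff already use (WithDatabase, WithStateGen, WithForkChoiceStore). A hedged construction sketch; the first three options mirror the test setup further down, and engineClient is a placeholder for any v1.EngineCaller implementation:

```go
// Sketch only, not part of the diff: constructing the service with the new option.
opts := []Option{
	WithDatabase(beaconDB),
	WithStateGen(stategen.New(beaconDB)),
	WithForkChoiceStore(fcs),
	WithExecutionEngineCaller(engineClient), // engineClient implements v1.EngineCaller
}
service, err := NewService(ctx, opts...)
if err != nil {
	return err
}
_ = service
```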
beacon-chain/blockchain/pow_block.go (new file, 141 lines)
@@ -0,0 +1,141 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// validateMergeBlock validates terminal block hash in the event of manual overrides before checking for total difficulty.
|
||||
//
|
||||
// def validate_merge_block(block: BeaconBlock) -> None:
|
||||
// if TERMINAL_BLOCK_HASH != Hash32():
|
||||
// # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached.
|
||||
// assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
|
||||
// assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH
|
||||
// return
|
||||
//
|
||||
// pow_block = get_pow_block(block.body.execution_payload.parent_hash)
|
||||
// # Check if `pow_block` is available
|
||||
// assert pow_block is not None
|
||||
// pow_parent = get_pow_block(pow_block.parent_hash)
|
||||
// # Check if `pow_parent` is available
|
||||
// assert pow_parent is not None
|
||||
// # Check if `pow_block` is a valid terminal PoW block
|
||||
// assert is_valid_terminal_pow_block(pow_block, pow_parent)
|
||||
func (s *Service) validateMergeBlock(ctx context.Context, b block.SignedBeaconBlock) error {
|
||||
if err := helpers.BeaconBlockIsNil(b); err != nil {
|
||||
return err
|
||||
}
|
||||
payload, err := b.Block().Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if payload == nil {
|
||||
return errors.New("nil execution payload")
|
||||
}
|
||||
if err := validateTerminalBlockHash(b.Block().Slot(), payload); err != nil {
|
||||
return errors.Wrap(err, "could not validate terminal block hash")
|
||||
}
|
||||
mergeBlockParentHash, mergeBlockTD, err := s.getBlkParentHashAndTD(ctx, payload.ParentHash)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get merge block parent hash and total difficulty")
|
||||
}
|
||||
_, mergeBlockParentTD, err := s.getBlkParentHashAndTD(ctx, mergeBlockParentHash)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get merge parent block total difficulty")
|
||||
}
|
||||
valid, err := validateTerminalBlockDifficulties(mergeBlockTD, mergeBlockParentTD)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !valid {
|
||||
return fmt.Errorf("invalid TTD, configTTD: %s, currentTTD: %s, parentTTD: %s",
|
||||
params.BeaconConfig().TerminalTotalDifficulty, mergeBlockTD, mergeBlockParentTD)
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": b.Block().Slot(),
|
||||
"mergeBlockHash": common.BytesToHash(payload.ParentHash).String(),
|
||||
"mergeBlockParentHash": common.BytesToHash(mergeBlockParentHash).String(),
|
||||
"terminalTotalDifficulty": params.BeaconConfig().TerminalTotalDifficulty,
|
||||
"mergeBlockTotalDifficulty": mergeBlockTD,
|
||||
"mergeBlockParentTotalDifficulty": mergeBlockParentTD,
|
||||
}).Info("Validated terminal block")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getBlkParentHashAndTD retrieves the parent hash and total difficulty of the given block.
|
||||
func (s *Service) getBlkParentHashAndTD(ctx context.Context, blkHash []byte) ([]byte, *uint256.Int, error) {
|
||||
blk, err := s.cfg.ExecutionEngineCaller.ExecutionBlockByHash(ctx, common.BytesToHash(blkHash))
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get pow block")
|
||||
}
|
||||
if blk == nil {
|
||||
return nil, nil, errors.New("pow block is nil")
|
||||
}
|
||||
blkTDBig, err := hexutil.DecodeBig(blk.TotalDifficulty)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not decode merge block total difficulty")
|
||||
}
|
||||
blkTDUint256, overflows := uint256.FromBig(blkTDBig)
|
||||
if overflows {
|
||||
return nil, nil, errors.New("total difficulty overflows")
|
||||
}
|
||||
return blk.ParentHash, blkTDUint256, nil
|
||||
}
|
||||
|
||||
// validateTerminalBlockHash validates if the merge block is a valid terminal PoW block.
|
||||
// spec code:
|
||||
// if TERMINAL_BLOCK_HASH != Hash32():
|
||||
// # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached.
|
||||
// assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
|
||||
// assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH
|
||||
// return
|
||||
func validateTerminalBlockHash(blkSlot types.Slot, payload *enginev1.ExecutionPayload) error {
|
||||
if bytesutil.ToBytes32(params.BeaconConfig().TerminalBlockHash.Bytes()) == [32]byte{} {
|
||||
return nil
|
||||
}
|
||||
if params.BeaconConfig().TerminalBlockHashActivationEpoch > slots.ToEpoch(blkSlot) {
|
||||
return errors.New("terminal block hash activation epoch not reached")
|
||||
}
|
||||
if !bytes.Equal(payload.ParentHash, params.BeaconConfig().TerminalBlockHash.Bytes()) {
|
||||
return errors.New("parent hash does not match terminal block hash")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateTerminalBlockDifficulties validates terminal pow block by comparing own total difficulty with parent's total difficulty.
|
||||
//
|
||||
// def is_valid_terminal_pow_block(block: PowBlock, parent: PowBlock) -> bool:
|
||||
// is_total_difficulty_reached = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
|
||||
// is_parent_total_difficulty_valid = parent.total_difficulty < TERMINAL_TOTAL_DIFFICULTY
|
||||
// return is_total_difficulty_reached and is_parent_total_difficulty_valid
|
||||
func validateTerminalBlockDifficulties(currentDifficulty *uint256.Int, parentDifficulty *uint256.Int) (bool, error) {
|
||||
b, ok := new(big.Int).SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
|
||||
if !ok {
|
||||
return false, errors.New("failed to parse terminal total difficulty")
|
||||
}
|
||||
ttd, of := uint256.FromBig(b)
|
||||
if of {
|
||||
return false, errors.New("overflow terminal total difficulty")
|
||||
}
|
||||
totalDifficultyReached := currentDifficulty.Cmp(ttd) >= 0
|
||||
parentTotalDifficultyValid := ttd.Cmp(parentDifficulty) > 0
|
||||
return totalDifficultyReached && parentTotalDifficultyValid, nil
|
||||
}
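getBlkParentHashAndTD above expects the engine API's totalDifficulty as a 0x-prefixed hex string. A small standalone sketch of that same decode path (using the go-ethereum hexutil and holiman/uint256 packages already imported by this file):

```go
package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/holiman/uint256"
)

func main() {
	// "0x2" mirrors the values seeded into the mock engine in the tests below;
	// a bare "1" without the 0x prefix fails DecodeBig, which is exactly what
	// Test_getBlkParentHashAndTD asserts.
	tdBig, err := hexutil.DecodeBig("0x2")
	if err != nil {
		log.Fatal(err)
	}
	td, overflow := uint256.FromBig(tdBig)
	if overflow {
		log.Fatal("total difficulty overflows 256 bits")
	}
	// The pow_block_test below relies on String() rendering the 0x-hex form.
	fmt.Println(td.String())
}
```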
beacon-chain/blockchain/pow_block_test.go (new file, 210 lines)
@@ -0,0 +1,210 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/holiman/uint256"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
|
||||
func Test_validTerminalPowBlock(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
currentDifficulty *uint256.Int
|
||||
parentDifficulty *uint256.Int
|
||||
ttd uint64
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "current > ttd, parent > ttd",
|
||||
currentDifficulty: uint256.NewInt(2),
|
||||
parentDifficulty: uint256.NewInt(2),
|
||||
ttd: 1,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "current < ttd, parent < ttd",
|
||||
currentDifficulty: uint256.NewInt(2),
|
||||
parentDifficulty: uint256.NewInt(2),
|
||||
ttd: 3,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "current == ttd, parent == ttd",
|
||||
currentDifficulty: uint256.NewInt(2),
|
||||
parentDifficulty: uint256.NewInt(2),
|
||||
ttd: 2,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "current > ttd, parent == ttd",
|
||||
currentDifficulty: uint256.NewInt(2),
|
||||
parentDifficulty: uint256.NewInt(1),
|
||||
ttd: 1,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "current == ttd, parent < ttd",
|
||||
currentDifficulty: uint256.NewInt(2),
|
||||
parentDifficulty: uint256.NewInt(1),
|
||||
ttd: 2,
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "current > ttd, parent < ttd",
|
||||
currentDifficulty: uint256.NewInt(3),
|
||||
parentDifficulty: uint256.NewInt(1),
|
||||
ttd: 2,
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.TerminalTotalDifficulty = fmt.Sprint(tt.ttd)
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
got, err := validateTerminalBlockDifficulties(tt.currentDifficulty, tt.parentDifficulty)
|
||||
require.NoError(t, err)
|
||||
if got != tt.want {
|
||||
t.Errorf("validateTerminalBlockDifficulties() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_validTerminalPowBlockSpecConfig(t *testing.T) {
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.TerminalTotalDifficulty = "115792089237316195423570985008687907853269984665640564039457584007913129638912"
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
i, _ := new(big.Int).SetString("115792089237316195423570985008687907853269984665640564039457584007913129638912", 10)
|
||||
current, of := uint256.FromBig(i)
|
||||
require.Equal(t, of, false)
|
||||
i, _ = new(big.Int).SetString("115792089237316195423570985008687907853269984665640564039457584007913129638911", 10)
|
||||
parent, of := uint256.FromBig(i)
|
||||
require.Equal(t, of, false)
|
||||
|
||||
got, err := validateTerminalBlockDifficulties(current, parent)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, got)
|
||||
}
|
||||
|
||||
func Test_validateMergeBlock(t *testing.T) {
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.TerminalTotalDifficulty = "2"
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
engine := &mockEngineService{blks: map[[32]byte]*enginev1.ExecutionBlock{}}
|
||||
service.cfg.ExecutionEngineCaller = engine
|
||||
engine.blks[[32]byte{'a'}] = &enginev1.ExecutionBlock{
|
||||
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
|
||||
TotalDifficulty: "0x2",
|
||||
}
|
||||
engine.blks[[32]byte{'b'}] = &enginev1.ExecutionBlock{
|
||||
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
|
||||
TotalDifficulty: "0x1",
|
||||
}
|
||||
blk := ðpb.SignedBeaconBlockBellatrix{
|
||||
Block: ðpb.BeaconBlockBellatrix{
|
||||
Slot: 1,
|
||||
Body: ðpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: &enginev1.ExecutionPayload{
|
||||
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.validateMergeBlock(ctx, b))
|
||||
|
||||
cfg.TerminalTotalDifficulty = "1"
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
require.ErrorContains(t, "invalid TTD, configTTD: 1, currentTTD: 2, parentTTD: 1", service.validateMergeBlock(ctx, b))
|
||||
}
|
||||
|
||||
func Test_getBlkParentHashAndTD(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
engine := &mockEngineService{blks: map[[32]byte]*enginev1.ExecutionBlock{}}
|
||||
service.cfg.ExecutionEngineCaller = engine
|
||||
h := [32]byte{'a'}
|
||||
p := [32]byte{'b'}
|
||||
td := "0x1"
|
||||
engine.blks[h] = &enginev1.ExecutionBlock{
|
||||
ParentHash: p[:],
|
||||
TotalDifficulty: td,
|
||||
}
|
||||
parentHash, totalDifficulty, err := service.getBlkParentHashAndTD(ctx, h[:])
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, p, bytesutil.ToBytes32(parentHash))
|
||||
require.Equal(t, td, totalDifficulty.String())
|
||||
|
||||
_, _, err = service.getBlkParentHashAndTD(ctx, []byte{'c'})
|
||||
require.ErrorContains(t, "could not get pow block: block not found", err)
|
||||
|
||||
engine.blks[h] = nil
|
||||
_, _, err = service.getBlkParentHashAndTD(ctx, h[:])
|
||||
require.ErrorContains(t, "pow block is nil", err)
|
||||
|
||||
engine.blks[h] = &enginev1.ExecutionBlock{
|
||||
ParentHash: p[:],
|
||||
TotalDifficulty: "1",
|
||||
}
|
||||
_, _, err = service.getBlkParentHashAndTD(ctx, h[:])
|
||||
require.ErrorContains(t, "could not decode merge block total difficulty: hex string without 0x prefix", err)
|
||||
|
||||
engine.blks[h] = &enginev1.ExecutionBlock{
|
||||
ParentHash: p[:],
|
||||
TotalDifficulty: "0XFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
|
||||
}
|
||||
_, _, err = service.getBlkParentHashAndTD(ctx, h[:])
|
||||
require.ErrorContains(t, "could not decode merge block total difficulty: hex number > 256 bits", err)
|
||||
}
|
||||
|
||||
func Test_validateTerminalBlockHash(t *testing.T) {
|
||||
require.NoError(t, validateTerminalBlockHash(1, &enginev1.ExecutionPayload{}))
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.TerminalBlockHash = [32]byte{0x01}
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
require.ErrorContains(t, "terminal block hash activation epoch not reached", validateTerminalBlockHash(1, &enginev1.ExecutionPayload{}))
|
||||
|
||||
cfg.TerminalBlockHashActivationEpoch = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
require.ErrorContains(t, "parent hash does not match terminal block hash", validateTerminalBlockHash(1, &enginev1.ExecutionPayload{}))
|
||||
|
||||
require.NoError(t, validateTerminalBlockHash(1, &enginev1.ExecutionPayload{ParentHash: cfg.TerminalBlockHash.Bytes()}))
|
||||
}
|
||||
@@ -1,11 +1,16 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
@@ -17,11 +22,14 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/monitoring/tracing"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
@@ -97,12 +105,76 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// TODO_MERGE: Optimize this copy.
|
||||
copiedPreState := preState.Copy()
|
||||
|
||||
body := signed.Block().Body()
|
||||
// TODO_MERGE: Break `ExecuteStateTransition` into per_slot and block processing so we can call `ExecutePayload` in the middle.
|
||||
postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
|
||||
if err != nil {
|
||||
// TODO_MERGE: Notify execution client in the event of invalid consensus block
|
||||
return err
|
||||
}
|
||||
|
||||
fullyValidated := false
|
||||
if copiedPreState.Version() == version.Bellatrix || postState.Version() == version.Bellatrix {
|
||||
executionEnabled, err := blocks.ExecutionEnabled(postState, body)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not check if execution is enabled")
|
||||
}
|
||||
if executionEnabled {
|
||||
payload, err := body.ExecutionPayload()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get body execution payload")
|
||||
}
|
||||
// This is not the earliest we can call `ExecutePayload`; per the TODO above, the soonest we can call it is after per-slot processing.
|
||||
status, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"status:": status.Status,
|
||||
"hash:": fmt.Sprintf("%#x", payload.BlockHash),
|
||||
"parentHash": fmt.Sprintf("%#x", payload.ParentHash),
|
||||
}).Info("Successfully called newPayload")
|
||||
|
||||
switch status.Status {
|
||||
case enginev1.PayloadStatus_INVALID, enginev1.PayloadStatus_INVALID_BLOCK_HASH, enginev1.PayloadStatus_INVALID_TERMINAL_BLOCK:
|
||||
// TODO_MERGE walk up the parent chain removing
|
||||
return fmt.Errorf("could not prcess execution payload with status : %v", status.Status)
|
||||
case enginev1.PayloadStatus_SYNCING, enginev1.PayloadStatus_ACCEPTED:
|
||||
candidate, err := s.optimisticCandidateBlock(ctx, b)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not check if block is optimistic candidate")
|
||||
}
|
||||
if !candidate {
|
||||
return errors.New("could not optimistically sync block")
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": b.Slot(),
|
||||
"root": fmt.Sprintf("%#x", bytesutil.Trunc(blockRoot[:])),
|
||||
"payloadHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
|
||||
}).Info("Block is optimistic candidate")
|
||||
break
|
||||
case enginev1.PayloadStatus_VALID:
|
||||
fullyValidated = true
|
||||
default:
|
||||
return errors.New("unknown payload status")
|
||||
}
|
||||
if fullyValidated {
|
||||
mergeBlock, err := blocks.MergeTransitionBlock(copiedPreState, body)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not check if merge block is terminal")
|
||||
}
|
||||
if mergeBlock {
|
||||
if err := s.validateTerminalBlock(ctx, signed); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We add a proposer score boost to fork choice for the block root if applicable, right after
|
||||
// running a successful state transition for the block.
|
||||
if err := s.cfg.ForkChoiceStore.BoostProposerRoot(
|
||||
@@ -115,6 +187,20 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
|
||||
return err
|
||||
}
|
||||
|
||||
// update forkchoice synced tips if the block is not optimistic
|
||||
if postState.Version() == version.Bellatrix || fullyValidated {
|
||||
root, err := b.HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.UpdateSyncedTipsWithValidRoot(ctx, root); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.saveSyncedTipsDB(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// If slasher is configured, forward the attestations in the block via
|
||||
// an event feed for processing.
|
||||
if features.Get().EnableSlasher {
|
||||
@@ -175,6 +261,65 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
|
||||
log.WithError(err).Warn("Could not update head")
|
||||
}
|
||||
|
||||
// Notify execution layer with fork choice head update if this is post merge block.
|
||||
if postState.Version() == version.Bellatrix {
|
||||
executionEnabled, err := blocks.ExecutionEnabled(postState, body)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not check if execution is enabled")
|
||||
}
|
||||
if executionEnabled {
|
||||
headPayload, err := s.headBlock().Block().Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// TODO_MERGE: Loading the finalized block from DB on per block is not ideal. Finalized block should be cached here
|
||||
finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, bytesutil.ToBytes32(finalized.Root))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
finalizedBlockHash := params.BeaconConfig().ZeroHash[:]
|
||||
if finalizedBlock != nil && finalizedBlock.Version() == version.Bellatrix {
|
||||
finalizedPayload, err := finalizedBlock.Block().Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
finalizedBlockHash = finalizedPayload.BlockHash
|
||||
}
|
||||
|
||||
fcs := &enginev1.ForkchoiceState{
|
||||
HeadBlockHash: headPayload.BlockHash,
|
||||
SafeBlockHash: headPayload.BlockHash,
|
||||
FinalizedBlockHash: finalizedBlockHash,
|
||||
}
|
||||
resp, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, nil /* attribute */)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"status:": resp.Status.Status,
|
||||
"hash:": fmt.Sprintf("%#x", headPayload.BlockHash),
|
||||
}).Info("Successfully called forkchoiceUpdated")
|
||||
|
||||
switch resp.Status.Status {
|
||||
case enginev1.PayloadStatus_INVALID, enginev1.PayloadStatus_INVALID_BLOCK_HASH, enginev1.PayloadStatus_INVALID_TERMINAL_BLOCK:
|
||||
return fmt.Errorf("could not prcess execution payload with status : %v", resp.Status.Status)
|
||||
case enginev1.PayloadStatus_SYNCING, enginev1.PayloadStatus_ACCEPTED:
|
||||
candidate, err := s.optimisticCandidateBlock(ctx, b)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not check if block is optimistic candidate")
|
||||
}
|
||||
if !candidate {
|
||||
return errors.Wrap(err, "could not optimistically sync block")
|
||||
}
|
||||
break
|
||||
case enginev1.PayloadStatus_VALID:
|
||||
default:
|
||||
return errors.Wrap(err, "could not execute payload")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.pruneCanonicalAttsFromPool(ctx, blockRoot, signed); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -247,32 +392,33 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
|
||||
}
|
||||
|
||||
func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlock,
|
||||
blockRoots [][32]byte) ([]*ethpb.Checkpoint, []*ethpb.Checkpoint, error) {
|
||||
blockRoots [][32]byte) ([]*ethpb.Checkpoint, []*ethpb.Checkpoint, []bool, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
|
||||
defer span.End()
|
||||
|
||||
if len(blks) == 0 || len(blockRoots) == 0 {
|
||||
return nil, nil, errors.New("no blocks provided")
|
||||
return nil, nil, nil, errors.New("no blocks provided")
|
||||
}
|
||||
if err := helpers.BeaconBlockIsNil(blks[0]); err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
b := blks[0].Block()
|
||||
|
||||
// Retrieve incoming block's pre state.
|
||||
if err := s.verifyBlkPreState(ctx, b); err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
preState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, bytesutil.ToBytes32(b.ParentRoot()))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
if preState == nil || preState.IsNil() {
|
||||
return nil, nil, fmt.Errorf("nil pre state for slot %d", b.Slot())
|
||||
return nil, nil, nil, fmt.Errorf("nil pre state for slot %d", b.Slot())
|
||||
}
|
||||
|
||||
jCheckpoints := make([]*ethpb.Checkpoint, len(blks))
|
||||
fCheckpoints := make([]*ethpb.Checkpoint, len(blks))
|
||||
optimistic := make([]bool, len(blks))
|
||||
sigSet := &bls.SignatureBatch{
|
||||
Signatures: [][]byte{},
|
||||
PublicKeys: []bls.PublicKey{},
|
||||
@@ -281,43 +427,140 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
|
||||
var set *bls.SignatureBatch
|
||||
boundaries := make(map[[32]byte]state.BeaconState)
|
||||
for i, b := range blks {
|
||||
preStateCopied := preState.Copy() // TODO_MERGE: Optimize this copy.
|
||||
set, preState, err = transition.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, b)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
// Non-merge blocks are never optimistic.
|
||||
optimistic[i] = false
|
||||
if preState.Version() == version.Bellatrix {
|
||||
executionEnabled, err := blocks.ExecutionEnabled(preState, b.Block().Body())
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.Wrap(err, "could not check if execution is enabled")
|
||||
}
|
||||
if executionEnabled {
|
||||
payload, err := b.Block().Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.Wrap(err, "could not get body execution payload")
|
||||
}
|
||||
status, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
switch status.Status {
|
||||
case enginev1.PayloadStatus_INVALID, enginev1.PayloadStatus_INVALID_BLOCK_HASH, enginev1.PayloadStatus_INVALID_TERMINAL_BLOCK:
|
||||
// TODO_MERGE: walk up the parent chain removing invalid blocks
|
||||
return nil, nil, nil, fmt.Errorf("could not process execution payload with status: %v", status.Status)
|
||||
case enginev1.PayloadStatus_SYNCING, enginev1.PayloadStatus_ACCEPTED:
|
||||
candidate, err := s.optimisticCandidateBlock(ctx, b.Block())
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.Wrap(err, "could not check if block is optimistic candidate")
|
||||
}
|
||||
if !candidate {
|
||||
return nil, nil, nil, errors.New("could not optimistically sync block")
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": b.Block().Slot(),
|
||||
"root": fmt.Sprintf("%#x", bytesutil.Trunc(blockRoots[i][:])),
|
||||
"payloadHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
|
||||
}).Info("Block is optimistic candidate")
|
||||
optimistic[i] = true
|
||||
break
|
||||
case enginev1.PayloadStatus_VALID:
|
||||
default:
|
||||
return nil, nil, nil, errors.New("unknown payload status")
|
||||
}
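// Terminal block validation is only performed when the payload has been fully validated; per the
// spec, validate_merge_block may be deferred while the required PoW blocks are still unavailable.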
|
||||
if !optimistic[i] {
|
||||
mergeBlock, err := blocks.MergeTransitionBlock(preStateCopied, b.Block().Body())
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.Wrap(err, "could not check if block is the merge transition block")
|
||||
}
|
||||
if mergeBlock {
|
||||
if err := s.validateTerminalBlock(ctx, b); err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
headPayload, err := b.Block().Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
|
||||
}
|
||||
// TODO_MERGE: Loading the finalized block from the DB on every block is not ideal. The finalized block should be cached here.
|
||||
finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, bytesutil.ToBytes32(preState.FinalizedCheckpoint().Root))
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
|
||||
}
|
||||
finalizedBlockHash := params.BeaconConfig().ZeroHash[:]
|
||||
if finalizedBlock != nil && finalizedBlock.Version() == version.Bellatrix {
|
||||
finalizedPayload, err := finalizedBlock.Block().Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
|
||||
}
|
||||
finalizedBlockHash = finalizedPayload.BlockHash
|
||||
}
|
||||
|
||||
fcs := &enginev1.ForkchoiceState{
|
||||
HeadBlockHash: headPayload.BlockHash,
|
||||
SafeBlockHash: headPayload.BlockHash,
|
||||
FinalizedBlockHash: finalizedBlockHash,
|
||||
}
|
||||
|
||||
resp, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, nil /* attribute */)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
switch resp.Status.Status {
|
||||
case enginev1.PayloadStatus_INVALID, enginev1.PayloadStatus_INVALID_BLOCK_HASH, enginev1.PayloadStatus_INVALID_TERMINAL_BLOCK:
|
||||
return nil, nil, nil, fmt.Errorf("could not process execution payload with status: %v", resp.Status.Status)
|
||||
case enginev1.PayloadStatus_SYNCING, enginev1.PayloadStatus_ACCEPTED:
|
||||
break
|
||||
case enginev1.PayloadStatus_VALID:
|
||||
default:
|
||||
return nil, nil, nil, errors.New("unknown payload status")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Save potential boundary states.
|
||||
if slots.IsEpochStart(preState.Slot()) {
|
||||
boundaries[blockRoots[i]] = preState.Copy()
|
||||
if err := s.handleEpochBoundary(ctx, preState); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not handle epoch boundary state")
|
||||
return nil, nil, nil, errors.Wrap(err, "could not handle epoch boundary state")
|
||||
}
|
||||
}
|
||||
jCheckpoints[i] = preState.CurrentJustifiedCheckpoint()
|
||||
fCheckpoints[i] = preState.FinalizedCheckpoint()
|
||||
|
||||
sigSet.Join(set)
|
||||
}
|
||||
verify, err := sigSet.Verify()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
if !verify {
|
||||
return nil, nil, errors.New("batch block signature verification failed")
|
||||
return nil, nil, nil, errors.New("batch block signature verification failed")
|
||||
}
|
||||
for r, st := range boundaries {
|
||||
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
}
|
||||
// Also save the last post state, which is used as the pre state for the next batch.
|
||||
lastB := blks[len(blks)-1]
|
||||
lastBR := blockRoots[len(blockRoots)-1]
|
||||
if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
if err := s.saveHeadNoDB(ctx, lastB, lastBR, preState); err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
return fCheckpoints, jCheckpoints, nil
|
||||
return fCheckpoints, jCheckpoints, optimistic, nil
|
||||
}
|
||||
|
||||
// handles a block after the block's batch has been verified, where we can save blocks
|
||||
@@ -501,3 +744,94 @@ func (s *Service) pruneCanonicalAttsFromPool(ctx context.Context, r [32]byte, b
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateTerminalBlock validates the terminal block hash in the event of a manual override before checking the total difficulty.
|
||||
//
|
||||
// def validate_merge_block(block: BeaconBlock) -> None:
|
||||
// """
|
||||
// Check the parent PoW block of execution payload is a valid terminal PoW block.
|
||||
//
|
||||
// Note: Unavailable PoW block(s) may later become available,
|
||||
// and a client software MAY delay a call to ``validate_merge_block``
|
||||
// until the PoW block(s) become available.
|
||||
// """
|
||||
// if TERMINAL_BLOCK_HASH != Hash32():
|
||||
// # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached.
|
||||
// assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
|
||||
// return block.block_hash == TERMINAL_BLOCK_HASH
|
||||
//
|
||||
// pow_block = get_pow_block(block.body.execution_payload.parent_hash)
|
||||
// # Check if `pow_block` is available
|
||||
// assert pow_block is not None
|
||||
// pow_parent = get_pow_block(pow_block.parent_hash)
|
||||
// # Check if `pow_parent` is available
|
||||
// assert pow_parent is not None
|
||||
// # Check if `pow_block` is a valid terminal PoW block
|
||||
// assert is_valid_terminal_pow_block(pow_block, pow_parent)
|
||||
func (s *Service) validateTerminalBlock(ctx context.Context, b block.SignedBeaconBlock) error {
|
||||
payload, err := b.Block().Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if bytesutil.ToBytes32(params.BeaconConfig().TerminalBlockHash.Bytes()) != [32]byte{} {
|
||||
// `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached.
|
||||
if params.BeaconConfig().TerminalBlockHashActivationEpoch > slots.ToEpoch(b.Block().Slot()) {
|
||||
return errors.New("terminal block hash activation epoch not reached")
|
||||
}
|
||||
if !bytes.Equal(payload.ParentHash, params.BeaconConfig().TerminalBlockHash.Bytes()) {
|
||||
return errors.New("parent hash does not match terminal block hash")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
transitionBlk, err := s.cfg.ExecutionEngineCaller.ExecutionBlockByHash(ctx, common.BytesToHash(payload.ParentHash))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get transition block")
|
||||
}
|
||||
parentTransitionBlk, err := s.cfg.ExecutionEngineCaller.ExecutionBlockByHash(ctx, common.BytesToHash(transitionBlk.ParentHash))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get transition parent block")
|
||||
}
|
||||
transitionBlkTDBig, err := hexutil.DecodeBig(transitionBlk.TotalDifficulty)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not decode transition total difficulty")
|
||||
}
|
||||
transitionBlkTTD, overflows := uint256.FromBig(transitionBlkTDBig)
|
||||
if overflows {
|
||||
return errors.New("total difficulty overflows")
|
||||
}
|
||||
parentBlkTD, err := hexutil.DecodeBig(parentTransitionBlk.TotalDifficulty)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not decode transition parent total difficulty")
|
||||
}
|
||||
parentBlkTTD, overflows := uint256.FromBig(parentBlkTD)
|
||||
if overflows {
|
||||
return errors.New("total difficulty overflows")
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": b.Block().Slot(),
|
||||
"transitionBlockHash": common.BytesToHash(payload.ParentHash).String(),
|
||||
"transitionBlockParentHash": common.BytesToHash(transitionBlk.ParentHash).String(),
|
||||
"terminalTotalDifficulty": params.BeaconConfig().TerminalTotalDifficulty,
|
||||
"transitionBlockTotalDifficulty": transitionBlkTTD,
|
||||
"transitionBlockParentTotalDifficulty": parentBlkTTD,
|
||||
}).Info("Validating terminal block")
|
||||
|
||||
validated, err := validateTerminalBlockDifficulties(transitionBlkTTD, parentBlkTTD)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !validated {
|
||||
return errors.New("invalid difficulty for terminal block")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
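// For reference, validateTerminalBlockDifficulties (not shown in this diff) is expected to
// implement the spec's is_valid_terminal_pow_block rule. A minimal sketch, assuming the
// terminal total difficulty is already available as a uint256 (the helper name and signature
// below are illustrative, not the actual Prysm API):
//
//   func isValidTerminalPowBlock(blockTD, parentTD, ttd *uint256.Int) bool {
//       reachedTTD := blockTD.Cmp(ttd) >= 0  // block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
//       parentBelow := parentTD.Cmp(ttd) < 0 // parent.total_difficulty < TERMINAL_TOTAL_DIFFICULTY
//       return reachedTTD && parentBelow
//   }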
|
||||
|
||||
// saveSyncedTipsDB saves synced and validated tips to the DB.
|
||||
func (s *Service) saveSyncedTipsDB(ctx context.Context) error {
|
||||
tips := s.cfg.ForkChoiceStore.SyncedTips()
|
||||
if len(tips) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.cfg.BeaconDB.UpdateValidatedTips(ctx, tips)
|
||||
}
|
||||
|
||||
@@ -113,7 +113,7 @@ func (s *Service) VerifyBlkDescendant(ctx context.Context, root [32]byte) error
|
||||
}
|
||||
|
||||
if !bytes.Equal(bFinalizedRoot, fRoot[:]) {
|
||||
err := fmt.Errorf("block %#x is not a descendent of the current finalized block slot %d, %#x != %#x",
|
||||
err := fmt.Errorf("block %#x is not a descendant of the current finalized block slot %d, %#x != %#x",
|
||||
bytesutil.Trunc(root[:]), finalizedBlk.Slot(), bytesutil.Trunc(bFinalizedRoot),
|
||||
bytesutil.Trunc(fRoot[:]))
|
||||
tracing.AnnotateError(span, err)
|
||||
|
||||
@@ -100,7 +100,7 @@ func TestStore_OnBlock(t *testing.T) {
|
||||
return b
|
||||
}(),
|
||||
s: st.Copy(),
|
||||
wantErrString: "is not a descendent of the current finalized block",
|
||||
wantErrString: "is not a descendant of the current finalized block",
|
||||
},
|
||||
{
|
||||
name: "same slot as finalized block",
|
||||
@@ -178,7 +178,7 @@ func TestStore_OnBlockBatch(t *testing.T) {
|
||||
rBlock.Block.ParentRoot = gRoot[:]
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[0]))
|
||||
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[0], firstState))
|
||||
_, _, err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
|
||||
_, _, _, err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -789,7 +789,7 @@ func TestVerifyBlkDescendant(t *testing.T) {
|
||||
finalizedRoot: r1,
|
||||
parentRoot: r,
|
||||
},
|
||||
wantedErr: "is not a descendent of the current finalized block slot",
|
||||
wantedErr: "is not a descendant of the current finalized block slot",
|
||||
},
|
||||
{
|
||||
name: "is descendant",
|
||||
@@ -996,3 +996,49 @@ func TestRemoveBlockAttestationsInPool_NonCanonical(t *testing.T) {
|
||||
require.NoError(t, service.pruneCanonicalAttsFromPool(ctx, r, wrapper.WrappedPhase0SignedBeaconBlock(b)))
|
||||
require.Equal(t, 1, service.cfg.AttPool.AggregatedAttestationCount())
|
||||
}
|
||||
|
||||
func TestService_saveSyncedTipsDB(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
|
||||
b1 := util.NewBeaconBlock()
|
||||
b1.Block.Slot = 1
|
||||
b1.Block.ParentRoot = bytesutil.PadTo([]byte{'a'}, 32)
|
||||
r1, err := b1.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
b100 := util.NewBeaconBlock()
|
||||
b100.Block.Slot = 100
|
||||
b100.Block.ParentRoot = r1[:]
|
||||
r100, err := b100.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
b200 := util.NewBeaconBlock()
|
||||
b200.Block.Slot = 200
|
||||
b200.Block.ParentRoot = r1[:]
|
||||
r200, err := b200.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
for _, b := range []*ethpb.SignedBeaconBlock{b1, b100, b200} {
|
||||
beaconBlock := util.NewBeaconBlock()
|
||||
beaconBlock.Block.Slot = b.Block.Slot
|
||||
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), [32]byte{}, 0, 0))
|
||||
}
|
||||
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.UpdateSyncedTipsWithValidRoot(ctx, r100))
|
||||
require.NoError(t, service.saveSyncedTipsDB(ctx))
|
||||
savedTips, err := service.cfg.BeaconDB.ValidatedTips(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(savedTips))
|
||||
require.Equal(t, types.Slot(1), savedTips[r1])
|
||||
require.Equal(t, types.Slot(100), savedTips[r100])
|
||||
|
||||
// Delete invalid root
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.UpdateSyncedTipsWithInvalidRoot(ctx, r200))
|
||||
require.NoError(t, service.saveSyncedTipsDB(ctx))
|
||||
savedTips, err = service.cfg.BeaconDB.ValidatedTips(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(savedTips))
|
||||
require.Equal(t, types.Slot(100), savedTips[r100])
|
||||
}
|
||||
|
||||
@@ -77,7 +77,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []block.SignedBe
|
||||
defer span.End()
|
||||
|
||||
// Apply state transition on the incoming newly received blockCopy without verifying its BLS contents.
|
||||
fCheckpoints, jCheckpoints, err := s.onBlockBatch(ctx, blocks, blkRoots)
|
||||
fCheckpoints, jCheckpoints, optimistic, err := s.onBlockBatch(ctx, blocks, blkRoots)
|
||||
if err != nil {
|
||||
err := errors.Wrap(err, "could not process block in batch")
|
||||
tracing.AnnotateError(span, err)
|
||||
@@ -90,6 +90,19 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []block.SignedBe
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
if !optimistic[i] {
|
||||
root, err := b.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.UpdateSyncedTipsWithValidRoot(ctx, root); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.saveSyncedTipsDB(ctx); err != nil {
|
||||
return errors.Wrap(err, "could not save synced tips")
|
||||
}
|
||||
}
|
||||
|
||||
// Send notification of the processed block to the state feed.
|
||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
|
||||
@@ -27,6 +27,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
|
||||
@@ -82,7 +83,9 @@ type config struct {
|
||||
StateGen *stategen.State
|
||||
SlasherAttestationsFeed *event.Feed
|
||||
WeakSubjectivityCheckpt *ethpb.Checkpoint
|
||||
BlockFetcher powchain.POWBlockFetcher
|
||||
FinalizedStateAtStartUp state.BeaconState
|
||||
ExecutionEngineCaller enginev1.EngineCaller
|
||||
}
|
||||
|
||||
// NewService instantiates a new block service instance that will
|
||||
@@ -187,6 +190,10 @@ func (s *Service) startFromSavedState(saved state.BeaconState) error {
|
||||
store := protoarray.New(justified.Epoch, finalized.Epoch, bytesutil.ToBytes32(finalized.Root))
|
||||
s.cfg.ForkChoiceStore = store
|
||||
|
||||
if err := s.loadSyncedTips(originRoot, saved.Slot()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ss, err := slots.EpochStart(finalized.Epoch)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get start slot of finalized epoch")
|
||||
@@ -213,7 +220,7 @@ func (s *Service) startFromSavedState(saved state.BeaconState) error {
|
||||
Type: statefeed.Initialized,
|
||||
Data: &statefeed.InitializedData{
|
||||
StartTime: s.genesisTime,
|
||||
GenesisValidatorsRoot: saved.GenesisValidatorRoot(),
|
||||
GenesisValidatorsRoot: saved.GenesisValidatorsRoot(),
|
||||
},
|
||||
})
|
||||
|
||||
@@ -375,7 +382,7 @@ func (s *Service) onPowchainStart(ctx context.Context, genesisTime time.Time) {
|
||||
Type: statefeed.Initialized,
|
||||
Data: &statefeed.InitializedData{
|
||||
StartTime: genesisTime,
|
||||
GenesisValidatorsRoot: initializedState.GenesisValidatorRoot(),
|
||||
GenesisValidatorsRoot: initializedState.GenesisValidatorsRoot(),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/async/event"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/store"
|
||||
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
@@ -163,6 +164,74 @@ func TestChainStartStop_Initialized(t *testing.T) {
|
||||
require.LogsContain(t, hook, "data already exists")
|
||||
}
|
||||
|
||||
func TestChainStart_SyncedTipsInDB(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
chainService := setupBeaconChain(t, beaconDB)
|
||||
|
||||
genesisBlk := util.NewBeaconBlock()
|
||||
blkRoot, err := genesisBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlk)))
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.SetSlot(1))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
|
||||
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
|
||||
require.NoError(t, beaconDB.SaveJustifiedCheckpoint(ctx, ðpb.Checkpoint{Root: blkRoot[:]}))
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, ðpb.Checkpoint{Root: blkRoot[:]}))
|
||||
chainService.cfg.FinalizedStateAtStartUp = s
|
||||
|
||||
tips := make(map[[32]byte]types.Slot)
|
||||
tips[bytesutil.ToBytes32([]byte{'a'})] = 1
|
||||
tips[bytesutil.ToBytes32([]byte{'b'})] = 2
|
||||
require.NoError(t, beaconDB.UpdateValidatedTips(ctx, tips))
|
||||
|
||||
// Test the start function.
|
||||
chainService.Start()
|
||||
|
||||
// Test synced Tips in DB
|
||||
tips2 := chainService.cfg.ForkChoiceStore.SyncedTips()
|
||||
require.Equal(t, len(tips2), len(tips))
|
||||
for k, v := range tips {
|
||||
v2, ok := tips2[k]
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, v, v2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestChainStart_SyncedTipsNotInDB(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
chainService := setupBeaconChain(t, beaconDB)
|
||||
|
||||
genesisBlk := util.NewBeaconBlock()
|
||||
blkRoot, err := genesisBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlk)))
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.SetSlot(1))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
|
||||
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
|
||||
require.NoError(t, beaconDB.SaveJustifiedCheckpoint(ctx, ðpb.Checkpoint{Root: blkRoot[:]}))
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, ðpb.Checkpoint{Root: blkRoot[:]}))
|
||||
chainService.cfg.FinalizedStateAtStartUp = s
|
||||
// Test the start function.
|
||||
chainService.Start()
|
||||
|
||||
// Test synced Tips in DB
|
||||
tips := chainService.cfg.ForkChoiceStore.SyncedTips()
|
||||
require.Equal(t, 1, len(tips))
|
||||
slot, ok := tips[blkRoot]
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, types.Slot(1), slot)
|
||||
}
|
||||
|
||||
func TestChainStartStop_GenesisZeroHashes(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
ctx := context.Background()
|
||||
@@ -278,7 +347,7 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
|
||||
headState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, headState.SetSlot(finalizedSlot))
|
||||
require.NoError(t, headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]))
|
||||
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
|
||||
headRoot, err := headBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
|
||||
@@ -322,7 +391,7 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
|
||||
headState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, headState.SetSlot(finalizedSlot))
|
||||
require.NoError(t, headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]))
|
||||
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
|
||||
headRoot, err := headBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
|
||||
@@ -378,7 +447,7 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
|
||||
headState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, headState.SetSlot(headBlock.Block.Slot))
|
||||
require.NoError(t, headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]))
|
||||
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, finalizedRoot))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
|
||||
|
||||
@@ -20,7 +20,6 @@ go_library(
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/forkchoice/protoarray:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
|
||||
@@ -20,7 +20,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
@@ -29,36 +28,39 @@ import (
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var ErrNilState = errors.New("nil state")
|
||||
|
||||
// ChainService defines the mock interface for testing
|
||||
type ChainService struct {
|
||||
State state.BeaconState
|
||||
Root []byte
|
||||
Block block.SignedBeaconBlock
|
||||
Optimistic bool
|
||||
ValidAttestation bool
|
||||
ValidatorsRoot [32]byte
|
||||
PublicKey [fieldparams.BLSPubkeyLength]byte
|
||||
FinalizedCheckPoint *ethpb.Checkpoint
|
||||
CurrentJustifiedCheckPoint *ethpb.Checkpoint
|
||||
PreviousJustifiedCheckPoint *ethpb.Checkpoint
|
||||
BlocksReceived []block.SignedBeaconBlock
|
||||
Slot *types.Slot // Pointer because 0 is a useful value, so checking against it can be incorrect.
|
||||
Balance *precompute.Balance
|
||||
Genesis time.Time
|
||||
ValidatorsRoot [32]byte
|
||||
ForkChoiceStore *protoarray.Store
|
||||
CanonicalRoots map[[32]byte]bool
|
||||
Fork *ethpb.Fork
|
||||
ETH1Data *ethpb.Eth1Data
|
||||
InitSyncBlockRoots map[[32]byte]bool
|
||||
DB db.Database
|
||||
State state.BeaconState
|
||||
Block block.SignedBeaconBlock
|
||||
VerifyBlkDescendantErr error
|
||||
stateNotifier statefeed.Notifier
|
||||
BlocksReceived []block.SignedBeaconBlock
|
||||
SyncCommitteeIndices []types.CommitteeIndex
|
||||
blockNotifier blockfeed.Notifier
|
||||
opNotifier opfeed.Notifier
|
||||
ValidAttestation bool
|
||||
ForkChoiceStore *protoarray.Store
|
||||
VerifyBlkDescendantErr error
|
||||
Slot *types.Slot // Pointer because 0 is a useful value, so checking against it can be incorrect.
|
||||
SyncCommitteeIndices []types.CommitteeIndex
|
||||
Root []byte
|
||||
SyncCommitteeDomain []byte
|
||||
SyncSelectionProofDomain []byte
|
||||
SyncContributionProofDomain []byte
|
||||
PublicKey [fieldparams.BLSPubkeyLength]byte
|
||||
SyncCommitteePubkeys [][]byte
|
||||
InitSyncBlockRoots map[[32]byte]bool
|
||||
Genesis time.Time
|
||||
}
|
||||
|
||||
// StateNotifier mocks the same method in the chain service.
|
||||
@@ -159,7 +161,7 @@ func (mon *MockOperationNotifier) OperationFeed() *event.Feed {
|
||||
// ReceiveBlockInitialSync mocks ReceiveBlockInitialSync method in chain service.
|
||||
func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block block.SignedBeaconBlock, _ [32]byte) error {
|
||||
if s.State == nil {
|
||||
s.State = &v1.BeaconState{}
|
||||
return ErrNilState
|
||||
}
|
||||
if !bytes.Equal(s.Root, block.Block().ParentRoot()) {
|
||||
return errors.Errorf("wanted %#x but got %#x", s.Root, block.Block().ParentRoot())
|
||||
@@ -186,7 +188,7 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block block.
|
||||
// ReceiveBlockBatch processes blocks in batches from initial-sync.
|
||||
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []block.SignedBeaconBlock, _ [][32]byte) error {
|
||||
if s.State == nil {
|
||||
s.State = &v1.BeaconState{}
|
||||
return ErrNilState
|
||||
}
|
||||
for _, block := range blks {
|
||||
if !bytes.Equal(s.Root, block.Block().ParentRoot()) {
|
||||
@@ -215,7 +217,7 @@ func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []block.Signe
|
||||
// ReceiveBlock mocks ReceiveBlock method in chain service.
|
||||
func (s *ChainService) ReceiveBlock(ctx context.Context, block block.SignedBeaconBlock, _ [32]byte) error {
|
||||
if s.State == nil {
|
||||
s.State = &v1.BeaconState{}
|
||||
return ErrNilState
|
||||
}
|
||||
if !bytes.Equal(s.Root, block.Block().ParentRoot()) {
|
||||
return errors.Errorf("wanted %#x but got %#x", s.Root, block.Block().ParentRoot())
|
||||
@@ -328,8 +330,8 @@ func (s *ChainService) GenesisTime() time.Time {
|
||||
return s.Genesis
|
||||
}
|
||||
|
||||
// GenesisValidatorRoot mocks the same method in the chain service.
|
||||
func (s *ChainService) GenesisValidatorRoot() [32]byte {
|
||||
// GenesisValidatorsRoot mocks the same method in the chain service.
|
||||
func (s *ChainService) GenesisValidatorsRoot() [32]byte {
|
||||
return s.ValidatorsRoot
|
||||
}
|
||||
|
||||
@@ -369,8 +371,8 @@ func (s *ChainService) HasInitSyncBlock(rt [32]byte) bool {
|
||||
return s.InitSyncBlockRoots[rt]
|
||||
}
|
||||
|
||||
// HeadGenesisValidatorRoot mocks HeadGenesisValidatorRoot method in chain service.
|
||||
func (_ *ChainService) HeadGenesisValidatorRoot() [32]byte {
|
||||
// HeadGenesisValidatorsRoot mocks HeadGenesisValidatorsRoot method in chain service.
|
||||
func (_ *ChainService) HeadGenesisValidatorsRoot() [32]byte {
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
@@ -438,3 +440,13 @@ func (s *ChainService) HeadSyncSelectionProofDomain(_ context.Context, _ types.S
|
||||
func (s *ChainService) HeadSyncContributionProofDomain(_ context.Context, _ types.Slot) ([]byte, error) {
|
||||
return s.SyncContributionProofDomain, nil
|
||||
}
|
||||
|
||||
// IsOptimistic mocks the same method in the chain service.
|
||||
func (s *ChainService) IsOptimistic(_ context.Context) (bool, error) {
|
||||
return s.Optimistic, nil
|
||||
}
|
||||
|
||||
// IsOptimisticForRoot mocks the same method in the chain service.
|
||||
func (s *ChainService) IsOptimisticForRoot(_ context.Context, _ [32]byte, _ types.Slot) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
beacon-chain/cache/BUILD.bazel (vendored)
@@ -30,7 +30,6 @@ go_library(
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//cache/lru:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
lruwrpr "github.com/prysmaticlabs/prysm/cache/lru"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
)
|
||||
@@ -33,8 +32,7 @@ func (c *SyncCommitteeHeadStateCache) Put(slot types.Slot, st state.BeaconState)
|
||||
return ErrNilValueProvided
|
||||
}
|
||||
|
||||
_, ok := st.(*v1.BeaconState)
|
||||
if ok {
|
||||
if st.Version() == version.Phase0 {
|
||||
return ErrIncorrectType
|
||||
}
|
||||
|
||||
|
||||
@@ -102,7 +102,7 @@ func FilterSyncCommitteeVotes(s state.BeaconStateAltair, sync *ethpb.SyncAggrega
|
||||
// VerifySyncCommitteeSig verifies sync committee signature `syncSig` is valid with respect to public keys `syncKeys`.
|
||||
func VerifySyncCommitteeSig(s state.BeaconStateAltair, syncKeys []bls.PublicKey, syncSig []byte) error {
|
||||
ps := slots.PrevSlot(s.Slot())
|
||||
d, err := signing.Domain(s.Fork(), slots.ToEpoch(ps), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorRoot())
|
||||
d, err := signing.Domain(s.Fork(), slots.ToEpoch(ps), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -8,7 +8,9 @@ import (
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
|
||||
v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/crypto/bls"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
@@ -18,7 +20,7 @@ import (
|
||||
)
|
||||
|
||||
func TestSyncCommitteeIndices_CanGet(t *testing.T) {
|
||||
getState := func(t *testing.T, count uint64) *stateAltair.BeaconState {
|
||||
getState := func(t *testing.T, count uint64) state.BeaconStateAltair {
|
||||
validators := make([]*ethpb.Validator, count)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -35,7 +37,7 @@ func TestSyncCommitteeIndices_CanGet(t *testing.T) {
|
||||
}
|
||||
|
||||
type args struct {
|
||||
state *stateAltair.BeaconState
|
||||
state state.BeaconStateAltair
|
||||
epoch types.Epoch
|
||||
}
|
||||
tests := []struct {
|
||||
@@ -45,9 +47,9 @@ func TestSyncCommitteeIndices_CanGet(t *testing.T) {
|
||||
errString string
|
||||
}{
|
||||
{
|
||||
name: "nil state",
|
||||
name: "nil inner state",
|
||||
args: args{
|
||||
state: nil,
|
||||
state: &v2.BeaconState{},
|
||||
},
|
||||
wantErr: true,
|
||||
errString: "nil inner state",
|
||||
@@ -93,7 +95,7 @@ func TestSyncCommitteeIndices_CanGet(t *testing.T) {
|
||||
|
||||
func TestSyncCommitteeIndices_DifferentPeriods(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
getState := func(t *testing.T, count uint64) *stateAltair.BeaconState {
|
||||
getState := func(t *testing.T, count uint64) state.BeaconStateAltair {
|
||||
validators := make([]*ethpb.Validator, count)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -127,7 +129,7 @@ func TestSyncCommitteeIndices_DifferentPeriods(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSyncCommittee_CanGet(t *testing.T) {
|
||||
getState := func(t *testing.T, count uint64) *stateAltair.BeaconState {
|
||||
getState := func(t *testing.T, count uint64) state.BeaconStateAltair {
|
||||
validators := make([]*ethpb.Validator, count)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
blsKey, err := bls.RandKey()
|
||||
@@ -147,7 +149,7 @@ func TestSyncCommittee_CanGet(t *testing.T) {
|
||||
}
|
||||
|
||||
type args struct {
|
||||
state *stateAltair.BeaconState
|
||||
state state.BeaconStateAltair
|
||||
epoch types.Epoch
|
||||
}
|
||||
tests := []struct {
|
||||
@@ -157,9 +159,9 @@ func TestSyncCommittee_CanGet(t *testing.T) {
|
||||
errString string
|
||||
}{
|
||||
{
|
||||
name: "nil state",
|
||||
name: "nil inner state",
|
||||
args: args{
|
||||
state: nil,
|
||||
state: &v2.BeaconState{},
|
||||
},
|
||||
wantErr: true,
|
||||
errString: "nil inner state",
|
||||
@@ -382,7 +384,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func getState(t *testing.T, count uint64) *stateAltair.BeaconState {
|
||||
func getState(t *testing.T, count uint64) state.BeaconStateAltair {
|
||||
validators := make([]*ethpb.Validator, count)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
blsKey, err := bls.RandKey()
|
||||
|
||||
@@ -68,7 +68,7 @@ func UpgradeToAltair(ctx context.Context, state state.BeaconState) (state.Beacon
|
||||
numValidators := state.NumValidators()
|
||||
s := ðpb.BeaconStateAltair{
|
||||
GenesisTime: state.GenesisTime(),
|
||||
GenesisValidatorsRoot: state.GenesisValidatorRoot(),
|
||||
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
|
||||
Slot: state.Slot(),
|
||||
Fork: ðpb.Fork{
|
||||
PreviousVersion: state.Fork().CurrentVersion,
|
||||
@@ -137,7 +137,7 @@ func UpgradeToAltair(ctx context.Context, state state.BeaconState) (state.Beacon
|
||||
// for index in get_attesting_indices(state, data, attestation.aggregation_bits):
|
||||
// for flag_index in participation_flag_indices:
|
||||
// epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
|
||||
func TranslateParticipation(ctx context.Context, state *statealtair.BeaconState, atts []*ethpb.PendingAttestation) (*statealtair.BeaconState, error) {
|
||||
func TranslateParticipation(ctx context.Context, state state.BeaconStateAltair, atts []*ethpb.PendingAttestation) (state.BeaconStateAltair, error) {
|
||||
epochParticipation, err := state.PreviousEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
|
||||
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
|
||||
@@ -20,12 +19,10 @@ import (
|
||||
func TestTranslateParticipation(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
s, _ := util.DeterministicGenesisStateAltair(t, 64)
|
||||
st, ok := s.(*stateAltair.BeaconState)
|
||||
require.Equal(t, true, ok)
|
||||
require.NoError(t, st.SetSlot(st.Slot()+params.BeaconConfig().MinAttestationInclusionDelay))
|
||||
require.NoError(t, s.SetSlot(s.Slot()+params.BeaconConfig().MinAttestationInclusionDelay))
|
||||
|
||||
var err error
|
||||
newState, err := altair.TranslateParticipation(ctx, st, nil)
|
||||
newState, err := altair.TranslateParticipation(ctx, s, nil)
|
||||
require.NoError(t, err)
|
||||
participation, err := newState.PreviousEpochParticipation()
|
||||
require.NoError(t, err)
|
||||
@@ -56,7 +53,7 @@ func TestTranslateParticipation(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.DeepNotSSZEqual(t, make([]byte, 64), participation)
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, st, pendingAtts[0].Data.Slot, pendingAtts[0].Data.CommitteeIndex)
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, s, pendingAtts[0].Data.Slot, pendingAtts[0].Data.CommitteeIndex)
|
||||
require.NoError(t, err)
|
||||
indices, err := attestation.AttestingIndices(pendingAtts[0].AggregationBits, committee)
|
||||
require.NoError(t, err)
|
||||
@@ -80,7 +77,7 @@ func TestUpgradeToAltair(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, preForkState.GenesisTime(), aState.GenesisTime())
|
||||
require.DeepSSZEqual(t, preForkState.GenesisValidatorRoot(), aState.GenesisValidatorRoot())
|
||||
require.DeepSSZEqual(t, preForkState.GenesisValidatorsRoot(), aState.GenesisValidatorsRoot())
|
||||
require.Equal(t, preForkState.Slot(), aState.Slot())
|
||||
require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), aState.LatestBlockHeader())
|
||||
require.DeepSSZEqual(t, preForkState.BlockRoots(), aState.BlockRoots())
|
||||
|
||||
@@ -98,6 +98,7 @@ go_test(
|
||||
"//proto/prysm/v1alpha1/attestation/aggregation:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation/aggregation/attestations:go_default_library",
|
||||
"//proto/prysm/v1alpha1/wrapper:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
|
||||
@@ -208,7 +208,7 @@ func VerifyIndexedAttestation(ctx context.Context, beaconState state.ReadOnlyBea
|
||||
beaconState.Fork(),
|
||||
indexedAtt.Data.Target.Epoch,
|
||||
params.BeaconConfig().DomainBeaconAttester,
|
||||
beaconState.GenesisValidatorRoot(),
|
||||
beaconState.GenesisValidatorsRoot(),
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -337,7 +337,9 @@ func TestValidateIndexedAttestation_AboveMaxLength(t *testing.T) {
|
||||
}
|
||||
|
||||
want := "validator indices count exceeds MAX_VALIDATORS_PER_COMMITTEE"
|
||||
err := blocks.VerifyIndexedAttestation(context.Background(), &v1.BeaconState{}, indexedAtt1)
|
||||
st, err := v1.InitializeFromProtoUnsafe(ðpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
err = blocks.VerifyIndexedAttestation(context.Background(), st, indexedAtt1)
|
||||
assert.ErrorContains(t, want, err)
|
||||
}
|
||||
|
||||
@@ -415,7 +417,7 @@ func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) {
|
||||
Slot: 1,
|
||||
},
|
||||
})
|
||||
prevDomain, err := signing.Domain(st.Fork(), st.Fork().Epoch-1, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorRoot())
|
||||
prevDomain, err := signing.Domain(st.Fork(), st.Fork().Epoch-1, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
root, err := signing.ComputeSigningRoot(att1.Data, prevDomain)
|
||||
require.NoError(t, err)
|
||||
@@ -435,7 +437,7 @@ func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) {
|
||||
CommitteeIndex: 1,
|
||||
},
|
||||
})
|
||||
currDomain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorRoot())
|
||||
currDomain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
root, err = signing.ComputeSigningRoot(att2.Data, currDomain)
|
||||
require.NoError(t, err)
|
||||
@@ -474,7 +476,7 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing
|
||||
Slot: 1,
|
||||
},
|
||||
})
|
||||
domain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorRoot())
|
||||
domain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
root, err := signing.ComputeSigningRoot(att1.Data, domain)
|
||||
require.NoError(t, err)
|
||||
@@ -538,7 +540,7 @@ func TestRetrieveAttestationSignatureSet_AcrossFork(t *testing.T) {
|
||||
Slot: 1,
|
||||
},
|
||||
})
|
||||
domain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorRoot())
|
||||
domain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
root, err := signing.ComputeSigningRoot(att1.Data, domain)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -108,7 +108,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
|
||||
},
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot())
|
||||
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
signingRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
@@ -177,7 +177,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusAltair(t *testing.T) {
|
||||
},
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot())
|
||||
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
signingRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
@@ -246,7 +246,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusBellatrix(t *testing.T) {
|
||||
},
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot())
|
||||
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
signingRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
|
||||
@@ -74,7 +74,8 @@ func TestFuzzverifyDepositDataSigningRoot_10000(_ *testing.T) {
|
||||
func TestFuzzProcessEth1DataInBlock_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
e := ðpb.Eth1Data{}
|
||||
state := &v1.BeaconState{}
|
||||
state, err := v1.InitializeFromProtoUnsafe(ðpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(e)
|
||||
|
||||
@@ -44,7 +44,7 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {
|
||||
AttestingIndices: setA,
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot())
|
||||
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
signingRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
|
||||
require.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
@@ -175,12 +176,10 @@ func TestProcessEth1Data_SetsCorrectly(t *testing.T) {
|
||||
}
|
||||
|
||||
period := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().EpochsPerEth1VotingPeriod)))
|
||||
var ok bool
|
||||
for i := uint64(0); i < period; i++ {
|
||||
processedState, err := blocks.ProcessEth1DataInBlock(context.Background(), beaconState, b.Block.Body.Eth1Data)
|
||||
require.NoError(t, err)
|
||||
beaconState, ok = processedState.(*v1.BeaconState)
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, true, processedState.Version() == version.Phase0)
|
||||
}
|
||||
|
||||
newETH1DataVotes := beaconState.Eth1DataVotes()
|
||||
|
||||
@@ -57,7 +57,7 @@ func ProcessVoluntaryExits(
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := VerifyExitAndSignature(val, beaconState.Slot(), beaconState.Fork(), exit, beaconState.GenesisValidatorRoot()); err != nil {
|
||||
if err := VerifyExitAndSignature(val, beaconState.Slot(), beaconState.Fork(), exit, beaconState.GenesisValidatorsRoot()); err != nil {
|
||||
return nil, errors.Wrapf(err, "could not verify exit %d", idx)
|
||||
}
|
||||
beaconState, err = v.InitiateValidatorExit(ctx, beaconState, exit.Exit.ValidatorIndex)
|
||||
|
||||
@@ -2,6 +2,7 @@ package blocks
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
@@ -16,13 +17,13 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
|
||||
// MergeComplete returns true if the transition to Bellatrix has completed.
|
||||
// MergeTransitionComplete returns true if the transition to Bellatrix has completed.
|
||||
// Meaning the payload header in beacon state is not `ExecutionPayloadHeader()` (i.e. not empty).
|
||||
//
|
||||
// Spec code:
|
||||
// def is_merge_complete(state: BeaconState) -> bool:
|
||||
// def is_merge_transition_complete(state: BeaconState) -> bool:
|
||||
// return state.latest_execution_payload_header != ExecutionPayloadHeader()
|
||||
func MergeComplete(st state.BeaconState) (bool, error) {
|
||||
func MergeTransitionComplete(st state.BeaconState) (bool, error) {
|
||||
h, err := st.LatestExecutionPayloadHeader()
|
||||
if err != nil {
|
||||
return false, err
|
||||
@@ -31,15 +32,15 @@ func MergeComplete(st state.BeaconState) (bool, error) {
|
||||
return !isEmptyHeader(h), nil
|
||||
}
|
||||
|
||||
// IsMergeBlock returns true if the input block is the terminal merge block.
|
||||
// MergeTransitionBlock returns true if the input block is the terminal merge block.
|
||||
// Meaning the header in beacon state is `ExecutionPayloadHeader()` (i.e. empty).
|
||||
// And the input block has a non-empty header.
|
||||
//
|
||||
// Spec code:
|
||||
// def is_merge_block(state: BeaconState, body: BeaconBlockBody) -> bool:
|
||||
// return not is_merge_complete(state) and body.execution_payload != ExecutionPayload()
|
||||
func IsMergeBlock(st state.BeaconState, blk block.BeaconBlockBody) (bool, error) {
|
||||
mergeComplete, err := MergeComplete(st)
|
||||
// def is_merge_transition_block(state: BeaconState, body: BeaconBlockBody) -> bool:
|
||||
// return not is_merge_transition_complete(state) and body.execution_payload != ExecutionPayload()
|
||||
func MergeTransitionBlock(st state.BeaconState, body block.BeaconBlockBody) (bool, error) {
|
||||
mergeComplete, err := MergeTransitionComplete(st)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -47,8 +48,20 @@ func IsMergeBlock(st state.BeaconState, blk block.BeaconBlockBody) (bool, error)
|
||||
return false, err
|
||||
}
|
||||
|
||||
payload, err := blk.ExecutionPayload()
|
||||
return ExecutionBlock(body)
|
||||
}
|
||||
|
||||
// ExecutionBlock returns whether the block has a non-empty ExecutionPayload.
|
||||
//
|
||||
// Spec code:
|
||||
// def is_execution_block(block: BeaconBlock) -> bool:
|
||||
// return block.body.execution_payload != ExecutionPayload()
|
||||
func ExecutionBlock(body block.BeaconBlockBody) (bool, error) {
|
||||
payload, err := body.ExecutionPayload()
|
||||
if err != nil {
|
||||
if strings.HasPrefix(err.Error(), "ExecutionPayload is not supported in") {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
return !isEmptyPayload(payload), nil
|
||||
@@ -60,15 +73,15 @@ func IsMergeBlock(st state.BeaconState, blk block.BeaconBlockBody) (bool, error)
|
||||
// Spec code:
|
||||
// def is_execution_enabled(state: BeaconState, body: BeaconBlockBody) -> bool:
|
||||
// return is_merge_block(state, body) or is_merge_complete(state)
|
||||
func ExecutionEnabled(st state.BeaconState, blk block.BeaconBlockBody) (bool, error) {
|
||||
mergeBlock, err := IsMergeBlock(st, blk)
|
||||
func ExecutionEnabled(st state.BeaconState, body block.BeaconBlockBody) (bool, error) {
|
||||
mergeBlock, err := MergeTransitionBlock(st, body)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if mergeBlock {
|
||||
return true, nil
|
||||
}
|
||||
return MergeComplete(st)
|
||||
return MergeTransitionComplete(st)
|
||||
}
|
||||
|
||||
// ValidatePayloadWhenMergeCompletes validates if payload is valid versus input beacon state.
|
||||
@@ -79,7 +92,7 @@ func ExecutionEnabled(st state.BeaconState, blk block.BeaconBlockBody) (bool, er
|
||||
// if is_merge_complete(state):
|
||||
// assert payload.parent_hash == state.latest_execution_payload_header.block_hash
|
||||
func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload *enginev1.ExecutionPayload) error {
|
||||
complete, err := MergeComplete(st)
|
||||
complete, err := MergeTransitionComplete(st)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -160,7 +160,7 @@ func Test_MergeComplete(t *testing.T) {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(tt.payload))
|
||||
got, err := blocks.MergeComplete(st)
|
||||
got, err := blocks.MergeTransitionComplete(st)
|
||||
require.NoError(t, err)
|
||||
if got != tt.want {
|
||||
t.Errorf("mergeComplete() got = %v, want %v", got, tt.want)
|
||||
@@ -341,15 +341,49 @@ func Test_MergeBlock(t *testing.T) {
|
||||
blk.Block.Body.ExecutionPayload = tt.payload
|
||||
body, err := wrapper.WrappedBellatrixBeaconBlockBody(blk.Block.Body)
|
||||
require.NoError(t, err)
|
||||
got, err := blocks.IsMergeBlock(st, body)
|
||||
got, err := blocks.MergeTransitionBlock(st, body)
|
||||
require.NoError(t, err)
|
||||
if got != tt.want {
|
||||
t.Errorf("IsMergeBlock() got = %v, want %v", got, tt.want)
|
||||
t.Errorf("MergeTransitionBlock() got = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_IsExecutionBlock(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
payload *enginev1.ExecutionPayload
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "empty payload",
|
||||
payload: emptyPayload(),
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "non-empty payload",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Body.ExecutionPayload = tt.payload
|
||||
wrappedBlock, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
|
||||
require.NoError(t, err)
|
||||
got, err := blocks.ExecutionBlock(wrappedBlock.Body())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_ExecutionEnabled(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -617,7 +651,7 @@ func BenchmarkBellatrixComplete(b *testing.B) {
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := blocks.MergeComplete(st)
|
||||
_, err := blocks.MergeTransitionComplete(st)
|
||||
require.NoError(b, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -378,7 +378,7 @@ func TestVerifyProposerSlashing(t *testing.T) {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
||||
sk := sks[tt.args.slashing.Header_1.Header.ProposerIndex]
|
||||
d, err := signing.Domain(tt.args.beaconState.Fork(), slots.ToEpoch(tt.args.slashing.Header_1.Header.Slot), params.BeaconConfig().DomainBeaconProposer, tt.args.beaconState.GenesisValidatorRoot())
|
||||
d, err := signing.Domain(tt.args.beaconState.Fork(), slots.ToEpoch(tt.args.slashing.Header_1.Header.Slot), params.BeaconConfig().DomainBeaconProposer, tt.args.beaconState.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
if tt.args.slashing.Header_1.Signature == nil {
|
||||
sr, err := signing.ComputeSigningRoot(tt.args.slashing.Header_1.Header, d)
|
||||
|
||||
@@ -26,7 +26,7 @@ func TestProcessRandao_IncorrectProposerFailsVerification(t *testing.T) {
|
||||
epoch := types.Epoch(0)
|
||||
buf := make([]byte, 32)
|
||||
binary.LittleEndian.PutUint64(buf, uint64(epoch))
|
||||
domain, err := signing.Domain(beaconState.Fork(), epoch, params.BeaconConfig().DomainRandao, beaconState.GenesisValidatorRoot())
|
||||
domain, err := signing.Domain(beaconState.Fork(), epoch, params.BeaconConfig().DomainRandao, beaconState.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
root, err := (ðpb.SigningData{ObjectRoot: buf, Domain: domain}).HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -68,7 +68,7 @@ func VerifyBlockSignature(beaconState state.ReadOnlyBeaconState,
|
||||
sig []byte,
|
||||
rootFunc func() ([32]byte, error)) error {
|
||||
currentEpoch := slots.ToEpoch(beaconState.Slot())
|
||||
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot())
|
||||
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -83,7 +83,7 @@ func VerifyBlockSignature(beaconState state.ReadOnlyBeaconState,
|
||||
// VerifyBlockHeaderSignature verifies the proposer signature of a beacon block header.
|
||||
func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
|
||||
currentEpoch := slots.ToEpoch(beaconState.Slot())
|
||||
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot())
|
||||
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -104,7 +104,7 @@ func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState,
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot())
|
||||
domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -122,7 +122,7 @@ func BlockSignatureBatch(beaconState state.ReadOnlyBeaconState,
|
||||
sig []byte,
|
||||
rootFunc func() ([32]byte, error)) (*bls.SignatureBatch, error) {
|
||||
currentEpoch := slots.ToEpoch(beaconState.Slot())
|
||||
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot())
|
||||
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -164,7 +164,7 @@ func randaoSigningData(ctx context.Context, beaconState state.ReadOnlyBeaconStat
|
||||
buf := make([]byte, 32)
|
||||
binary.LittleEndian.PutUint64(buf, uint64(currentEpoch))
|
||||
|
||||
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainRandao, beaconState.GenesisValidatorRoot())
|
||||
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainRandao, beaconState.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
@@ -231,7 +231,7 @@ func AttestationSignatureBatch(ctx context.Context, beaconState state.ReadOnlyBe
|
||||
}
|
||||
|
||||
fork := beaconState.Fork()
|
||||
gvr := beaconState.GenesisValidatorRoot()
|
||||
gvr := beaconState.GenesisValidatorsRoot()
|
||||
dt := params.BeaconConfig().DomainBeaconAttester
|
||||
|
||||
// Split attestations by fork. Note: the signature domain will differ based on the fork.
|
||||
|
||||
@@ -41,7 +41,7 @@ func TestVerifyBlockHeaderSignature(t *testing.T) {
|
||||
beaconState.Fork(),
|
||||
0,
|
||||
params.BeaconConfig().DomainBeaconProposer,
|
||||
beaconState.GenesisValidatorRoot(),
|
||||
beaconState.GenesisValidatorsRoot(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
htr, err := blockHeader.Header.HashTreeRoot()
|
||||
@@ -77,7 +77,7 @@ func TestVerifyBlockSignatureUsingCurrentFork(t *testing.T) {
|
||||
CurrentVersion: params.BeaconConfig().AltairForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
}
|
||||
domain, err := signing.Domain(fData, 100, params.BeaconConfig().DomainBeaconProposer, bState.GenesisValidatorRoot())
|
||||
domain, err := signing.Domain(fData, 100, params.BeaconConfig().DomainBeaconProposer, bState.GenesisValidatorsRoot())
|
||||
assert.NoError(t, err)
|
||||
rt, err := signing.ComputeSigningRoot(altairBlk.Block, domain)
|
||||
assert.NoError(t, err)
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/math"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
@@ -47,8 +48,7 @@ func TestProcessRewardsAndPenaltiesPrecompute(t *testing.T) {
|
||||
|
||||
processedState, err := ProcessRewardsAndPenaltiesPrecompute(beaconState, bp, vp, AttestationsDelta, ProposersDelta)
|
||||
require.NoError(t, err)
|
||||
beaconState, ok := processedState.(*v1.BeaconState)
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, true, processedState.Version() == version.Phase0)
|
||||
|
||||
// Indices that voted everything except for head, lost a bit money
|
||||
wanted := uint64(31999810265)
|
||||
|
||||
@@ -38,7 +38,7 @@ func UpgradeToBellatrix(ctx context.Context, state state.BeaconState) (state.Bea
|
||||
|
||||
s := ðpb.BeaconStateBellatrix{
|
||||
GenesisTime: state.GenesisTime(),
|
||||
GenesisValidatorsRoot: state.GenesisValidatorRoot(),
|
||||
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
|
||||
Slot: state.Slot(),
|
||||
Fork: ðpb.Fork{
|
||||
PreviousVersion: state.Fork().CurrentVersion,
|
||||
|
||||
@@ -19,7 +19,7 @@ func TestUpgradeToBellatrix(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, preForkState.GenesisTime(), mSt.GenesisTime())
|
||||
require.DeepSSZEqual(t, preForkState.GenesisValidatorRoot(), mSt.GenesisValidatorRoot())
|
||||
require.DeepSSZEqual(t, preForkState.GenesisValidatorsRoot(), mSt.GenesisValidatorsRoot())
|
||||
require.Equal(t, preForkState.Slot(), mSt.Slot())
|
||||
require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), mSt.LatestBlockHeader())
|
||||
require.DeepSSZEqual(t, preForkState.BlockRoots(), mSt.BlockRoots())
|
||||
|
||||
@@ -23,7 +23,7 @@ var ErrSigFailedToVerify = errors.New("signature did not verify")

// ComputeDomainAndSign computes the domain and signing root and sign it using the passed in private key.
func ComputeDomainAndSign(st state.ReadOnlyBeaconState, epoch types.Epoch, obj fssz.HashRoot, domain [4]byte, key bls.SecretKey) ([]byte, error) {
-	d, err := Domain(st.Fork(), epoch, domain, st.GenesisValidatorRoot())
+	d, err := Domain(st.Fork(), epoch, domain, st.GenesisValidatorsRoot())
	if err != nil {
		return nil, err
	}
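For orientation, a minimal sketch of how a caller might use the helper above together with the two-step Domain/ComputeSigningRoot form that the rest of this change keeps touching; the function name signExample and its parameters are illustrative, while the signing package calls are the ones shown in this diff.

func signExample(st state.ReadOnlyBeaconState, blk fssz.HashRoot, key bls.SecretKey) ([]byte, error) {
	epoch := slots.ToEpoch(st.Slot())
	// One-step helper, now reading GenesisValidatorsRoot() as changed above.
	sig, err := signing.ComputeDomainAndSign(st, epoch, blk, params.BeaconConfig().DomainBeaconProposer, key)
	if err != nil {
		return nil, err
	}
	// The equivalent two-step form used by the verification paths below.
	d, err := signing.Domain(st.Fork(), epoch, params.BeaconConfig().DomainBeaconProposer, st.GenesisValidatorsRoot())
	if err != nil {
		return nil, err
	}
	if _, err := signing.ComputeSigningRoot(blk, d); err != nil {
		return nil, err
	}
	return sig, nil
}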
@@ -69,7 +69,7 @@ func ComputeDomainVerifySigningRoot(st state.ReadOnlyBeaconState, index types.Va
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d, err := Domain(st.Fork(), epoch, domain, st.GenesisValidatorRoot())
|
||||
d, err := Domain(st.Fork(), epoch, domain, st.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -219,7 +219,7 @@ func computeForkDataRoot(version, root []byte) ([32]byte, error) {
	return r, nil
}

-// ComputeForkDigest returns the fork for the current version and genesis validator root
+// ComputeForkDigest returns the fork for the current version and genesis validators root
//
// Spec pseudocode definition:
//	def compute_fork_digest(current_version: Version, genesis_validators_root: Root) -> ForkDigest:
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
@@ -33,8 +34,7 @@ func TestSkipSlotCache_OK(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
executedState, err := transition.ExecuteStateTransition(context.Background(), originalState, wrapper.WrappedPhase0SignedBeaconBlock(blk))
|
||||
require.NoError(t, err, "Could not run state transition")
|
||||
originalState, ok := executedState.(*v1.BeaconState)
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, true, executedState.Version() == version.Phase0)
|
||||
bState, err = transition.ExecuteStateTransition(context.Background(), bState, wrapper.WrappedPhase0SignedBeaconBlock(blk))
|
||||
require.NoError(t, err, "Could not process state transition")
|
||||
|
||||
@@ -59,8 +59,7 @@ func TestSkipSlotCache_ConcurrentMixup(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
executedState, err := transition.ExecuteStateTransition(context.Background(), originalState, wrapper.WrappedPhase0SignedBeaconBlock(blk))
|
||||
require.NoError(t, err, "Could not run state transition")
|
||||
originalState, ok := executedState.(*v1.BeaconState)
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, true, executedState.Version() == version.Phase0)
|
||||
|
||||
// Create two shallow but different forks
|
||||
var s1, s0 state.BeaconState
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
fuzz "github.com/google/gofuzz"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
|
||||
func TestGenesisBeaconState_1000(t *testing.T) {
|
||||
@@ -37,7 +38,8 @@ func TestOptimizedGenesisBeaconState_1000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
var genesisTime uint64
|
||||
preState := &v1.BeaconState{}
|
||||
preState, err := v1.InitializeFromProtoUnsafe(ðpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
eth1Data := ðpb.Eth1Data{}
|
||||
for i := 0; i < 1000; i++ {
|
||||
fuzzer.Fuzz(&genesisTime)
|
||||
|
||||
@@ -10,13 +10,15 @@ import (
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
|
||||
func TestFuzzExecuteStateTransition_1000(t *testing.T) {
|
||||
SkipSlotCache.Disable()
|
||||
defer SkipSlotCache.Enable()
|
||||
ctx := context.Background()
|
||||
state := &v1.BeaconState{}
|
||||
state, err := v1.InitializeFromProtoUnsafe(ðpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
sb := ðpb.SignedBeaconBlock{}
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
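The same substitution recurs throughout the fuzz tests below: a zero-value *v1.BeaconState, which carries no backing protobuf message, is replaced by a state built from an empty proto. A small helper-style sketch of the new pattern, assuming the imports these tests already have (the helper name is illustrative):

func newEmptyPhase0State(t *testing.T) state.BeaconState {
	// Before this change the tests used `&v1.BeaconState{}` directly; building
	// the state from a proto message yields a usable value and surfaces any
	// construction error instead of deferring it to a later getter call.
	st, err := v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{})
	require.NoError(t, err)
	return st
}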
@@ -34,7 +36,8 @@ func TestFuzzCalculateStateRoot_1000(t *testing.T) {
|
||||
SkipSlotCache.Disable()
|
||||
defer SkipSlotCache.Enable()
|
||||
ctx := context.Background()
|
||||
state := &v1.BeaconState{}
|
||||
state, err := v1.InitializeFromProtoUnsafe(ðpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
sb := ðpb.SignedBeaconBlock{}
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
@@ -52,7 +55,8 @@ func TestFuzzProcessSlot_1000(t *testing.T) {
|
||||
SkipSlotCache.Disable()
|
||||
defer SkipSlotCache.Enable()
|
||||
ctx := context.Background()
|
||||
state := &v1.BeaconState{}
|
||||
state, err := v1.InitializeFromProtoUnsafe(ðpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
for i := 0; i < 1000; i++ {
|
||||
@@ -68,7 +72,8 @@ func TestFuzzProcessSlots_1000(t *testing.T) {
|
||||
SkipSlotCache.Disable()
|
||||
defer SkipSlotCache.Enable()
|
||||
ctx := context.Background()
|
||||
state := &v1.BeaconState{}
|
||||
state, err := v1.InitializeFromProtoUnsafe(ðpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
slot := types.Slot(0)
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
@@ -86,7 +91,8 @@ func TestFuzzprocessOperationsNoVerify_1000(t *testing.T) {
|
||||
SkipSlotCache.Disable()
|
||||
defer SkipSlotCache.Enable()
|
||||
ctx := context.Background()
|
||||
state := &v1.BeaconState{}
|
||||
state, err := v1.InitializeFromProtoUnsafe(ðpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
bb := ðpb.SignedBeaconBlock{}
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
@@ -100,10 +106,11 @@ func TestFuzzprocessOperationsNoVerify_1000(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzverifyOperationLengths_10000(_ *testing.T) {
|
||||
func TestFuzzverifyOperationLengths_10000(t *testing.T) {
|
||||
SkipSlotCache.Disable()
|
||||
defer SkipSlotCache.Enable()
|
||||
state := &v1.BeaconState{}
|
||||
state, err := v1.InitializeFromProtoUnsafe(ðpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
bb := ðpb.SignedBeaconBlock{}
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
@@ -115,10 +122,11 @@ func TestFuzzverifyOperationLengths_10000(_ *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzCanProcessEpoch_10000(_ *testing.T) {
|
||||
func TestFuzzCanProcessEpoch_10000(t *testing.T) {
|
||||
SkipSlotCache.Disable()
|
||||
defer SkipSlotCache.Enable()
|
||||
state := &v1.BeaconState{}
|
||||
state, err := v1.InitializeFromProtoUnsafe(ðpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
for i := 0; i < 10000; i++ {
|
||||
@@ -131,7 +139,8 @@ func TestFuzzProcessEpochPrecompute_1000(t *testing.T) {
|
||||
SkipSlotCache.Disable()
|
||||
defer SkipSlotCache.Enable()
|
||||
ctx := context.Background()
|
||||
state := &v1.BeaconState{}
|
||||
state, err := v1.InitializeFromProtoUnsafe(ðpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
for i := 0; i < 1000; i++ {
|
||||
@@ -147,7 +156,8 @@ func TestFuzzProcessBlockForStateRoot_1000(t *testing.T) {
|
||||
SkipSlotCache.Disable()
|
||||
defer SkipSlotCache.Enable()
|
||||
ctx := context.Background()
|
||||
state := &v1.BeaconState{}
|
||||
state, err := v1.InitializeFromProtoUnsafe(ðpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
sb := ðpb.SignedBeaconBlock{}
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
fuzzer.NilChance(0.1)
|
||||
|
||||
@@ -253,7 +253,7 @@ func createFullBlockWithOperations(t *testing.T) (state.BeaconState,
|
||||
},
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot())
|
||||
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
hashTreeRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
|
||||
require.NoError(t, err)
|
||||
@@ -394,7 +394,9 @@ func TestProcessBlock_OverMaxProposerSlashings(t *testing.T) {
|
||||
}
|
||||
want := fmt.Sprintf("number of proposer slashings (%d) in block body exceeds allowed threshold of %d",
|
||||
len(b.Block.Body.ProposerSlashings), params.BeaconConfig().MaxProposerSlashings)
|
||||
_, err := transition.VerifyOperationLengths(context.Background(), &v1.BeaconState{}, wrapper.WrappedPhase0SignedBeaconBlock(b))
|
||||
s, err := v1.InitializeFromProtoUnsafe(ðpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
_, err = transition.VerifyOperationLengths(context.Background(), s, wrapper.WrappedPhase0SignedBeaconBlock(b))
|
||||
assert.ErrorContains(t, want, err)
|
||||
}
|
||||
|
||||
@@ -409,7 +411,9 @@ func TestProcessBlock_OverMaxAttesterSlashings(t *testing.T) {
|
||||
}
|
||||
want := fmt.Sprintf("number of attester slashings (%d) in block body exceeds allowed threshold of %d",
|
||||
len(b.Block.Body.AttesterSlashings), params.BeaconConfig().MaxAttesterSlashings)
|
||||
_, err := transition.VerifyOperationLengths(context.Background(), &v1.BeaconState{}, wrapper.WrappedPhase0SignedBeaconBlock(b))
|
||||
s, err := v1.InitializeFromProtoUnsafe(ðpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
_, err = transition.VerifyOperationLengths(context.Background(), s, wrapper.WrappedPhase0SignedBeaconBlock(b))
|
||||
assert.ErrorContains(t, want, err)
|
||||
}
|
||||
|
||||
@@ -423,7 +427,9 @@ func TestProcessBlock_OverMaxAttestations(t *testing.T) {
|
||||
}
|
||||
want := fmt.Sprintf("number of attestations (%d) in block body exceeds allowed threshold of %d",
|
||||
len(b.Block.Body.Attestations), params.BeaconConfig().MaxAttestations)
|
||||
_, err := transition.VerifyOperationLengths(context.Background(), &v1.BeaconState{}, wrapper.WrappedPhase0SignedBeaconBlock(b))
|
||||
s, err := v1.InitializeFromProtoUnsafe(ðpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
_, err = transition.VerifyOperationLengths(context.Background(), s, wrapper.WrappedPhase0SignedBeaconBlock(b))
|
||||
assert.ErrorContains(t, want, err)
|
||||
}
|
||||
|
||||
@@ -438,7 +444,9 @@ func TestProcessBlock_OverMaxVoluntaryExits(t *testing.T) {
|
||||
}
|
||||
want := fmt.Sprintf("number of voluntary exits (%d) in block body exceeds allowed threshold of %d",
|
||||
len(b.Block.Body.VoluntaryExits), maxExits)
|
||||
_, err := transition.VerifyOperationLengths(context.Background(), &v1.BeaconState{}, wrapper.WrappedPhase0SignedBeaconBlock(b))
|
||||
s, err := v1.InitializeFromProtoUnsafe(ðpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
_, err = transition.VerifyOperationLengths(context.Background(), s, wrapper.WrappedPhase0SignedBeaconBlock(b))
|
||||
assert.ErrorContains(t, want, err)
|
||||
}
|
||||
|
||||
|
||||
@@ -31,6 +31,7 @@ go_test(
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
@@ -129,8 +130,7 @@ func TestSlashValidator_OK(t *testing.T) {
|
||||
cfg := params.BeaconConfig()
|
||||
slashedState, err := SlashValidator(context.Background(), state, slashedIdx, cfg.MinSlashingPenaltyQuotient, cfg.ProposerRewardQuotient)
|
||||
require.NoError(t, err, "Could not slash validator")
|
||||
state, ok := slashedState.(*v1.BeaconState)
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, true, slashedState.Version() == version.Phase0)
|
||||
|
||||
v, err := state.ValidatorAtIndex(slashedIdx)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -30,6 +30,7 @@ type ReadOnlyDatabase interface {
|
||||
IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool
|
||||
FinalizedChildBlock(ctx context.Context, blockRoot [32]byte) (block.SignedBeaconBlock, error)
|
||||
HighestSlotBlocksBelow(ctx context.Context, slot types.Slot) ([]block.SignedBeaconBlock, error)
|
||||
ValidatedTips(ctx context.Context) (map[[32]byte]types.Slot, error)
|
||||
// State related methods.
|
||||
State(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
|
||||
GenesisState(ctx context.Context) (state.BeaconState, error)
|
||||
@@ -58,9 +59,11 @@ type NoHeadAccessDatabase interface {
|
||||
ReadOnlyDatabase
|
||||
|
||||
// Block related methods.
|
||||
DeleteBlock(ctx context.Context, root [32]byte) error
|
||||
SaveBlock(ctx context.Context, block block.SignedBeaconBlock) error
|
||||
SaveBlocks(ctx context.Context, blocks []block.SignedBeaconBlock) error
|
||||
SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) error
|
||||
UpdateValidatedTips(ctx context.Context, newVals map[[32]byte]types.Slot) error
|
||||
// State related methods.
|
||||
SaveState(ctx context.Context, state state.ReadOnlyBeaconState, blockRoot [32]byte) error
|
||||
SaveStates(ctx context.Context, states []state.ReadOnlyBeaconState, blockRoots [][32]byte) error
|
||||
|
||||
@@ -204,6 +204,34 @@ func (s *Store) BlockRootsBySlot(ctx context.Context, slot types.Slot) (bool, []
	return len(blockRoots) > 0, blockRoots, nil
}

+// DeleteBlock from the db
+// This deletes the root entry from all buckets in the blocks DB
+// If the block is finalized this function returns an error
+func (s *Store) DeleteBlock(ctx context.Context, root [32]byte) error {
+	ctx, span := trace.StartSpan(ctx, "BeaconDB.DeleteBlock")
+	defer span.End()
+
+	if err := s.DeleteState(ctx, root); err != nil {
+		return errDeleteFinalized
+	}
+
+	return s.db.Update(func(tx *bolt.Tx) error {
+		bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
+		if b := bkt.Get(root[:]); b != nil {
+			return errDeleteFinalized
+		}
+
+		if err := tx.Bucket(blocksBucket).Delete(root[:]); err != nil {
+			return err
+		}
+		if err := tx.Bucket(blockParentRootIndicesBucket).Delete(root[:]); err != nil {
+			return err
+		}
+		s.blockCache.Del(string(root[:]))
+		return nil
+	})
+}
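A minimal in-package usage sketch, mirroring the TestStore_DeleteBlock case added further down; the helper name is illustrative and errors.Is refers to the standard library.

func deleteIfNotFinalized(ctx context.Context, s *Store, root [32]byte) error {
	if err := s.DeleteBlock(ctx, root); err != nil {
		// Finalized blocks (and blocks whose state deletion is refused) are kept.
		if errors.Is(err, errDeleteFinalized) {
			return nil
		}
		return err
	}
	return nil
}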
|
||||
// SaveBlock to the db.
|
||||
func (s *Store) SaveBlock(ctx context.Context, signed block.SignedBeaconBlock) error {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlock")
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
@@ -163,6 +164,44 @@ func TestStore_BlocksHandleInvalidEndSlot(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_DeleteBlock(t *testing.T) {
|
||||
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
|
||||
db := setupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
|
||||
blks := makeBlocks(t, 0, slotsPerEpoch*4, genesisBlockRoot)
|
||||
require.NoError(t, db.SaveBlocks(ctx, blks))
|
||||
|
||||
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
cp := ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: root[:],
|
||||
}
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveState(ctx, st, root))
|
||||
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
|
||||
|
||||
root2, err := blks[4*slotsPerEpoch-2].Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
b, err := db.Block(ctx, root2)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, b)
|
||||
require.NoError(t, db.DeleteBlock(ctx, root2))
|
||||
st, err = db.State(ctx, root2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, st, nil)
|
||||
|
||||
b, err = db.Block(ctx, root2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, b, nil)
|
||||
|
||||
require.ErrorIs(t, db.DeleteBlock(ctx, root), errDeleteFinalized)
|
||||
|
||||
}
|
||||
|
||||
func TestStore_GenesisBlock(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
@@ -2,6 +2,9 @@ package kv

import "errors"

+// errDeleteFinalized is raised when we attempt to delete a finalized block/state
+var errDeleteFinalized = errors.New("cannot delete finalized block or state")
+
// ErrNotFound can be used directly, or as a wrapped DBError, whenever a db method needs to
// indicate that a value couldn't be found.
var ErrNotFound = errors.New("not found in db")
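A short illustrative sketch of the intended use: db methods may return ErrNotFound directly or wrap it (for example with fmt.Errorf and %w), and callers should test for it with errors.Is rather than string comparison.

func isNotFound(err error) bool {
	// Works whether ErrNotFound was returned as-is or wrapped with %w.
	return errors.Is(err, ErrNotFound)
}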
|
||||
@@ -105,6 +105,8 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
|
||||
}
|
||||
}
|
||||
datafile := KVStoreDatafilePath(dirPath)
|
||||
start := time.Now()
|
||||
log.Infof("Opening Bolt DB at %s", datafile)
|
||||
boltDB, err := bolt.Open(
|
||||
datafile,
|
||||
params.BeaconIoConfig().ReadWritePermissions,
|
||||
@@ -114,29 +116,40 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
log.WithField("elapsed", time.Since(start)).Error("Failed to open Bolt DB")
|
||||
if errors.Is(err, bolt.ErrTimeout) {
|
||||
return nil, errors.New("cannot obtain database lock, database may be in use by another process")
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
log.WithField("elapsed", time.Since(start)).Info("Opened Bolt DB")
|
||||
|
||||
boltDB.AllocSize = boltAllocSize
|
||||
start = time.Now()
|
||||
log.Infof("Creating block cache...")
|
||||
blockCache, err := ristretto.NewCache(&ristretto.Config{
|
||||
NumCounters: 1000, // number of keys to track frequency of (1000).
|
||||
MaxCost: BlockCacheSize, // maximum cost of cache (1000 Blocks).
|
||||
BufferItems: 64, // number of keys per Get buffer.
|
||||
})
|
||||
if err != nil {
|
||||
log.WithField("elapsed", time.Since(start)).Error("Failed to create block cache")
|
||||
return nil, err
|
||||
}
|
||||
log.WithField("elapsed", time.Since(start)).Info("Created block cache")
|
||||
|
||||
start = time.Now()
|
||||
log.Infof("Creating validator cache...")
|
||||
validatorCache, err := ristretto.NewCache(&ristretto.Config{
|
||||
NumCounters: NumOfValidatorEntries, // number of entries in cache (2 Million).
|
||||
MaxCost: ValidatorEntryMaxCost, // maximum size of the cache (64Mb)
|
||||
BufferItems: 64, // number of keys per Get buffer.
|
||||
})
|
||||
if err != nil {
|
||||
log.WithField("elapsed", time.Since(start)).Error("Failed to to create validator cache")
|
||||
return nil, err
|
||||
}
|
||||
log.WithField("elapsed", time.Since(start)).Info("Created validator cache")
|
||||
|
||||
kv := &Store{
|
||||
db: boltDB,
|
||||
@@ -146,7 +159,8 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
|
||||
stateSummaryCache: newStateSummaryCache(),
|
||||
ctx: ctx,
|
||||
}
|
||||
|
||||
start = time.Now()
|
||||
log.Infof("Updating DB and creating buckets...")
|
||||
if err := kv.db.Update(func(tx *bolt.Tx) error {
|
||||
return createBuckets(
|
||||
tx,
|
||||
@@ -179,8 +193,10 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
|
||||
migrationsBucket,
|
||||
)
|
||||
}); err != nil {
|
||||
log.WithField("elapsed", time.Since(start)).Error("Failed to update db and create buckets")
|
||||
return nil, err
|
||||
}
|
||||
log.WithField("elapsed", time.Since(start)).Info("Updated db and created buckets")
|
||||
|
||||
err = prometheus.Register(createBoltCollector(kv.db))
|
||||
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/golang/snappy"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
@@ -19,12 +20,12 @@ import (
|
||||
func Test_migrateStateValidators(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
setup func(t *testing.T, dbStore *Store, state *v1.BeaconState, vals []*v1alpha1.Validator)
|
||||
eval func(t *testing.T, dbStore *Store, state *v1.BeaconState, vals []*v1alpha1.Validator)
|
||||
setup func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator)
|
||||
eval func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator)
|
||||
}{
|
||||
{
|
||||
name: "only runs once",
|
||||
setup: func(t *testing.T, dbStore *Store, state *v1.BeaconState, vals []*v1alpha1.Validator) {
|
||||
setup: func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator) {
|
||||
// create some new buckets that should be present for this migration
|
||||
err := dbStore.db.Update(func(tx *bbolt.Tx) error {
|
||||
_, err := tx.CreateBucketIfNotExists(stateValidatorsBucket)
|
||||
@@ -35,7 +36,7 @@ func Test_migrateStateValidators(t *testing.T) {
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
},
|
||||
eval: func(t *testing.T, dbStore *Store, state *v1.BeaconState, vals []*v1alpha1.Validator) {
|
||||
eval: func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator) {
|
||||
// check if the migration is completed, per migration table.
|
||||
err := dbStore.db.View(func(tx *bbolt.Tx) error {
|
||||
migrationCompleteOrNot := tx.Bucket(migrationsBucket).Get(migrationStateValidatorsKey)
|
||||
@@ -47,7 +48,7 @@ func Test_migrateStateValidators(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "once migrated, always enable flag",
|
||||
setup: func(t *testing.T, dbStore *Store, state *v1.BeaconState, vals []*v1alpha1.Validator) {
|
||||
setup: func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator) {
|
||||
// create some new buckets that should be present for this migration
|
||||
err := dbStore.db.Update(func(tx *bbolt.Tx) error {
|
||||
_, err := tx.CreateBucketIfNotExists(stateValidatorsBucket)
|
||||
@@ -58,7 +59,7 @@ func Test_migrateStateValidators(t *testing.T) {
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
},
|
||||
eval: func(t *testing.T, dbStore *Store, state *v1.BeaconState, vals []*v1alpha1.Validator) {
|
||||
eval: func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator) {
|
||||
// disable the flag and see if the code mandates that flag.
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableHistoricalSpaceRepresentation: false,
|
||||
@@ -111,7 +112,7 @@ func Test_migrateStateValidators(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "migrates validators and adds them to new buckets",
|
||||
setup: func(t *testing.T, dbStore *Store, state *v1.BeaconState, vals []*v1alpha1.Validator) {
|
||||
setup: func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator) {
|
||||
// create some new buckets that should be present for this migration
|
||||
err := dbStore.db.Update(func(tx *bbolt.Tx) error {
|
||||
_, err := tx.CreateBucketIfNotExists(stateValidatorsBucket)
|
||||
@@ -122,7 +123,7 @@ func Test_migrateStateValidators(t *testing.T) {
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
},
|
||||
eval: func(t *testing.T, dbStore *Store, state *v1.BeaconState, vals []*v1alpha1.Validator) {
|
||||
eval: func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator) {
|
||||
// check whether the new buckets are present
|
||||
err := dbStore.db.View(func(tx *bbolt.Tx) error {
|
||||
valBkt := tx.Bucket(stateValidatorsBucket)
|
||||
@@ -209,12 +210,12 @@ func Test_migrateStateValidators(t *testing.T) {
|
||||
func Test_migrateAltairStateValidators(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
setup func(t *testing.T, dbStore *Store, state *v2.BeaconState, vals []*v1alpha1.Validator)
|
||||
eval func(t *testing.T, dbStore *Store, state *v2.BeaconState, vals []*v1alpha1.Validator)
|
||||
setup func(t *testing.T, dbStore *Store, state state.BeaconStateAltair, vals []*v1alpha1.Validator)
|
||||
eval func(t *testing.T, dbStore *Store, state state.BeaconStateAltair, vals []*v1alpha1.Validator)
|
||||
}{
|
||||
{
|
||||
name: "migrates validators and adds them to new buckets",
|
||||
setup: func(t *testing.T, dbStore *Store, state *v2.BeaconState, vals []*v1alpha1.Validator) {
|
||||
setup: func(t *testing.T, dbStore *Store, state state.BeaconStateAltair, vals []*v1alpha1.Validator) {
|
||||
// create some new buckets that should be present for this migration
|
||||
err := dbStore.db.Update(func(tx *bbolt.Tx) error {
|
||||
_, err := tx.CreateBucketIfNotExists(stateValidatorsBucket)
|
||||
@@ -225,7 +226,7 @@ func Test_migrateAltairStateValidators(t *testing.T) {
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
},
|
||||
eval: func(t *testing.T, dbStore *Store, state *v2.BeaconState, vals []*v1alpha1.Validator) {
|
||||
eval: func(t *testing.T, dbStore *Store, state state.BeaconStateAltair, vals []*v1alpha1.Validator) {
|
||||
// check whether the new buckets are present
|
||||
err := dbStore.db.View(func(tx *bbolt.Tx) error {
|
||||
valBkt := tx.Bucket(stateValidatorsBucket)
|
||||
@@ -300,9 +301,9 @@ func Test_migrateAltairStateValidators(t *testing.T) {
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
tt.setup(t, dbStore, st.(*v2.BeaconState), vals)
|
||||
tt.setup(t, dbStore, st, vals)
|
||||
assert.NoError(t, migrateStateValidators(context.Background(), dbStore.db), "migrateArchivedIndex(tx) error")
|
||||
tt.eval(t, dbStore, st.(*v2.BeaconState), vals)
|
||||
tt.eval(t, dbStore, st, vals)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -120,11 +120,6 @@ func (_ *Service) AllDeposits(_ context.Context, _ *big.Int) []*ethpb.Deposit {
|
||||
return []*ethpb.Deposit{}
|
||||
}
|
||||
|
||||
// ChainStartDeposits mocks out the powchain functionality for interop.
|
||||
func (s *Service) ChainStartDeposits() []*ethpb.Deposit {
|
||||
return s.chainStartDeposits
|
||||
}
|
||||
|
||||
// ChainStartEth1Data mocks out the powchain functionality for interop.
|
||||
func (_ *Service) ChainStartEth1Data() *ethpb.Eth1Data {
|
||||
return ðpb.Eth1Data{}
|
||||
@@ -132,7 +127,11 @@ func (_ *Service) ChainStartEth1Data() *ethpb.Eth1Data {
|
||||
|
||||
// PreGenesisState returns an empty beacon state.
|
||||
func (_ *Service) PreGenesisState() state.BeaconState {
|
||||
return &v1.BeaconState{}
|
||||
s, err := v1.InitializeFromProto(ðpb.BeaconState{})
|
||||
if err != nil {
|
||||
panic("could not initialize state")
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// ClearPreGenesisData --
|
||||
|
||||
@@ -8,7 +8,7 @@ import (
	"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
)

-// ForkChoicer represents the full fork choice interface composed of all of the sub-interfaces.
+// ForkChoicer represents the full fork choice interface composed of all the sub-interfaces.
type ForkChoicer interface {
	HeadRetriever   // to compute head.
	BlockProcessor  // to track new block for fork choice.
@@ -16,11 +16,13 @@ type ForkChoicer interface {
	Pruner          // to clean old data for fork choice.
	Getter          // to retrieve fork choice information.
	ProposerBooster // ability to boost timely-proposed block roots.
+	SyncTipper      // to update and retrieve validated sync tips.
}

-// HeadRetriever retrieves head root of the current chain.
+// HeadRetriever retrieves head root and optimistic info of the current chain.
type HeadRetriever interface {
	Head(context.Context, types.Epoch, [32]byte, []uint64, types.Epoch) ([32]byte, error)
+	Optimistic(ctx context.Context, root [32]byte, slot types.Slot) (bool, error)
}

// BlockProcessor processes the block that's used for accounting fork choice.
@@ -54,3 +56,11 @@ type Getter interface {
	AncestorRoot(ctx context.Context, root [32]byte, slot types.Slot) ([]byte, error)
	IsCanonical(root [32]byte) bool
}
+
+// SyncTipper returns sync tips related information.
+type SyncTipper interface {
+	SyncedTips() map[[32]byte]types.Slot
+	SetSyncedTips(tips map[[32]byte]types.Slot) error
+	UpdateSyncedTipsWithValidRoot(ctx context.Context, root [32]byte) error
+	UpdateSyncedTipsWithInvalidRoot(ctx context.Context, root [32]byte) error
+}
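One plausible way a consumer could wire this interface to the ValidatedTips/UpdateValidatedTips database methods added earlier in this change; the function names and package qualifiers below are illustrative and not part of the diff.

func restoreTips(ctx context.Context, beaconDB db.NoHeadAccessDatabase, fc forkchoice.ForkChoicer) error {
	tips, err := beaconDB.ValidatedTips(ctx)
	if err != nil {
		return err
	}
	if len(tips) == 0 {
		return nil // nothing persisted yet; keep the fork choice defaults
	}
	return fc.SetSyncedTips(tips)
}

func persistTips(ctx context.Context, beaconDB db.NoHeadAccessDatabase, fc forkchoice.ForkChoicer) error {
	return beaconDB.UpdateValidatedTips(ctx, fc.SyncedTips())
}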
|
||||
@@ -1,7 +1,7 @@
/*
Package protoarray implements proto array fork choice as outlined:
https://github.com/protolambda/lmd-ghost#array-based-stateful-dag-proto_array
-This was motivated by the the original implementation by Sigma Prime here:
+This was motivated by the original implementation by Sigma Prime here:
https://github.com/sigp/lighthouse/pull/804
*/
package protoarray

@@ -11,3 +11,4 @@ var errInvalidBestDescendantIndex = errors.New("best descendant index is invalid
var errInvalidParentDelta = errors.New("parent delta is invalid")
var errInvalidNodeDelta = errors.New("node delta is invalid")
var errInvalidDeltaLength = errors.New("delta length is invalid")
+var errInvalidSyncedTips = errors.New("invalid synced tips")

@@ -24,7 +24,7 @@ func computeDeltas(
	oldBalance := uint64(0)
	newBalance := uint64(0)

-	// Skip if validator has never voted for current root and next root (ie. if the
+	// Skip if validator has never voted for current root and next root (i.e. if the
	// votes are zero hash aka genesis block), there's nothing to compute.
	if vote.currentRoot == params.BeaconConfig().ZeroHash && vote.nextRoot == params.BeaconConfig().ZeroHash {
		continue

@@ -48,4 +48,16 @@ var (
|
||||
Help: "The number of times pruning happened.",
|
||||
},
|
||||
)
|
||||
lastSyncedTipSlot = promauto.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "proto_array_last_synced_tip_slot",
|
||||
Help: "The slot of the last fully validated block added to the proto array.",
|
||||
},
|
||||
)
|
||||
syncedTipsCount = promauto.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "proto_array_synced_tips_count",
|
||||
Help: "The number of elements in the syncedTips structure.",
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
@@ -89,17 +89,37 @@ func (f *ForkChoice) Optimistic(ctx context.Context, root [32]byte, slot types.S
|
||||
return f.Optimistic(ctx, root, slot)
|
||||
}
|
||||
|
||||
// UpdateSyncedTips updates the synced_tips map when the block with the given root becomes VALID
|
||||
func (f *ForkChoice) UpdateSyncedTips(ctx context.Context, root [32]byte) error {
|
||||
// This function returns the index of sync tip node that's ancestor to the input node.
|
||||
// In the event of none, `NonExistentNode` is returned.
|
||||
// This internal method assumes the caller holds a lock on syncedTips and s.nodesLock
|
||||
func (s *Store) findSyncedTip(ctx context.Context, node *Node, syncedTips *optimisticStore) (uint64, error) {
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
return 0, ctx.Err()
|
||||
}
|
||||
if _, ok := syncedTips.validatedTips[node.root]; ok {
|
||||
return s.nodesIndices[node.root], nil
|
||||
}
|
||||
if node.parent == NonExistentNode {
|
||||
return NonExistentNode, nil
|
||||
}
|
||||
node = s.nodes[node.parent]
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateSyncedTipsWithValidRoot is called with the root of a block that was returned as
|
||||
// VALID by the EL. This routine recomputes and updates the synced_tips map to
|
||||
// account for this new tip.
|
||||
func (f *ForkChoice) UpdateSyncedTipsWithValidRoot(ctx context.Context, root [32]byte) error {
|
||||
f.store.nodesLock.RLock()
|
||||
defer f.store.nodesLock.RUnlock()
|
||||
// We can only update if given root is in fork choice
|
||||
// We can only update if given root is in Fork Choice
|
||||
index, ok := f.store.nodesIndices[root]
|
||||
if !ok {
|
||||
return errInvalidNodeIndex
|
||||
}
|
||||
|
||||
// We can only update if root is a leaf in fork choice
|
||||
// We can only update if root is a leaf in Fork Choice
|
||||
node := f.store.nodes[index]
|
||||
if node.bestChild != NonExistentNode {
|
||||
return errInvalidBestChildIndex
|
||||
@@ -114,7 +134,9 @@ func (f *ForkChoice) UpdateSyncedTips(ctx context.Context, root [32]byte) error
|
||||
}
|
||||
|
||||
// Cache root and slot to validated tips
|
||||
f.syncedTips.validatedTips[root] = node.slot
|
||||
newTips := make(map[[32]byte]types.Slot)
|
||||
newValidSlot := node.slot
|
||||
newTips[root] = newValidSlot
|
||||
|
||||
// Compute the full valid path from the given node to its previous synced tip
|
||||
// This path will now consist of fully validated blocks. Notice that
|
||||
@@ -122,6 +144,7 @@ func (f *ForkChoice) UpdateSyncedTips(ctx context.Context, root [32]byte) error
|
||||
// In this case, only one block can be in syncedTips as the whole
|
||||
// Fork Choice would be a descendant of this block.
|
||||
validPath := make(map[uint64]bool)
|
||||
validPath[index] = true
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
@@ -144,27 +167,19 @@ func (f *ForkChoice) UpdateSyncedTips(ctx context.Context, root [32]byte) error
|
||||
|
||||
// Retrieve the list of leaves in the Fork Choice
|
||||
// These are all the nodes that have NonExistentNode as best child.
|
||||
var leaves []uint64
|
||||
for i := uint64(0); i < uint64(len(f.store.nodes)); i++ {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
node = f.store.nodes[i]
|
||||
if node.bestChild == NonExistentNode {
|
||||
leaves = append(leaves, i)
|
||||
}
|
||||
leaves, err := f.store.leaves()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// For each leaf, recompute the new tip.
|
||||
newTips := make(map[[32]byte]types.Slot)
|
||||
for _, i := range leaves {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
node = f.store.nodes[i]
|
||||
j := i
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
// Stop if we reached the previous tip
|
||||
_, ok = f.syncedTips.validatedTips[node.root]
|
||||
if ok {
|
||||
@@ -191,5 +206,117 @@ func (f *ForkChoice) UpdateSyncedTips(ctx context.Context, root [32]byte) error
|
||||
}
|
||||
|
||||
f.syncedTips.validatedTips = newTips
|
||||
lastSyncedTipSlot.Set(float64(newValidSlot))
|
||||
syncedTipsCount.Set(float64(len(newTips)))
|
||||
return nil
|
||||
}
|
||||
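Sticking to the names defined in this file, a rough sketch of the intended call pattern once the execution engine reports a definitive status for a block root (the caller and the boolean flag are illustrative):

func onPayloadStatus(ctx context.Context, f *ForkChoice, root [32]byte, valid bool) error {
	if valid {
		// Block became VALID: recompute the synced tips with this root as a new tip.
		return f.UpdateSyncedTipsWithValidRoot(ctx, root)
	}
	// Block became INVALID: remove it from fork choice and adjust the tips.
	return f.UpdateSyncedTipsWithInvalidRoot(ctx, root)
}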
|
||||
// UpdateSyncedTipsWithInvalidRoot updates the synced_tips map when the block with the given root becomes INVALID.
|
||||
func (f *ForkChoice) UpdateSyncedTipsWithInvalidRoot(ctx context.Context, root [32]byte) error {
|
||||
f.store.nodesLock.Lock()
|
||||
defer f.store.nodesLock.Unlock()
|
||||
idx, ok := f.store.nodesIndices[root]
|
||||
if !ok {
|
||||
return errInvalidNodeIndex
|
||||
}
|
||||
node := f.store.nodes[idx]
|
||||
// We only support changing status for the tips in Fork Choice store.
|
||||
if node.bestChild != NonExistentNode {
|
||||
return errInvalidNodeIndex
|
||||
}
|
||||
|
||||
parentIndex := node.parent
|
||||
// This should not happen
|
||||
if parentIndex == NonExistentNode {
|
||||
return errInvalidNodeIndex
|
||||
}
|
||||
// Update the weights of the nodes subtracting the INVALID node's weight
|
||||
weight := node.weight
|
||||
node = f.store.nodes[parentIndex]
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
node.weight -= weight
|
||||
if node.parent == NonExistentNode {
|
||||
break
|
||||
}
|
||||
node = f.store.nodes[node.parent]
|
||||
}
|
||||
parent := copyNode(f.store.nodes[parentIndex])
|
||||
|
||||
// delete the invalid node, order is important
|
||||
f.store.nodes = append(f.store.nodes[:idx], f.store.nodes[idx+1:]...)
|
||||
delete(f.store.nodesIndices, root)
|
||||
// Fix parent and best child for each node
|
||||
for _, node := range f.store.nodes {
|
||||
if node.parent == NonExistentNode {
|
||||
node.parent = NonExistentNode
|
||||
} else if node.parent > idx {
|
||||
node.parent -= 1
|
||||
}
|
||||
if node.bestChild == NonExistentNode || node.bestChild == idx {
|
||||
node.bestChild = NonExistentNode
|
||||
} else if node.bestChild > idx {
|
||||
node.bestChild -= 1
|
||||
}
|
||||
if node.bestDescendant == NonExistentNode || node.bestDescendant == idx {
|
||||
node.bestDescendant = NonExistentNode
|
||||
} else if node.bestDescendant > idx {
|
||||
node.bestDescendant -= 1
|
||||
}
|
||||
}
|
||||
|
||||
// Update the parent's best child and best descendant if necessary.
|
||||
if parent.bestChild == idx || parent.bestDescendant == idx {
|
||||
for childIndex, child := range f.store.nodes {
|
||||
if child.parent == parentIndex {
|
||||
err := f.store.updateBestChildAndDescendant(
|
||||
parentIndex, uint64(childIndex))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Return early if the parent is not a synced_tip.
|
||||
f.syncedTips.Lock()
|
||||
defer f.syncedTips.Unlock()
|
||||
parentRoot := parent.root
|
||||
_, ok = f.syncedTips.validatedTips[parentRoot]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
leaves, err := f.store.leaves()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, i := range leaves {
|
||||
node = f.store.nodes[i]
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
// Return early if the parent is still a synced tip
|
||||
if node.root == parentRoot {
|
||||
return nil
|
||||
}
|
||||
_, ok = f.syncedTips.validatedTips[node.root]
|
||||
if ok {
|
||||
break
|
||||
}
|
||||
if node.parent == NonExistentNode {
|
||||
break
|
||||
}
|
||||
node = f.store.nodes[node.parent]
|
||||
}
|
||||
}
|
||||
delete(f.syncedTips.validatedTips, parentRoot)
|
||||
syncedTipsCount.Set(float64(len(f.syncedTips.validatedTips)))
|
||||
return nil
|
||||
}
|
||||
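The removal above compacts the flat node slice and then rewrites every stored index. A small self-contained illustration of that shift rule, using a hypothetical nonExistent sentinel in place of NonExistentNode:

const nonExistent = ^uint64(0) // stand-in for NonExistentNode

// shiftIndexAfterRemoval returns the updated value of a stored node reference
// after the node at index `removed` has been deleted from the slice.
func shiftIndexAfterRemoval(ref, removed uint64) uint64 {
	switch {
	case ref == nonExistent || ref == removed:
		return nonExistent // references to the deleted node become non-existent
	case ref > removed:
		return ref - 1 // everything after the deleted node slides down by one
	default:
		return ref
	}
}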
|
||||
@@ -216,7 +216,7 @@ func TestOptimistic(t *testing.T) {
|
||||
// And every block in the Fork choice is optimistic. Synced_Tips contains a
|
||||
// single block that is outside of Fork choice
|
||||
//
|
||||
func TestUpdateSyncedTips(t *testing.T) {
|
||||
func TestUpdateSyncTipsWithValidRoots(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
@@ -235,7 +235,7 @@ func TestUpdateSyncedTips(t *testing.T) {
|
||||
tests := []struct {
|
||||
root [32]byte // the root of the new VALID block
|
||||
tips map[[32]byte]types.Slot // the old synced tips
|
||||
newtips map[[32]byte]types.Slot // the updated synced tips
|
||||
newTips map[[32]byte]types.Slot // the updated synced tips
|
||||
wantedErr error
|
||||
}{
|
||||
{
|
||||
@@ -321,14 +321,230 @@ func TestUpdateSyncedTips(t *testing.T) {
|
||||
f.syncedTips.Lock()
|
||||
f.syncedTips.validatedTips = tc.tips
|
||||
f.syncedTips.Unlock()
|
||||
err := f.UpdateSyncedTips(context.Background(), tc.root)
|
||||
err := f.UpdateSyncedTipsWithValidRoot(context.Background(), tc.root)
|
||||
if tc.wantedErr != nil {
|
||||
require.ErrorIs(t, err, tc.wantedErr)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
f.syncedTips.RLock()
|
||||
require.DeepEqual(t, f.syncedTips.validatedTips, tc.newtips)
|
||||
require.DeepEqual(t, f.syncedTips.validatedTips, tc.newTips)
|
||||
f.syncedTips.RUnlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We test the algorithm to update a node from SYNCING to INVALID
|
||||
// We start with the same diagram as above:
|
||||
//
|
||||
// E(2) -- F(1)
|
||||
// /
|
||||
// C(7) -- D(6)
|
||||
// / \
|
||||
// A(10) -- B(9) G(3) -- H(1) -- I(0)
|
||||
// \ \
|
||||
// J(1) -- K(1) -- L(0)
|
||||
//
|
||||
// And every block in the Fork choice is optimistic. Synced_Tips contains a
|
||||
// single block that is outside of Fork choice. The numbers in parentheses are
|
||||
// the weights of the nodes before removal
|
||||
//
|
||||
func TestUpdateSyncTipsWithInvalidRoot(t *testing.T) {
|
||||
tests := []struct {
|
||||
root [32]byte // the root of the new INVALID block
|
||||
tips map[[32]byte]types.Slot // the old synced tips
|
||||
wantedParentTip bool
|
||||
newBestChild uint64
|
||||
newBestDescendant uint64
|
||||
newParentWeight uint64
|
||||
}{
|
||||
{
|
||||
[32]byte{'j'},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'d'}: 103,
|
||||
[32]byte{'g'}: 104,
|
||||
},
|
||||
false,
|
||||
3,
|
||||
4,
|
||||
8,
|
||||
},
|
||||
{
|
||||
[32]byte{'j'},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
},
|
||||
true,
|
||||
3,
|
||||
4,
|
||||
8,
|
||||
},
|
||||
{
|
||||
[32]byte{'i'},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'d'}: 103,
|
||||
[32]byte{'g'}: 104,
|
||||
[32]byte{'h'}: 105,
|
||||
},
|
||||
true,
|
||||
NonExistentNode,
|
||||
NonExistentNode,
|
||||
1,
|
||||
},
|
||||
{
|
||||
[32]byte{'i'},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'d'}: 103,
|
||||
[32]byte{'g'}: 104,
|
||||
},
|
||||
false,
|
||||
NonExistentNode,
|
||||
NonExistentNode,
|
||||
1,
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{}, 1, 1))
|
||||
weights := []uint64{10, 10, 9, 7, 1, 6, 2, 3, 1, 1, 1, 0, 0}
|
||||
f.syncedTips.Lock()
|
||||
f.syncedTips.validatedTips = tc.tips
|
||||
f.syncedTips.Unlock()
|
||||
f.store.nodesLock.Lock()
|
||||
for i, node := range f.store.nodes {
|
||||
node.weight = weights[i]
|
||||
}
|
||||
// Make j be the best child and descendant of b
|
||||
nodeB := f.store.nodes[2]
|
||||
nodeB.bestChild = 4
|
||||
nodeB.bestDescendant = 4
|
||||
idx := f.store.nodesIndices[tc.root]
|
||||
node := f.store.nodes[idx]
|
||||
parentIndex := node.parent
|
||||
require.NotEqual(t, NonExistentNode, parentIndex)
|
||||
parent := f.store.nodes[parentIndex]
|
||||
f.store.nodesLock.Unlock()
|
||||
err := f.UpdateSyncedTipsWithInvalidRoot(context.Background(), tc.root)
|
||||
require.NoError(t, err)
|
||||
f.syncedTips.RLock()
|
||||
_, parentSyncedTip := f.syncedTips.validatedTips[parent.root]
|
||||
f.syncedTips.RUnlock()
|
||||
require.Equal(t, tc.wantedParentTip, parentSyncedTip)
|
||||
require.Equal(t, tc.newBestChild, parent.bestChild)
|
||||
require.Equal(t, tc.newBestDescendant, parent.bestDescendant)
|
||||
require.Equal(t, tc.newParentWeight, parent.weight)
|
||||
}
|
||||
}
|
||||
|
||||
// This tests the algorithm to find the tip of a given node
|
||||
// We start with the following diagram
|
||||
//
|
||||
// E -- F
|
||||
// /
|
||||
// C -- D
|
||||
// / \
|
||||
// A -- B G -- H -- I
|
||||
// \ \
|
||||
// J -- K -- L
|
||||
//
|
||||
//
|
||||
func TestFindSyncedTip(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{}, 1, 1))
|
||||
tests := []struct {
|
||||
root [32]byte // the root of the block
|
||||
tips map[[32]byte]types.Slot // the synced tips
|
||||
wanted [32]byte // the root of expected tip
|
||||
}{
|
||||
{
|
||||
[32]byte{'i'},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'d'}: 103,
|
||||
[32]byte{'g'}: 104,
|
||||
},
|
||||
[32]byte{'g'},
|
||||
},
|
||||
{
|
||||
[32]byte{'g'},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'d'}: 103,
|
||||
[32]byte{'h'}: 104,
|
||||
[32]byte{'k'}: 106,
|
||||
},
|
||||
[32]byte{'d'},
|
||||
},
|
||||
{
|
||||
[32]byte{'e'},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'d'}: 103,
|
||||
[32]byte{'g'}: 103,
|
||||
},
|
||||
[32]byte{'d'},
|
||||
},
|
||||
{
|
||||
[32]byte{'j'},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'f'}: 105,
|
||||
[32]byte{'g'}: 104,
|
||||
[32]byte{'i'}: 106,
|
||||
},
|
||||
[32]byte{'b'},
|
||||
},
|
||||
{
|
||||
[32]byte{'g'},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'f'}: 105,
|
||||
[32]byte{'g'}: 104,
|
||||
[32]byte{'i'}: 106,
|
||||
},
|
||||
[32]byte{'g'},
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
f.store.nodesLock.RLock()
|
||||
node := f.store.nodes[f.store.nodesIndices[tc.root]]
|
||||
syncedTips := &optimisticStore{
|
||||
validatedTips: tc.tips,
|
||||
}
|
||||
syncedTips.RLock()
|
||||
idx, err := f.store.findSyncedTip(ctx, node, syncedTips)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.wanted, f.store.nodes[idx].root)
|
||||
|
||||
f.store.nodesLock.RUnlock()
|
||||
syncedTips.RUnlock()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -143,7 +143,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
	// Ancestors have the added weights of their children. Genesis is a special exception at 0 weight,
	require.Equal(t, f.store.nodes[0].weight, uint64(0))

-	// Otherwise assuming a block, A, that is not-genesis:
+	// Otherwise, assuming a block, A, that is not-genesis:
	//
	//  A -> B -> C
	//
@@ -160,7 +160,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
	//  (A: 54) -> (B: 44) -> (C: 24)
	//         \_->(D: 10)
	//
-	// So B has its own weight, 10, and the sum of of both C and D thats why we see weight 54 in the
+	// So B has its own weight, 10, and the sum of both C and D. That's why we see weight 54 in the
	// middle instead of the normal progression of (44 -> 34 -> 24).
	require.Equal(t, f.store.nodes[1].weight, uint64(54))
	require.Equal(t, f.store.nodes[2].weight, uint64(44))

@@ -40,6 +40,33 @@ func New(justifiedEpoch, finalizedEpoch types.Epoch, finalizedRoot [32]byte) *Fo
|
||||
return &ForkChoice{store: s, balances: b, votes: v, syncedTips: st}
|
||||
}
|
||||
|
||||
// SetSyncedTips sets the synced and validated tips from the passed map
|
||||
func (f *ForkChoice) SetSyncedTips(tips map[[32]byte]types.Slot) error {
|
||||
if len(tips) == 0 {
|
||||
return errInvalidSyncedTips
|
||||
}
|
||||
newTips := make(map[[32]byte]types.Slot, len(tips))
|
||||
for k, v := range tips {
|
||||
newTips[k] = v
|
||||
}
|
||||
f.syncedTips.Lock()
|
||||
defer f.syncedTips.Unlock()
|
||||
f.syncedTips.validatedTips = newTips
|
||||
return nil
|
||||
}
|
||||
|
||||
// SyncedTips returns the synced and validated tips from the fork choice store.
|
||||
func (f *ForkChoice) SyncedTips() map[[32]byte]types.Slot {
|
||||
f.syncedTips.RLock()
|
||||
defer f.syncedTips.RUnlock()
|
||||
|
||||
m := make(map[[32]byte]types.Slot)
|
||||
for k, v := range f.syncedTips.validatedTips {
|
||||
m[k] = v
|
||||
}
|
||||
return m
|
||||
}
|
||||
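Both accessors copy the map, so callers only ever see a snapshot; a small illustrative sketch (the function name is hypothetical):

func snapshotAndRestore(f *ForkChoice) error {
	tips := f.SyncedTips()   // copy made under the read lock
	tips[[32]byte{'x'}] = 42 // mutating the snapshot does not affect the store
	// SetSyncedTips copies the input as well and rejects an empty map
	// with errInvalidSyncedTips.
	return f.SetSyncedTips(tips)
}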
|
||||
// Head returns the head root from fork choice store.
|
||||
// It firsts computes validator's balance changes then recalculates block tree from leaves to root.
|
||||
func (f *ForkChoice) Head(
|
||||
@@ -119,7 +146,7 @@ func (f *ForkChoice) ProcessBlock(
|
||||
// Prune prunes the fork choice store with the new finalized root. The store is only pruned if the input
|
||||
// root is different than the current store finalized root, and the number of the store has met prune threshold.
|
||||
func (f *ForkChoice) Prune(ctx context.Context, finalizedRoot [32]byte) error {
|
||||
return f.store.prune(ctx, finalizedRoot)
|
||||
return f.store.prune(ctx, finalizedRoot, f.syncedTips)
|
||||
}
|
||||
|
||||
// Nodes returns the copied list of block nodes in the fork choice store.
|
||||
@@ -268,7 +295,7 @@ func (s *Store) head(ctx context.Context, justifiedRoot [32]byte) ([32]byte, err
|
||||
|
||||
justifiedNode := s.nodes[justifiedIndex]
|
||||
bestDescendantIndex := justifiedNode.bestDescendant
|
||||
// If the justified node doesn't have a best descendent,
|
||||
// If the justified node doesn't have a best descendant,
|
||||
// the best node is itself.
|
||||
if bestDescendantIndex == NonExistentNode {
|
||||
bestDescendantIndex = justifiedIndex
|
||||
@@ -305,28 +332,38 @@ func (s *Store) updateCanonicalNodes(ctx context.Context, root [32]byte) error {
|
||||
defer span.End()
|
||||
|
||||
// Set the input node to canonical.
|
||||
s.canonicalNodes[root] = true
|
||||
|
||||
// Get the input's parent node index.
|
||||
i := s.nodesIndices[root]
|
||||
n := s.nodes[i]
|
||||
p := n.parent
|
||||
|
||||
for p != NonExistentNode {
|
||||
var newCanonicalRoots [][32]byte
|
||||
var n *Node
|
||||
for i != NonExistentNode {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
// Get the parent node, if the node is already in canonical mapping,
|
||||
// we can be sure rest of the ancestors are canonical. Exit early.
|
||||
n = s.nodes[p]
|
||||
n = s.nodes[i]
|
||||
if s.canonicalNodes[n.root] {
|
||||
break
|
||||
}
|
||||
|
||||
// Set parent node to canonical. Repeat until parent node index is undefined.
|
||||
s.canonicalNodes[n.root] = true
|
||||
p = n.parent
|
||||
newCanonicalRoots = append(newCanonicalRoots, n.root)
|
||||
i = n.parent
|
||||
}
|
||||
|
||||
// i is either NonExistentNode or has the index of the last canonical
|
||||
// node before the last head update.
|
||||
if i == NonExistentNode {
|
||||
s.canonicalNodes = make(map[[fieldparams.RootLength]byte]bool)
|
||||
} else {
|
||||
for j := i + 1; j < uint64(len(s.nodes)); j++ {
|
||||
delete(s.canonicalNodes, s.nodes[j].root)
|
||||
}
|
||||
}
|
||||
|
||||
for _, canonicalRoot := range newCanonicalRoots {
|
||||
s.canonicalNodes[canonicalRoot] = true
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -351,7 +388,7 @@ func (s *Store) insert(ctx context.Context,
|
||||
|
||||
index := uint64(len(s.nodes))
|
||||
parentIndex, ok := s.nodesIndices[parent]
|
||||
// Mark genesis block's parent as non existent.
|
||||
// Mark genesis block's parent as non-existent.
|
||||
if !ok {
|
||||
parentIndex = NonExistentNode
|
||||
}
|
||||
@@ -371,7 +408,7 @@ func (s *Store) insert(ctx context.Context,
|
||||
s.nodesIndices[root] = index
|
||||
s.nodes = append(s.nodes, n)
|
||||
|
||||
// Update parent with the best child and descendent only if it's available.
|
||||
// Update parent with the best child and descendant only if it's available.
|
||||
if n.parent != NonExistentNode {
|
||||
if err := s.updateBestChildAndDescendant(parentIndex, index); err != nil {
|
||||
return err
|
||||
@@ -387,8 +424,8 @@ func (s *Store) insert(ctx context.Context,
|
||||
|
||||
// applyWeightChanges iterates backwards through the nodes in store. It checks all nodes parent
|
||||
// and its best child. For each node, it updates the weight with input delta and
|
||||
// back propagate the nodes delta to its parents delta. After scoring changes,
|
||||
// the best child is then updated along with best descendant.
|
||||
// back propagate the nodes' delta to its parents' delta. After scoring changes,
|
||||
// the best child is then updated along with the best descendant.
|
||||
func (s *Store) applyWeightChanges(
|
||||
ctx context.Context, justifiedEpoch, finalizedEpoch types.Epoch, newBalances []uint64, delta []int,
|
||||
) error {
|
||||
@@ -439,33 +476,25 @@ func (s *Store) applyWeightChanges(
|
||||
}
|
||||
s.proposerBoostLock.Unlock()
|
||||
|
||||
// A node's weight can not be negative but the delta can be negative.
|
||||
if nodeDelta < 0 {
|
||||
// A node's weight can not be negative but the delta can be negative.
|
||||
if int(n.weight)+nodeDelta < 0 {
|
||||
d := uint64(-nodeDelta)
|
||||
if n.weight < d {
|
||||
n.weight = 0
|
||||
} else {
|
||||
// Absolute value of node delta.
|
||||
d := nodeDelta
|
||||
if nodeDelta < 0 {
|
||||
d *= -1
|
||||
}
|
||||
// Subtract node's weight.
|
||||
n.weight -= uint64(d)
|
||||
n.weight -= d
|
||||
}
|
||||
} else {
|
||||
// Add node's weight.
|
||||
n.weight += uint64(nodeDelta)
|
||||
}
|
||||
|
||||
s.nodes[i] = n
|
||||
|
||||
// Update parent's best child and descendent if the node has a known parent.
|
||||
// Update parent's best child and descendant if the node has a known parent.
|
||||
if n.parent != NonExistentNode {
|
||||
// Protection against node parent index out of bound. This should not happen.
|
||||
if int(n.parent) >= len(delta) {
|
||||
return errInvalidParentDelta
|
||||
}
|
||||
// Back propagate the nodes delta to its parent.
|
||||
// Back propagate the nodes' delta to its parent.
|
||||
delta[n.parent] += nodeDelta
|
||||
}
|
||||
}
|
||||
@@ -491,14 +520,14 @@ func (s *Store) applyWeightChanges(
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateBestChildAndDescendant updates parent node's best child and descendent.
|
||||
// updateBestChildAndDescendant updates parent node's best child and descendant.
|
||||
// It looks at input parent node and input child node and potentially modifies parent's best
|
||||
// child and best descendent indices.
|
||||
// child and best descendant indices.
|
||||
// There are four outcomes:
|
||||
// 1.) The child is already the best child but it's now invalid due to a FFG change and should be removed.
|
||||
// 1.) The child is already the best child, but it's now invalid due to a FFG change and should be removed.
|
||||
// 2.) The child is already the best child and the parent is updated with the new best descendant.
|
||||
// 3.) The child is not the best child but becomes the best child.
|
||||
// 4.) The child is not the best child and does not become best child.
|
||||
// 4.) The child is not the best child and does not become the best child.
|
||||
func (s *Store) updateBestChildAndDescendant(parentIndex, childIndex uint64) error {
|
||||
|
||||
// Protection against parent index out of bound, this should not happen.
|
||||
@@ -533,12 +562,12 @@ func (s *Store) updateBestChildAndDescendant(parentIndex, childIndex uint64) err
|
||||
|
||||
if parent.bestChild != NonExistentNode {
|
||||
if parent.bestChild == childIndex && !childLeadsToViableHead {
|
||||
// If the child is already the best child of the parent but it's not viable for head,
|
||||
// If the child is already the best child of the parent, but it's not viable for head,
|
||||
// we should remove it. (Outcome 1)
|
||||
newParentChild = changeToNone
|
||||
} else if parent.bestChild == childIndex {
|
||||
// If the child is already the best child of the parent, set it again to ensure best
|
||||
// descendent of the parent is updated. (Outcome 2)
|
||||
// If the child is already the best child of the parent, set it again to ensure the best
|
||||
// descendant of the parent is updated. (Outcome 2)
|
||||
newParentChild = changeToChild
|
||||
} else {
|
||||
// Protection against parent's best child going out of bound.
|
||||
@@ -553,7 +582,7 @@ func (s *Store) updateBestChildAndDescendant(parentIndex, childIndex uint64) err
|
||||
}
|
||||
|
||||
if childLeadsToViableHead && !bestChildLeadsToViableHead {
|
||||
// The child leads to a viable head, but the current parent's best child doesnt.
|
||||
// The child leads to a viable head, but the current parent's best child doesn't.
|
||||
newParentChild = changeToChild
|
||||
} else if !childLeadsToViableHead && bestChildLeadsToViableHead {
|
||||
// The child doesn't lead to a viable head, the current parent's best child does.
|
||||
@@ -596,7 +625,7 @@ func (s *Store) updateBestChildAndDescendant(parentIndex, childIndex uint64) err
|
||||
// prune prunes the store with the new finalized root. The tree is only
|
||||
// pruned if the input finalized root are different than the one in stored and
|
||||
// the number of the nodes in store has met prune threshold.
|
||||
func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte) error {
|
||||
func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte, syncedTips *optimisticStore) error {
|
||||
_, span := trace.StartSpan(ctx, "protoArrayForkChoice.prune")
|
||||
defer span.End()
|
||||
|
||||
@@ -618,12 +647,20 @@ func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte) error {
|
||||
|
||||
// Traverse through the node list starting from the finalized node at index 0.
|
||||
// Nodes that are not branching off from the finalized node will be removed.
|
||||
syncedTips.Lock()
|
||||
defer syncedTips.Unlock()
|
||||
|
||||
canonicalNodesMap := make(map[uint64]uint64, uint64(len(s.nodes))-finalizedIndex)
|
||||
canonicalNodes := make([]*Node, 1, uint64(len(s.nodes))-finalizedIndex)
|
||||
finalizedNode := s.nodes[finalizedIndex]
|
||||
finalizedTipIndex, err := s.findSyncedTip(ctx, finalizedNode, syncedTips)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
finalizedNode.parent = NonExistentNode
|
||||
canonicalNodes[0] = finalizedNode
|
||||
canonicalNodesMap[finalizedIndex] = uint64(0)
|
||||
|
||||
for idx := uint64(0); idx < uint64(len(s.nodes)); idx++ {
|
||||
node := copyNode(s.nodes[idx])
|
||||
parentIdx, ok := canonicalNodesMap[node.parent]
|
||||
@@ -633,13 +670,17 @@ func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte) error {
|
||||
node.parent = parentIdx
|
||||
canonicalNodes = append(canonicalNodes, node)
|
||||
} else {
|
||||
// Remove node that is not part of finalized branch.
|
||||
// Remove node and synced tip that is not part of finalized branch.
|
||||
delete(s.nodesIndices, node.root)
|
||||
_, ok := syncedTips.validatedTips[node.root]
|
||||
if ok && idx != finalizedTipIndex {
|
||||
delete(syncedTips.validatedTips, node.root)
|
||||
}
|
||||
}
|
||||
}
|
||||
s.nodesIndices[finalizedRoot] = uint64(0)
|
||||
|
||||
// Recompute best child and descendant for each canonical nodes.
|
||||
// Recompute the best child and descendant for each canonical nodes.
|
||||
for _, node := range canonicalNodes {
|
||||
if node.bestChild != NonExistentNode {
|
||||
node.bestChild = canonicalNodesMap[node.bestChild]
|
||||
@@ -651,31 +692,31 @@ func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte) error {
|
||||
|
||||
s.nodes = canonicalNodes
|
||||
prunedCount.Inc()
|
||||
|
||||
syncedTipsCount.Set(float64(len(syncedTips.validatedTips)))
|
||||
return nil
|
||||
}
|
||||
|
||||
// leadsToViableHead returns true if the node or the best descendent of the node is viable for head.
|
||||
// leadsToViableHead returns true if the node or the best descendant of the node is viable for head.
|
||||
// Any node with diff finalized or justified epoch than the ones in fork choice store
|
||||
// should not be viable to head.
|
||||
func (s *Store) leadsToViableHead(node *Node) (bool, error) {
|
||||
var bestDescendentViable bool
|
||||
bestDescendentIndex := node.bestDescendant
|
||||
var bestDescendantViable bool
|
||||
bestDescendantIndex := node.bestDescendant
|
||||
|
||||
// If the best descendant is not part of the leaves.
|
||||
if bestDescendentIndex != NonExistentNode {
|
||||
// Protection against out of bound, best descendent index can not be
|
||||
if bestDescendantIndex != NonExistentNode {
|
||||
// Protection against out of bound, the best descendant index can not be
|
||||
// exceeds length of nodes list.
|
||||
if bestDescendentIndex >= uint64(len(s.nodes)) {
|
||||
if bestDescendantIndex >= uint64(len(s.nodes)) {
|
||||
return false, errInvalidBestDescendantIndex
|
||||
}
|
||||
|
||||
bestDescendentNode := s.nodes[bestDescendentIndex]
|
||||
bestDescendentViable = s.viableForHead(bestDescendentNode)
|
||||
bestDescendantNode := s.nodes[bestDescendantIndex]
|
||||
bestDescendantViable = s.viableForHead(bestDescendantNode)
|
||||
}
|
||||
|
||||
// The node is viable as long as the best descendent is viable.
|
||||
return bestDescendentViable || s.viableForHead(node), nil
|
||||
// The node is viable as long as the best descendant is viable.
|
||||
return bestDescendantViable || s.viableForHead(node), nil
|
||||
}
|
||||
|
||||
// viableForHead returns true if the node is viable to head.
|
||||
@@ -689,3 +730,17 @@ func (s *Store) viableForHead(node *Node) bool {
|
||||
|
||||
return justified && finalized
|
||||
}
|
||||
|
||||
// Returns the list of leaves in the Fork Choice store.
// These are all the nodes that have NonExistentNode as best child.
// This internal method assumes that the caller holds a lock in s.nodesLock.
func (s *Store) leaves() ([]uint64, error) {
    var leaves []uint64
    for i := uint64(0); i < uint64(len(s.nodes)); i++ {
        node := s.nodes[i]
        if node.bestChild == NonExistentNode {
            leaves = append(leaves, i)
        }
    }
    return leaves, nil
}
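Because the comment above notes that the caller must already hold s.nodesLock, an in-package call would look roughly like the sketch below; leafIndices is a hypothetical local name used only for illustration.

    s.nodesLock.RLock()
    leafIndices, err := s.leaves()
    s.nodesLock.RUnlock()
    if err != nil {
        return err
    }
    // leafIndices now holds the index of every node whose bestChild is NonExistentNode.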
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
@@ -116,12 +117,11 @@ func TestStore_Head_UnknownJustifiedIndex(t *testing.T) {
|
||||
|
||||
func TestStore_Head_Itself(t *testing.T) {
|
||||
r := [32]byte{'A'}
|
||||
indices := make(map[[32]byte]uint64)
|
||||
indices[r] = 0
|
||||
indices := map[[32]byte]uint64{r: 0}
|
||||
|
||||
// Since the justified node does not have a best descendant so the best node
|
||||
// is itself.
|
||||
s := &Store{nodesIndices: indices, nodes: []*Node{{root: r, bestDescendant: NonExistentNode}}, canonicalNodes: make(map[[32]byte]bool)}
|
||||
s := &Store{nodesIndices: indices, nodes: []*Node{{root: r, parent: NonExistentNode, bestDescendant: NonExistentNode}}, canonicalNodes: make(map[[32]byte]bool)}
|
||||
h, err := s.head(context.Background(), r)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, r, h)
|
||||
@@ -130,12 +130,11 @@ func TestStore_Head_Itself(t *testing.T) {
|
||||
func TestStore_Head_BestDescendant(t *testing.T) {
|
||||
r := [32]byte{'A'}
|
||||
best := [32]byte{'B'}
|
||||
indices := make(map[[32]byte]uint64)
|
||||
indices[r] = 0
|
||||
indices := map[[32]byte]uint64{r: 0, best: 1}
|
||||
|
||||
// Since the justified node's best descendent is at index 1 and it's root is `best`,
|
||||
// Since the justified node's best descendant is at index 1, and its root is `best`,
|
||||
// the head should be `best`.
|
||||
s := &Store{nodesIndices: indices, nodes: []*Node{{root: r, bestDescendant: 1}, {root: best}}, canonicalNodes: make(map[[32]byte]bool)}
|
||||
s := &Store{nodesIndices: indices, nodes: []*Node{{root: r, bestDescendant: 1, parent: NonExistentNode}, {root: best, parent: 0}}, canonicalNodes: make(map[[32]byte]bool)}
|
||||
h, err := s.head(context.Background(), r)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, best, h)
|
||||
@@ -145,9 +144,9 @@ func TestStore_Head_ContextCancelled(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
r := [32]byte{'A'}
|
||||
best := [32]byte{'B'}
|
||||
indices := make(map[[32]byte]uint64)
|
||||
indices[r] = 0
|
||||
s := &Store{nodesIndices: indices, nodes: []*Node{{root: r, bestDescendant: 1}, {root: best}}, canonicalNodes: make(map[[32]byte]bool)}
|
||||
indices := map[[32]byte]uint64{r: 0, best: 1}
|
||||
|
||||
s := &Store{nodesIndices: indices, nodes: []*Node{{root: r, parent: NonExistentNode, bestDescendant: 1}, {root: best, parent: 0}}, canonicalNodes: make(map[[32]byte]bool)}
|
||||
cancel()
|
||||
_, err := s.head(ctx, r)
|
||||
require.ErrorContains(t, "context canceled", err)
|
||||
@@ -264,7 +263,7 @@ func TestStore_UpdateBestChildAndDescendant_UpdateDescendant(t *testing.T) {
|
||||
|
||||
func TestStore_UpdateBestChildAndDescendant_ChangeChildByViability(t *testing.T) {
|
||||
// Make parent's best child not equal to child index, child leads to viable index and
|
||||
// parents best child doesnt lead to viable index.
|
||||
// parent's best child doesn't lead to viable index.
|
||||
s := &Store{
|
||||
justifiedEpoch: 1,
|
||||
finalizedEpoch: 1,
|
||||
@@ -361,16 +360,39 @@ func TestStore_Prune_LessThanThreshold(t *testing.T) {
|
||||
numOfNodes := 100
|
||||
indices := make(map[[32]byte]uint64)
|
||||
nodes := make([]*Node, 0)
|
||||
for i := 0; i < numOfNodes; i++ {
|
||||
indices[indexToHash(uint64(0))] = uint64(0)
|
||||
nodes = append(nodes, &Node{
|
||||
slot: types.Slot(0),
|
||||
root: indexToHash(uint64(0)),
|
||||
bestDescendant: uint64(numOfNodes - 1),
|
||||
bestChild: uint64(1),
|
||||
parent: NonExistentNode,
|
||||
})
|
||||
for i := 1; i < numOfNodes-1; i++ {
|
||||
indices[indexToHash(uint64(i))] = uint64(i)
|
||||
nodes = append(nodes, &Node{slot: types.Slot(i)})
|
||||
nodes = append(nodes, &Node{
|
||||
slot: types.Slot(i),
|
||||
root: indexToHash(uint64(i)),
|
||||
bestDescendant: uint64(numOfNodes - 1),
|
||||
bestChild: uint64(i + 1),
|
||||
parent: uint64(i) - 1,
|
||||
})
|
||||
}
|
||||
indices[indexToHash(uint64(numOfNodes-1))] = uint64(numOfNodes - 1)
|
||||
nodes = append(nodes, &Node{
|
||||
slot: types.Slot(numOfNodes - 1),
|
||||
root: indexToHash(uint64(numOfNodes - 1)),
|
||||
bestDescendant: NonExistentNode,
|
||||
bestChild: NonExistentNode,
|
||||
parent: uint64(numOfNodes - 2),
|
||||
})
|
||||
|
||||
s := &Store{nodes: nodes, nodesIndices: indices, pruneThreshold: 100}
|
||||
syncedTips := &optimisticStore{}
|
||||
|
||||
// Finalized root is at index 99 so everything before 99 should be pruned,
|
||||
// but PruneThreshold is at 100 so nothing will be pruned.
|
||||
require.NoError(t, s.prune(context.Background(), indexToHash(99)))
|
||||
require.NoError(t, s.prune(context.Background(), indexToHash(99), syncedTips))
|
||||
assert.Equal(t, 100, len(s.nodes), "Incorrect nodes count")
|
||||
assert.Equal(t, 100, len(s.nodesIndices), "Incorrect node indices count")
|
||||
}
|
||||
@@ -380,16 +402,37 @@ func TestStore_Prune_MoreThanThreshold(t *testing.T) {
|
||||
numOfNodes := 100
|
||||
indices := make(map[[32]byte]uint64)
|
||||
nodes := make([]*Node, 0)
|
||||
for i := 0; i < numOfNodes; i++ {
|
||||
indices[indexToHash(uint64(0))] = uint64(0)
|
||||
nodes = append(nodes, &Node{
|
||||
slot: types.Slot(0),
|
||||
root: indexToHash(uint64(0)),
|
||||
bestDescendant: uint64(numOfNodes - 1),
|
||||
bestChild: uint64(1),
|
||||
parent: NonExistentNode,
|
||||
})
|
||||
for i := 1; i < numOfNodes-1; i++ {
|
||||
indices[indexToHash(uint64(i))] = uint64(i)
|
||||
nodes = append(nodes, &Node{slot: types.Slot(i), root: indexToHash(uint64(i)),
|
||||
bestDescendant: NonExistentNode, bestChild: NonExistentNode})
|
||||
nodes = append(nodes, &Node{
|
||||
slot: types.Slot(i),
|
||||
root: indexToHash(uint64(i)),
|
||||
bestDescendant: uint64(numOfNodes - 1),
|
||||
bestChild: uint64(i + 1),
|
||||
parent: uint64(i) - 1,
|
||||
})
|
||||
}
|
||||
|
||||
nodes = append(nodes, &Node{
|
||||
slot: types.Slot(numOfNodes - 1),
|
||||
root: indexToHash(uint64(numOfNodes - 1)),
|
||||
bestDescendant: NonExistentNode,
|
||||
bestChild: NonExistentNode,
|
||||
parent: uint64(numOfNodes - 2),
|
||||
})
|
||||
indices[indexToHash(uint64(numOfNodes-1))] = uint64(numOfNodes - 1)
|
||||
s := &Store{nodes: nodes, nodesIndices: indices}
|
||||
syncedTips := &optimisticStore{}
|
||||
|
||||
// Finalized root is at index 99 so everything before 99 should be pruned.
|
||||
require.NoError(t, s.prune(context.Background(), indexToHash(99)))
|
||||
require.NoError(t, s.prune(context.Background(), indexToHash(99), syncedTips))
|
||||
assert.Equal(t, 1, len(s.nodes), "Incorrect nodes count")
|
||||
assert.Equal(t, 1, len(s.nodesIndices), "Incorrect node indices count")
|
||||
}
|
||||
@@ -425,14 +468,15 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
|
||||
})
|
||||
|
||||
s := &Store{nodes: nodes, nodesIndices: indices}
|
||||
syncedTips := &optimisticStore{}
|
||||
|
||||
// Finalized root is at index 11 so everything before 11 should be pruned.
|
||||
require.NoError(t, s.prune(context.Background(), indexToHash(10)))
|
||||
require.NoError(t, s.prune(context.Background(), indexToHash(10), syncedTips))
|
||||
assert.Equal(t, 90, len(s.nodes), "Incorrect nodes count")
|
||||
assert.Equal(t, 90, len(s.nodesIndices), "Incorrect node indices count")
|
||||
|
||||
// One more time.
|
||||
require.NoError(t, s.prune(context.Background(), indexToHash(20)))
|
||||
require.NoError(t, s.prune(context.Background(), indexToHash(20), syncedTips))
|
||||
assert.Equal(t, 80, len(s.nodes), "Incorrect nodes count")
|
||||
assert.Equal(t, 80, len(s.nodesIndices), "Incorrect node indices count")
|
||||
}
|
||||
@@ -468,6 +512,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
|
||||
bestDescendant: NonExistentNode,
|
||||
},
|
||||
}
|
||||
syncedTips := &optimisticStore{}
|
||||
s := &Store{
|
||||
pruneThreshold: 0,
|
||||
nodes: nodes,
|
||||
@@ -477,10 +522,56 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
|
||||
indexToHash(uint64(2)): 2,
|
||||
},
|
||||
}
|
||||
require.NoError(t, s.prune(context.Background(), indexToHash(uint64(1))))
|
||||
require.NoError(t, s.prune(context.Background(), indexToHash(uint64(1)), syncedTips))
|
||||
require.Equal(t, len(s.nodes), 1)
|
||||
}
|
||||
|
||||
// This test starts with the following branching diagram
|
||||
/// We start with the following diagram
|
||||
//
|
||||
// E -- F
|
||||
// /
|
||||
// C -- D
|
||||
// / \
|
||||
// A -- B G -- H -- I
|
||||
// \ \
|
||||
// J -- K -- L
|
||||
//
|
||||
//
|
||||
// Synced tips are B, D and E. And we finalize F. All that is left in fork
|
||||
// choice is F, and the only synced tip left is E which is now away from Fork
|
||||
// Choice.
|
||||
func TestStore_PruneSyncedTips(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
require.NoError(t, f.ProcessBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{}, 1, 1))
|
||||
require.NoError(t, f.ProcessBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{}, 1, 1))
|
||||
syncedTips := &optimisticStore{
|
||||
validatedTips: map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'d'}: 103,
|
||||
[32]byte{'e'}: 104,
|
||||
},
|
||||
}
|
||||
f.syncedTips = syncedTips
|
||||
f.store.pruneThreshold = 0
|
||||
require.NoError(t, f.Prune(ctx, [32]byte{'f'}))
|
||||
require.Equal(t, 1, len(f.syncedTips.validatedTips))
|
||||
_, ok := f.syncedTips.validatedTips[[32]byte{'e'}]
|
||||
require.Equal(t, true, ok)
|
||||
}
|
||||
|
||||
func TestStore_LeadsToViableHead(t *testing.T) {
|
||||
tests := []struct {
|
||||
n *Node
|
||||
@@ -507,6 +598,20 @@ func TestStore_LeadsToViableHead(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_SetSyncedTips(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
tips := make(map[[32]byte]types.Slot)
|
||||
require.ErrorIs(t, errInvalidSyncedTips, f.SetSyncedTips(tips))
|
||||
tips[bytesutil.ToBytes32([]byte{'a'})] = 1
|
||||
require.NoError(t, f.SetSyncedTips(tips))
|
||||
f.syncedTips.RLock()
|
||||
defer f.syncedTips.RUnlock()
|
||||
require.Equal(t, 1, len(f.syncedTips.validatedTips))
|
||||
slot, ok := f.syncedTips.validatedTips[bytesutil.ToBytes32([]byte{'a'})]
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, types.Slot(1), slot)
|
||||
}
|
||||
|
||||
func TestStore_ViableForHead(t *testing.T) {
|
||||
tests := []struct {
|
||||
n *Node
|
||||
@@ -613,7 +718,7 @@ func TestStore_UpdateCanonicalNodes_WholeList(t *testing.T) {
|
||||
f.store.nodesIndices[[32]byte{'c'}] = 2
|
||||
require.NoError(t, f.store.updateCanonicalNodes(ctx, [32]byte{'c'}))
|
||||
require.Equal(t, len(f.store.nodes), len(f.store.canonicalNodes))
|
||||
require.Equal(t, true, f.IsCanonical([32]byte{'c'}))
|
||||
require.Equal(t, true, f.IsCanonical([32]byte{'a'}))
|
||||
require.Equal(t, true, f.IsCanonical([32]byte{'b'}))
|
||||
require.Equal(t, true, f.IsCanonical([32]byte{'c'}))
|
||||
require.DeepEqual(t, f.Node([32]byte{'c'}), f.store.nodes[2])
|
||||
@@ -653,3 +758,34 @@ func TestStore_UpdateCanonicalNodes_ContextCancelled(t *testing.T) {
|
||||
cancel()
|
||||
require.ErrorContains(t, "context canceled", f.store.updateCanonicalNodes(ctx, [32]byte{'c'}))
|
||||
}
|
||||
|
||||
func TestStore_UpdateCanonicalNodes_RemoveOldCanonical(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := &ForkChoice{store: &Store{}}
|
||||
f.store.canonicalNodes = map[[32]byte]bool{}
|
||||
f.store.nodesIndices = map[[32]byte]uint64{
|
||||
[32]byte{'a'}: 0,
|
||||
[32]byte{'b'}: 1,
|
||||
[32]byte{'c'}: 2,
|
||||
[32]byte{'d'}: 3,
|
||||
[32]byte{'e'}: 4,
|
||||
}
|
||||
|
||||
f.store.nodes = []*Node{
|
||||
{slot: 1, root: [32]byte{'a'}, parent: NonExistentNode},
|
||||
{slot: 2, root: [32]byte{'b'}, parent: 0},
|
||||
{slot: 3, root: [32]byte{'c'}, parent: 1},
|
||||
{slot: 4, root: [32]byte{'d'}, parent: 1},
|
||||
{slot: 5, root: [32]byte{'e'}, parent: 3},
|
||||
}
|
||||
require.NoError(t, f.store.updateCanonicalNodes(ctx, [32]byte{'c'}))
|
||||
require.Equal(t, 3, len(f.store.canonicalNodes))
|
||||
require.NoError(t, f.store.updateCanonicalNodes(ctx, [32]byte{'e'}))
|
||||
require.Equal(t, 4, len(f.store.canonicalNodes))
|
||||
require.Equal(t, true, f.IsCanonical([32]byte{'a'}))
|
||||
require.Equal(t, true, f.IsCanonical([32]byte{'b'}))
|
||||
require.Equal(t, true, f.IsCanonical([32]byte{'d'}))
|
||||
require.Equal(t, true, f.IsCanonical([32]byte{'e'}))
|
||||
_, ok := f.store.canonicalNodes[[32]byte{'c'}]
|
||||
require.Equal(t, false, ok)
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
func TestVotes_CanFindHead(t *testing.T) {
|
||||
balances := []uint64{1, 1}
|
||||
f := setup(1, 1)
|
||||
syncedTips := &optimisticStore{}
|
||||
|
||||
// The head should always start at the finalized block.
|
||||
r, err := f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
|
||||
@@ -248,7 +249,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
|
||||
// Verify pruning below the prune threshold does not affect head.
|
||||
f.store.pruneThreshold = 1000
|
||||
require.NoError(t, f.store.prune(context.Background(), indexToHash(5)))
|
||||
require.NoError(t, f.store.prune(context.Background(), indexToHash(5), syncedTips))
|
||||
assert.Equal(t, 11, len(f.store.nodes), "Incorrect nodes length after prune")
|
||||
|
||||
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
|
||||
@@ -272,7 +273,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// / \
|
||||
// 9 10
|
||||
f.store.pruneThreshold = 1
|
||||
require.NoError(t, f.store.prune(context.Background(), indexToHash(5)))
|
||||
require.NoError(t, f.store.prune(context.Background(), indexToHash(5), syncedTips))
|
||||
assert.Equal(t, 5, len(f.store.nodes), "Incorrect nodes length after prune")
|
||||
|
||||
r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
|
||||
|
||||
@@ -45,6 +45,14 @@ func configureHistoricalSlasher(cliCtx *cli.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
func configureSafeSlotsToImportOptimistically(cliCtx *cli.Context) {
|
||||
if cliCtx.IsSet(flags.SafeSlotsToImportOptimistically.Name) {
|
||||
c := params.BeaconConfig()
|
||||
c.SafeSlotsToImportOptimistically = types.Slot(cliCtx.Int(flags.SafeSlotsToImportOptimistically.Name))
|
||||
params.OverrideBeaconConfig(c)
|
||||
}
|
||||
}
|
||||
|
||||
func configureSlotsPerArchivedPoint(cliCtx *cli.Context) {
|
||||
if cliCtx.IsSet(flags.SlotsPerArchivedPoint.Name) {
|
||||
c := params.BeaconConfig()
|
||||
|
||||
@@ -37,6 +37,20 @@ func TestConfigureHistoricalSlasher(t *testing.T) {
|
||||
)
|
||||
}
|
||||
|
||||
func TestConfigureSafeSlotsToImportOptimistically(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
|
||||
app := cli.App{}
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
set.Int(flags.SafeSlotsToImportOptimistically.Name, 0, "")
|
||||
require.NoError(t, set.Set(flags.SafeSlotsToImportOptimistically.Name, strconv.Itoa(128)))
|
||||
cliCtx := cli.NewContext(&app, set, nil)
|
||||
|
||||
configureSafeSlotsToImportOptimistically(cliCtx)
|
||||
|
||||
assert.Equal(t, types.Slot(128), params.BeaconConfig().SafeSlotsToImportOptimistically)
|
||||
}
|
||||
|
||||
func TestConfigureSlotsPerArchivedPoint(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
|
||||
|
||||
@@ -114,10 +114,12 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
|
||||
flags.ConfigureGlobalFlags(cliCtx)
|
||||
configureChainConfig(cliCtx)
|
||||
configureHistoricalSlasher(cliCtx)
|
||||
configureSafeSlotsToImportOptimistically(cliCtx)
|
||||
configureSlotsPerArchivedPoint(cliCtx)
|
||||
configureEth1Config(cliCtx)
|
||||
configureNetwork(cliCtx)
|
||||
configureInteropConfig(cliCtx)
|
||||
configureExecutionSetting(cliCtx)
|
||||
|
||||
// Initializes any forks here.
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
@@ -153,65 +155,80 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.Debugln("Starting DB")
|
||||
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debugln("Starting Slashing DB")
|
||||
if err := beacon.startSlasherDB(cliCtx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debugln("Starting State Gen")
|
||||
if err := beacon.startStateGen(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debugln("Registering P2P Service")
|
||||
if err := beacon.registerP2P(cliCtx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debugln("Registering POW Chain Service")
|
||||
if err := beacon.registerPOWChainService(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debugln("Registering Attestation Pool Service")
|
||||
if err := beacon.registerAttestationPool(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debugln("Registering Determinstic Genesis Service")
|
||||
if err := beacon.registerDeterminsticGenesisService(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debugln("Starting Fork Choice")
|
||||
beacon.startForkChoice()
|
||||
|
||||
log.Debugln("Registering Blockchain Service")
|
||||
if err := beacon.registerBlockchainService(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debugln("Registering Intial Sync Service")
|
||||
if err := beacon.registerInitialSyncService(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debugln("Registering Sync Service")
|
||||
if err := beacon.registerSyncService(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debugln("Registering Slasher Service")
|
||||
if err := beacon.registerSlasherService(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debugln("Registering RPC Service")
|
||||
if err := beacon.registerRPCService(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debugln("Registering GRPC Gateway Service")
|
||||
if err := beacon.registerGRPCGateway(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debugln("Registering Validator Monitoring Service")
|
||||
if err := beacon.registerValidatorMonitorService(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !cliCtx.Bool(cmd.DisableMonitoringFlag.Name) {
|
||||
log.Debugln("Registering Prometheus Service")
|
||||
if err := beacon.registerPrometheusService(cliCtx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -538,6 +555,7 @@ func (b *BeaconNode) registerBlockchainService() error {
|
||||
blockchain.WithDatabase(b.db),
|
||||
blockchain.WithDepositCache(b.depositCache),
|
||||
blockchain.WithChainStartFetcher(web3Service),
|
||||
blockchain.WithExecutionEngineCaller(web3Service.EngineAPIClient()),
|
||||
blockchain.WithAttestationPool(b.attestationPool),
|
||||
blockchain.WithExitPool(b.exitPool),
|
||||
blockchain.WithSlashingPool(b.slashingsPool),
|
||||
@@ -764,6 +782,7 @@ func (b *BeaconNode) registerRPCService() error {
|
||||
StateGen: b.stateGen,
|
||||
EnableDebugRPCEndpoints: enableDebugRPCEndpoints,
|
||||
MaxMsgSize: maxMsgSize,
|
||||
ExecutionEngineCaller: web3Service.EngineAPIClient(),
|
||||
})
|
||||
|
||||
return b.services.RegisterService(rpcService)
|
||||
|
||||
@@ -98,7 +98,7 @@ func (s *Service) PublishToTopic(ctx context.Context, topic string, data []byte,
|
||||
|
||||
// SubscribeToTopic joins (if necessary) and subscribes to PubSub topic.
|
||||
func (s *Service) SubscribeToTopic(topic string, opts ...pubsub.SubOpt) (*pubsub.Subscription, error) {
|
||||
s.awaitStateInitialized() // Genesis time and genesis validator root are required to subscribe.
|
||||
s.awaitStateInitialized() // Genesis time and genesis validators root are required to subscribe.
|
||||
|
||||
topicHandle, err := s.JoinTopic(topic)
|
||||
if err != nil {
|
||||
|
||||
@@ -490,7 +490,7 @@ func (s *Service) connectToBootnodes() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Returns true if the service is aware of the genesis time and genesis validator root. This is
|
||||
// Returns true if the service is aware of the genesis time and genesis validators root. This is
|
||||
// required for discovery and pubsub validation.
|
||||
func (s *Service) isInitialized() bool {
|
||||
return !s.genesisTime.IsZero() && len(s.genesisValidatorsRoot) == 32
|
||||
|
||||
@@ -306,7 +306,7 @@ func TestService_JoinLeaveTopic(t *testing.T) {
|
||||
// digest associated with that genesis event.
|
||||
func initializeStateWithForkDigest(ctx context.Context, t *testing.T, ef *event.Feed) [4]byte {
|
||||
gt := prysmTime.Now()
|
||||
gvr := bytesutil.PadTo([]byte("genesis validator root"), 32)
|
||||
gvr := bytesutil.PadTo([]byte("genesis validators root"), 32)
|
||||
for n := 0; n == 0; {
|
||||
if ctx.Err() != nil {
|
||||
t.Fatal(ctx.Err())
|
||||
|
||||
@@ -27,10 +27,13 @@ go_library(
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/powchain/engine-api-client/v1:go_default_library",
|
||||
"//beacon-chain/powchain/types:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native/v1:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
"//contracts/deposit:go_default_library",
|
||||
|
||||
42
beacon-chain/powchain/engine-api-client/v1/BUILD.bazel
Normal file
@@ -0,0 +1,42 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"auth.go",
|
||||
"client.go",
|
||||
"errors.go",
|
||||
"options.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//config/params:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
|
||||
"@com_github_golang_jwt_jwt//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"auth_test.go",
|
||||
"client_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
|
||||
"@com_github_golang_jwt_jwt//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
)
|
||||
44
beacon-chain/powchain/engine-api-client/v1/auth.go
Normal file
@@ -0,0 +1,44 @@
package v1

import (
    "net/http"
    "time"

    "github.com/golang-jwt/jwt"
    "github.com/pkg/errors"
)

// This creates a custom HTTP transport which we can attach to our HTTP client
// in order to inject JWT auth strings into our HTTP request headers. Authentication
// is required when interacting with an Ethereum engine API server via HTTP, and JWT
// is chosen as the scheme of choice.
// For more details on the requirements of authentication when using the engine API, see
// the specification here: https://github.com/ethereum/execution-apis/blob/main/src/engine/authentication.md
//
// To use this transport, initialize a new &http.Client{} from the standard library
// and set the Transport field to &jwtTransport{} with values
// http.DefaultTransport and a JWT secret.
type jwtTransport struct {
    underlyingTransport http.RoundTripper
    jwtSecret           []byte
}

// RoundTrip ensures our transport implements the http.RoundTripper interface from the
// standard library. When used as the transport for an HTTP client, the code below
// will run every time our client makes an HTTP request. This is used to inject
// a JWT bearer token into the Authorization request header of every outgoing request
// our HTTP client makes.
func (t *jwtTransport) RoundTrip(req *http.Request) (*http.Response, error) {
    token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
        // Required claim for engine API auth. "iat" stands for issued at
        // and it must be a unix timestamp that is +/- 5 seconds from the current
        // timestamp at the moment the server verifies this value.
        "iat": time.Now().Unix(),
    })
    tokenString, err := token.SignedString(t.jwtSecret)
    if err != nil {
        return nil, errors.Wrap(err, "could not produce signed JWT token")
    }
    req.Header.Set("Authorization", "Bearer "+tokenString)
    return t.underlyingTransport.RoundTrip(req)
}
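The doc comment on jwtTransport describes how to wire it into an HTTP client; a minimal in-package sketch, mirroring the test further down, looks like this (the 32-byte secret is a placeholder, not a value read from the shared JWT secret file):

    secret := bytesutil.PadTo([]byte("placeholder-secret"), 32) // placeholder secret for illustration only
    httpClient := &http.Client{
        Timeout: DefaultTimeout,
        Transport: &jwtTransport{
            underlyingTransport: http.DefaultTransport,
            jwtSecret:           secret,
        },
    }
    // Every request made through httpClient now carries a freshly signed
    // "Authorization: Bearer <token>" header whose "iat" claim is the current time.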
|
||||
53
beacon-chain/powchain/engine-api-client/v1/auth_test.go
Normal file
@@ -0,0 +1,53 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang-jwt/jwt"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
|
||||
func TestJWTAuthTransport(t *testing.T) {
|
||||
secret := bytesutil.PadTo([]byte("foo"), 32)
|
||||
authTransport := &jwtTransport{
|
||||
underlyingTransport: http.DefaultTransport,
|
||||
jwtSecret: secret,
|
||||
}
|
||||
client := &http.Client{
|
||||
Timeout: DefaultTimeout,
|
||||
Transport: authTransport,
|
||||
}
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
reqToken := r.Header.Get("Authorization")
|
||||
splitToken := strings.Split(reqToken, "Bearer")
|
||||
// The format should be `Bearer ${token}`.
|
||||
require.Equal(t, 2, len(splitToken))
|
||||
reqToken = strings.TrimSpace(splitToken[1])
|
||||
token, err := jwt.Parse(reqToken, func(token *jwt.Token) (interface{}, error) {
|
||||
// We should be doing HMAC signing.
|
||||
_, ok := token.Method.(*jwt.SigningMethodHMAC)
|
||||
require.Equal(t, true, ok)
|
||||
return secret, nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, token.Valid)
|
||||
claims, ok := token.Claims.(jwt.MapClaims)
|
||||
require.Equal(t, true, ok)
|
||||
item, ok := claims["iat"]
|
||||
require.Equal(t, true, ok)
|
||||
iat, ok := item.(float64)
|
||||
require.Equal(t, true, ok)
|
||||
issuedAt := time.Unix(int64(iat), 0)
|
||||
// The claims should have an "iat" field (issued at) that is at most, 5 seconds ago.
|
||||
since := time.Since(issuedAt)
|
||||
require.Equal(t, true, since <= time.Second*5)
|
||||
}))
|
||||
defer srv.Close()
|
||||
_, err := client.Get(srv.URL)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
206
beacon-chain/powchain/engine-api-client/v1/client.go
Normal file
@@ -0,0 +1,206 @@
|
||||
// Package v1 defines an API client for the engine API defined in https://github.com/ethereum/execution-apis.
|
||||
// This client is used for the Prysm consensus node to connect to execution node as part of
|
||||
// the Ethereum proof-of-stake machinery.
|
||||
package v1
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"math/big"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
// NewPayloadMethod v1 request string for JSON-RPC.
|
||||
NewPayloadMethod = "engine_newPayloadV1"
|
||||
// ForkchoiceUpdatedMethod v1 request string for JSON-RPC.
|
||||
ForkchoiceUpdatedMethod = "engine_forkchoiceUpdatedV1"
|
||||
// GetPayloadMethod v1 request string for JSON-RPC.
|
||||
GetPayloadMethod = "engine_getPayloadV1"
|
||||
// ExchangeTransitionConfigurationMethod v1 request string for JSON-RPC.
|
||||
ExchangeTransitionConfigurationMethod = "engine_exchangeTransitionConfigurationV1"
|
||||
// ExecutionBlockByHashMethod request string for JSON-RPC.
|
||||
ExecutionBlockByHashMethod = "eth_getBlockByHash"
|
||||
// ExecutionBlockByNumberMethod request string for JSON-RPC.
|
||||
ExecutionBlockByNumberMethod = "eth_getBlockByNumber"
|
||||
// DefaultTimeout for HTTP.
|
||||
DefaultTimeout = time.Second * 5
|
||||
)
|
||||
|
||||
// ForkchoiceUpdatedResponse is the response kind received by the
|
||||
// engine_forkchoiceUpdatedV1 endpoint.
|
||||
type ForkchoiceUpdatedResponse struct {
|
||||
Status *pb.PayloadStatus `json:"payloadStatus"`
|
||||
PayloadId *pb.PayloadIDBytes `json:"payloadId"`
|
||||
}
|
||||
|
||||
// EngineCaller defines a client that can interact with an Ethereum
|
||||
// execution node's engine service via JSON-RPC.
|
||||
type EngineCaller interface {
|
||||
NewPayload(ctx context.Context, payload *pb.ExecutionPayload) (*pb.PayloadStatus, error)
|
||||
ForkchoiceUpdated(
|
||||
ctx context.Context, state *pb.ForkchoiceState, attrs *pb.PayloadAttributes,
|
||||
) (*ForkchoiceUpdatedResponse, error)
|
||||
GetPayload(ctx context.Context, payloadId [8]byte) (*pb.ExecutionPayload, error)
|
||||
ExchangeTransitionConfiguration(
|
||||
ctx context.Context, cfg *pb.TransitionConfiguration,
|
||||
) (*pb.TransitionConfiguration, error)
|
||||
LatestExecutionBlock(ctx context.Context) (*pb.ExecutionBlock, error)
|
||||
ExecutionBlockByHash(ctx context.Context, hash common.Hash) (*pb.ExecutionBlock, error)
|
||||
}
|
||||
|
||||
// Client defines a new engine API client for the Prysm consensus node
|
||||
// to interact with an Ethereum execution node.
|
||||
type Client struct {
|
||||
cfg *config
|
||||
rpc *rpc.Client
|
||||
}
|
||||
|
||||
// New returns a ready, engine API client from an endpoint and configuration options.
|
||||
// Only http(s) and ipc (inter-process communication) URL schemes are supported.
|
||||
func New(ctx context.Context, endpoint string, opts ...Option) (*Client, error) {
|
||||
u, err := url.Parse(endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c := &Client{
|
||||
cfg: defaultConfig(),
|
||||
}
|
||||
for _, opt := range opts {
|
||||
if err := opt(c); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
switch u.Scheme {
|
||||
case "http", "https":
|
||||
c.rpc, err = rpc.DialHTTPWithClient(endpoint, c.cfg.httpClient)
|
||||
case "":
|
||||
c.rpc, err = rpc.DialIPC(ctx, endpoint)
|
||||
default:
|
||||
return nil, errors.Wrapf(ErrUnsupportedScheme, "%q", u.Scheme)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c, nil
|
||||
}
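As a usage illustration for the constructor above, here is a hedged, standalone sketch; the endpoint URL is an assumption (it should point at an execution node's authenticated engine API port), and any functional options from options.go are omitted.

    package main

    import (
        "context"
        "log"

        enginev1 "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
    )

    func main() {
        ctx := context.Background()
        // Assumed local endpoint; only http(s) and ipc schemes are accepted by New.
        client, err := enginev1.New(ctx, "http://127.0.0.1:8551")
        if err != nil {
            log.Fatal(err)
        }
        block, err := client.LatestExecutionBlock(ctx)
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("latest execution block: %+v", block)
    }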
|
||||
|
||||
// NewPayload calls the engine_newPayloadV1 method via JSON-RPC.
|
||||
func (c *Client) NewPayload(ctx context.Context, payload *pb.ExecutionPayload) (*pb.PayloadStatus, error) {
|
||||
result := &pb.PayloadStatus{}
|
||||
err := c.rpc.CallContext(ctx, result, NewPayloadMethod, payload)
|
||||
return result, handleRPCError(err)
|
||||
}
|
||||
|
||||
// ForkchoiceUpdated calls the engine_forkchoiceUpdatedV1 method via JSON-RPC.
|
||||
func (c *Client) ForkchoiceUpdated(
|
||||
ctx context.Context, state *pb.ForkchoiceState, attrs *pb.PayloadAttributes,
|
||||
) (*ForkchoiceUpdatedResponse, error) {
|
||||
result := &ForkchoiceUpdatedResponse{}
|
||||
err := c.rpc.CallContext(ctx, result, ForkchoiceUpdatedMethod, state, attrs)
|
||||
return result, handleRPCError(err)
|
||||
}
|
||||
|
||||
// GetPayload calls the engine_getPayloadV1 method via JSON-RPC.
|
||||
func (c *Client) GetPayload(ctx context.Context, payloadId [8]byte) (*pb.ExecutionPayload, error) {
|
||||
result := &pb.ExecutionPayload{}
|
||||
err := c.rpc.CallContext(ctx, result, GetPayloadMethod, pb.PayloadIDBytes(payloadId))
|
||||
return result, handleRPCError(err)
|
||||
}
|
||||
|
||||
// ExchangeTransitionConfiguration calls the engine_exchangeTransitionConfigurationV1 method via JSON-RPC.
|
||||
func (c *Client) ExchangeTransitionConfiguration(
|
||||
ctx context.Context, cfg *pb.TransitionConfiguration,
|
||||
) (*pb.TransitionConfiguration, error) {
|
||||
// We set terminal block number to 0 as the parameter is not set on the consensus layer.
|
||||
zeroBigNum := big.NewInt(0)
|
||||
cfg.TerminalBlockNumber = zeroBigNum.Bytes()
|
||||
result := &pb.TransitionConfiguration{}
|
||||
if err := c.rpc.CallContext(ctx, result, ExchangeTransitionConfigurationMethod, cfg); err != nil {
|
||||
return nil, handleRPCError(err)
|
||||
}
|
||||
// We surface an error to the user if local configuration settings mismatch
|
||||
// according to the response from the execution node.
|
||||
cfgTerminalHash := params.BeaconConfig().TerminalBlockHash[:]
|
||||
if !bytes.Equal(cfgTerminalHash, result.TerminalBlockHash) {
|
||||
return nil, errors.Wrapf(
|
||||
ErrMismatchTerminalBlockHash,
|
||||
"got %#x from execution node, wanted %#x",
|
||||
result.TerminalBlockHash,
|
||||
cfgTerminalHash,
|
||||
)
|
||||
}
|
||||
ttdCfg := params.BeaconConfig().TerminalTotalDifficulty
|
||||
if ttdCfg != result.TerminalTotalDifficulty {
|
||||
return nil, errors.Wrapf(
|
||||
ErrMismatchTerminalTotalDiff,
|
||||
"got %s from execution node, wanted %s",
|
||||
result.TerminalTotalDifficulty,
|
||||
ttdCfg,
|
||||
)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// LatestExecutionBlock fetches the latest execution engine block by calling
|
||||
// eth_blockByNumber via JSON-RPC.
|
||||
func (c *Client) LatestExecutionBlock(ctx context.Context) (*pb.ExecutionBlock, error) {
|
||||
result := &pb.ExecutionBlock{}
|
||||
err := c.rpc.CallContext(
|
||||
ctx,
|
||||
result,
|
||||
ExecutionBlockByNumberMethod,
|
||||
"latest",
|
||||
false, /* no full transaction objects */
|
||||
)
|
||||
return result, handleRPCError(err)
|
||||
}
|
||||
|
||||
// ExecutionBlockByHash fetches an execution engine block by hash by calling
|
||||
// eth_blockByHash via JSON-RPC.
|
||||
func (c *Client) ExecutionBlockByHash(ctx context.Context, hash common.Hash) (*pb.ExecutionBlock, error) {
|
||||
result := &pb.ExecutionBlock{}
|
||||
err := c.rpc.CallContext(ctx, result, ExecutionBlockByHashMethod, hash, false /* no full transaction objects */)
|
||||
return result, handleRPCError(err)
|
||||
}
|
||||
|
||||
// Handles errors received from the RPC server according to the specification.
|
||||
func handleRPCError(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
e, ok := err.(rpc.Error)
|
||||
if !ok {
|
||||
return errors.Wrap(err, "got an unexpected error")
|
||||
}
|
||||
switch e.ErrorCode() {
|
||||
case -32700:
|
||||
return ErrParse
|
||||
case -32600:
|
||||
return ErrInvalidRequest
|
||||
case -32601:
|
||||
return ErrMethodNotFound
|
||||
case -32602:
|
||||
return ErrInvalidParams
|
||||
case -32603:
|
||||
return ErrInternal
|
||||
case -32001:
|
||||
return ErrUnknownPayload
|
||||
case -32000:
|
||||
// Only -32000 status codes are data errors in the RPC specification.
|
||||
errWithData, ok := err.(rpc.DataError)
|
||||
if !ok {
|
||||
return errors.Wrap(err, "got an unexpected error")
|
||||
}
|
||||
return errors.Wrapf(ErrServer, "%v", errWithData.ErrorData())
|
||||
default:
|
||||
return err
|
||||
}
|
||||
}
|
||||
668
beacon-chain/powchain/engine-api-client/v1/client_test.go
Normal file
@@ -0,0 +1,668 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/pkg/errors"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var _ = EngineCaller(&Client{})
|
||||
|
||||
func TestClient_IPC(t *testing.T) {
|
||||
server := newTestIPCServer(t)
|
||||
defer server.Stop()
|
||||
rpcClient := rpc.DialInProc(server)
|
||||
defer rpcClient.Close()
|
||||
client := &Client{}
|
||||
client.rpc = rpcClient
|
||||
ctx := context.Background()
|
||||
fix := fixtures()
|
||||
|
||||
t.Run(GetPayloadMethod, func(t *testing.T) {
|
||||
want, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
|
||||
require.Equal(t, true, ok)
|
||||
payloadId := [8]byte{1}
|
||||
resp, err := client.GetPayload(ctx, payloadId)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want, resp)
|
||||
})
|
||||
t.Run(ForkchoiceUpdatedMethod, func(t *testing.T) {
|
||||
want, ok := fix["ForkchoiceUpdatedResponse"].(*ForkchoiceUpdatedResponse)
|
||||
require.Equal(t, true, ok)
|
||||
resp, err := client.ForkchoiceUpdated(ctx, &pb.ForkchoiceState{}, &pb.PayloadAttributes{})
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want.Status, resp.Status)
|
||||
require.DeepEqual(t, want.PayloadId, resp.PayloadId)
|
||||
})
|
||||
t.Run(NewPayloadMethod, func(t *testing.T) {
|
||||
want, ok := fix["PayloadStatus"].(*pb.PayloadStatus)
|
||||
require.Equal(t, true, ok)
|
||||
req, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
|
||||
require.Equal(t, true, ok)
|
||||
resp, err := client.NewPayload(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want, resp)
|
||||
})
|
||||
t.Run(NewPayloadMethod, func(t *testing.T) {
|
||||
want, ok := fix["PayloadStatus"].(*pb.PayloadStatus)
|
||||
require.Equal(t, true, ok)
|
||||
req, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
|
||||
require.Equal(t, true, ok)
|
||||
resp, err := client.NewPayload(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want, resp)
|
||||
})
|
||||
t.Run(ExchangeTransitionConfigurationMethod, func(t *testing.T) {
|
||||
want, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
|
||||
require.Equal(t, true, ok)
|
||||
resp, err := client.ExchangeTransitionConfiguration(ctx, want)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want, resp)
|
||||
})
|
||||
t.Run(ExecutionBlockByNumberMethod, func(t *testing.T) {
|
||||
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
|
||||
require.Equal(t, true, ok)
|
||||
resp, err := client.LatestExecutionBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want, resp)
|
||||
})
|
||||
t.Run(ExecutionBlockByHashMethod, func(t *testing.T) {
|
||||
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
|
||||
require.Equal(t, true, ok)
|
||||
arg := common.BytesToHash([]byte("foo"))
|
||||
resp, err := client.ExecutionBlockByHash(ctx, arg)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want, resp)
|
||||
})
|
||||
}
|
||||
|
||||
func TestClient_HTTP(t *testing.T) {
ctx := context.Background()
fix := fixtures()

t.Run(GetPayloadMethod, func(t *testing.T) {
payloadId := [8]byte{1}
want, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
enc, err := ioutil.ReadAll(r.Body)
require.NoError(t, err)
jsonRequestString := string(enc)

reqArg, err := json.Marshal(pb.PayloadIDBytes(payloadId))
require.NoError(t, err)

// We expect the JSON string RPC request contains the right arguments.
require.Equal(t, true, strings.Contains(
jsonRequestString, string(reqArg),
))
resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": want,
}
err = json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
}))
defer srv.Close()

rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()

client := &Client{}
client.rpc = rpcClient

// We call the RPC method via HTTP and expect a proper result.
resp, err := client.GetPayload(ctx, payloadId)
require.NoError(t, err)
require.DeepEqual(t, want, resp)
})
t.Run(ForkchoiceUpdatedMethod, func(t *testing.T) {
forkChoiceState := &pb.ForkchoiceState{
HeadBlockHash: []byte("head"),
SafeBlockHash: []byte("safe"),
FinalizedBlockHash: []byte("finalized"),
}
payloadAttributes := &pb.PayloadAttributes{
Timestamp: 1,
Random: []byte("random"),
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
}
want, ok := fix["ForkchoiceUpdatedResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)

srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
enc, err := ioutil.ReadAll(r.Body)
require.NoError(t, err)
jsonRequestString := string(enc)

forkChoiceStateReq, err := json.Marshal(forkChoiceState)
require.NoError(t, err)
payloadAttrsReq, err := json.Marshal(payloadAttributes)
require.NoError(t, err)

// We expect the JSON string RPC request contains the right arguments.
require.Equal(t, true, strings.Contains(
jsonRequestString, string(forkChoiceStateReq),
))
require.Equal(t, true, strings.Contains(
jsonRequestString, string(payloadAttrsReq),
))
resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": want,
}
err = json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
}))
defer srv.Close()

rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()

client := &Client{}
client.rpc = rpcClient

// We call the RPC method via HTTP and expect a proper result.
resp, err := client.ForkchoiceUpdated(ctx, forkChoiceState, payloadAttributes)
require.NoError(t, err)
require.DeepEqual(t, want.Status, resp.Status)
require.DeepEqual(t, want.PayloadId, resp.PayloadId)
})
t.Run(NewPayloadMethod, func(t *testing.T) {
execPayload, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
want, ok := fix["PayloadStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
enc, err := ioutil.ReadAll(r.Body)
require.NoError(t, err)
jsonRequestString := string(enc)

reqArg, err := json.Marshal(execPayload)
require.NoError(t, err)

// We expect the JSON string RPC request contains the right arguments.
require.Equal(t, true, strings.Contains(
jsonRequestString, string(reqArg),
))
resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": want,
}
err = json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
}))
defer srv.Close()

rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()

client := &Client{}
client.rpc = rpcClient

// We call the RPC method via HTTP and expect a proper result.
resp, err := client.NewPayload(ctx, execPayload)
require.NoError(t, err)
require.DeepEqual(t, want, resp)
})
t.Run(ExecutionBlockByNumberMethod, func(t *testing.T) {
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
require.Equal(t, true, ok)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": want,
}
err := json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
}))
defer srv.Close()

rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()

client := &Client{}
client.rpc = rpcClient

// We call the RPC method via HTTP and expect a proper result.
resp, err := client.LatestExecutionBlock(ctx)
require.NoError(t, err)
require.DeepEqual(t, want, resp)
})
t.Run(ExchangeTransitionConfigurationMethod, func(t *testing.T) {
want, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
require.Equal(t, true, ok)
encodedReq, err := json.Marshal(want)
require.NoError(t, err)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
enc, err := ioutil.ReadAll(r.Body)
require.NoError(t, err)
jsonRequestString := string(enc)
// We expect the JSON string RPC request contains the right arguments.
require.Equal(t, true, strings.Contains(
jsonRequestString, string(encodedReq),
))
resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": want,
}
err = json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
}))
defer srv.Close()

rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()

client := &Client{}
client.rpc = rpcClient

// We call the RPC method via HTTP and expect a proper result.
resp, err := client.ExchangeTransitionConfiguration(ctx, want)
require.NoError(t, err)
require.DeepEqual(t, want, resp)
})
t.Run(ExecutionBlockByHashMethod, func(t *testing.T) {
arg := common.BytesToHash([]byte("foo"))
want, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
require.Equal(t, true, ok)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
enc, err := ioutil.ReadAll(r.Body)
require.NoError(t, err)
jsonRequestString := string(enc)
// We expect the JSON string RPC request contains the right arguments.
require.Equal(t, true, strings.Contains(
jsonRequestString, fmt.Sprintf("%#x", arg),
))
resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": want,
}
err = json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
}))
defer srv.Close()

rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()

client := &Client{}
client.rpc = rpcClient

// We call the RPC method via HTTP and expect a proper result.
resp, err := client.ExecutionBlockByHash(ctx, arg)
require.NoError(t, err)
require.DeepEqual(t, want, resp)
})
}

func TestExchangeTransitionConfiguration(t *testing.T) {
fix := fixtures()
ctx := context.Background()
t.Run("wrong terminal block hash", func(t *testing.T) {
request, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
require.Equal(t, true, ok)
resp, ok := proto.Clone(request).(*pb.TransitionConfiguration)
require.Equal(t, true, ok)

srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()

// Change the terminal block hash.
h := common.BytesToHash([]byte("foo"))
resp.TerminalBlockHash = h[:]
respJSON := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": resp,
}
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
}))
defer srv.Close()

rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()

client := &Client{}
client.rpc = rpcClient

_, err = client.ExchangeTransitionConfiguration(ctx, request)
require.Equal(t, true, errors.Is(err, ErrMismatchTerminalBlockHash))
})
t.Run("wrong terminal total difficulty", func(t *testing.T) {
request, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
require.Equal(t, true, ok)
resp, ok := proto.Clone(request).(*pb.TransitionConfiguration)
require.Equal(t, true, ok)

srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()

// Change the terminal total difficulty.
resp.TerminalTotalDifficulty = "bar"
respJSON := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": resp,
}
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
}))
defer srv.Close()

rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()

client := &Client{}
client.rpc = rpcClient

_, err = client.ExchangeTransitionConfiguration(ctx, request)
require.Equal(t, true, errors.Is(err, ErrMismatchTerminalTotalDiff))
})
}
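The two subtests above pin down the client-side validation that ExchangeTransitionConfiguration is expected to perform: the execution node must echo back the same terminal block hash and terminal total difficulty that Prysm is configured with, otherwise the call fails with the corresponding sentinel error. A minimal sketch of that comparison follows for orientation; it is illustrative only (the real check lives in the client implementation, which is not shown in this part of the diff), the helper name is hypothetical, and it assumes the standard library bytes package is imported.

// sketchValidateTransitionConfig is a hypothetical helper mirroring the checks
// exercised by the two subtests above.
func sketchValidateTransitionConfig(want, got *pb.TransitionConfiguration) error {
	// The terminal block hashes are raw byte slices, so compare them with bytes.Equal.
	if !bytes.Equal(want.TerminalBlockHash, got.TerminalBlockHash) {
		return ErrMismatchTerminalBlockHash
	}
	// The terminal total difficulty is carried as a string in the proto message.
	if want.TerminalTotalDifficulty != got.TerminalTotalDifficulty {
		return ErrMismatchTerminalTotalDiff
	}
	return nil
}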

type customError struct {
code int
}

func (c *customError) ErrorCode() int {
return c.code
}

func (*customError) Error() string {
return "something went wrong"
}

type dataError struct {
code int
data interface{}
}

func (c *dataError) ErrorCode() int {
return c.code
}

func (*dataError) Error() string {
return "something went wrong"
}

func (c *dataError) ErrorData() interface{} {
return c.data
}

func Test_handleRPCError(t *testing.T) {
got := handleRPCError(nil)
require.Equal(t, true, got == nil)

var tests = []struct {
name string
expected error
expectedContains string
given error
}{
{
name: "not an rpc error",
expectedContains: "got an unexpected error",
given: errors.New("foo"),
},
{
name: "ErrParse",
expectedContains: ErrParse.Error(),
given: &customError{code: -32700},
},
{
name: "ErrInvalidRequest",
expectedContains: ErrInvalidRequest.Error(),
given: &customError{code: -32600},
},
{
name: "ErrMethodNotFound",
expectedContains: ErrMethodNotFound.Error(),
given: &customError{code: -32601},
},
{
name: "ErrInvalidParams",
expectedContains: ErrInvalidParams.Error(),
given: &customError{code: -32602},
},
{
name: "ErrInternal",
expectedContains: ErrInternal.Error(),
given: &customError{code: -32603},
},
{
name: "ErrUnknownPayload",
expectedContains: ErrUnknownPayload.Error(),
given: &customError{code: -32001},
},
{
name: "ErrServer unexpected no data",
expectedContains: "got an unexpected error",
given: &customError{code: -32000},
},
{
name: "ErrServer with data",
expectedContains: ErrServer.Error(),
given: &dataError{code: -32000, data: 5},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := handleRPCError(tt.given)
require.ErrorContains(t, tt.expectedContains, got)
})
}
}
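The table above encodes the mapping from JSON-RPC error codes to the sentinel errors defined in errors.go further down in this diff. For orientation, a hedged sketch of that mapping is shown below; it is not the package's actual handleRPCError implementation, and it assumes go-ethereum's rpc.Error and rpc.DataError interfaces (the same shapes the customError and dataError test doubles above satisfy).

// sketchHandleRPCError is an illustrative stand-in for handleRPCError.
func sketchHandleRPCError(err error) error {
	if err == nil {
		return nil
	}
	e, ok := err.(rpc.Error)
	if !ok {
		// Anything that is not a JSON-RPC error is surfaced as unexpected.
		return errors.Wrap(err, "got an unexpected error")
	}
	switch e.ErrorCode() {
	case -32700:
		return ErrParse
	case -32600:
		return ErrInvalidRequest
	case -32601:
		return ErrMethodNotFound
	case -32602:
		return ErrInvalidParams
	case -32603:
		return ErrInternal
	case -32001:
		return ErrUnknownPayload
	case -32000:
		// A -32000 server error is only treated as ErrServer when it carries data.
		if _, ok := err.(rpc.DataError); ok {
			return ErrServer
		}
		return errors.Wrap(err, "got an unexpected error")
	default:
		return err
	}
}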

func newTestIPCServer(t *testing.T) *rpc.Server {
server := rpc.NewServer()
err := server.RegisterName("engine", new(testEngineService))
require.NoError(t, err)
err = server.RegisterName("eth", new(testEngineService))
require.NoError(t, err)
return server
}

func fixtures() map[string]interface{} {
foo := bytesutil.ToBytes32([]byte("foo"))
bar := bytesutil.PadTo([]byte("bar"), 20)
baz := bytesutil.PadTo([]byte("baz"), 256)
baseFeePerGas := big.NewInt(6)
executionPayloadFixture := &pb.ExecutionPayload{
ParentHash: foo[:],
FeeRecipient: bar,
StateRoot: foo[:],
ReceiptsRoot: foo[:],
LogsBloom: baz,
Random: foo[:],
BlockNumber: 1,
GasLimit: 1,
GasUsed: 1,
Timestamp: 1,
ExtraData: foo[:],
BaseFeePerGas: bytesutil.PadTo(baseFeePerGas.Bytes(), fieldparams.RootLength),
BlockHash: foo[:],
Transactions: [][]byte{foo[:]},
}
number := bytesutil.PadTo([]byte("100"), fieldparams.RootLength)
hash := bytesutil.PadTo([]byte("hash"), fieldparams.RootLength)
parent := bytesutil.PadTo([]byte("parentHash"), fieldparams.RootLength)
sha3Uncles := bytesutil.PadTo([]byte("sha3Uncles"), fieldparams.RootLength)
miner := bytesutil.PadTo([]byte("miner"), fieldparams.FeeRecipientLength)
stateRoot := bytesutil.PadTo([]byte("stateRoot"), fieldparams.RootLength)
transactionsRoot := bytesutil.PadTo([]byte("transactionsRoot"), fieldparams.RootLength)
receiptsRoot := bytesutil.PadTo([]byte("receiptsRoot"), fieldparams.RootLength)
logsBloom := bytesutil.PadTo([]byte("logs"), fieldparams.LogsBloomLength)
executionBlock := &pb.ExecutionBlock{
Number: number,
Hash: hash,
ParentHash: parent,
Sha3Uncles: sha3Uncles,
Miner: miner,
StateRoot: stateRoot,
TransactionsRoot: transactionsRoot,
ReceiptsRoot: receiptsRoot,
LogsBloom: logsBloom,
Difficulty: bytesutil.PadTo([]byte("1"), fieldparams.RootLength),
TotalDifficulty: "2",
GasLimit: 3,
GasUsed: 4,
Timestamp: 5,
Size: bytesutil.PadTo([]byte("6"), fieldparams.RootLength),
ExtraData: bytesutil.PadTo([]byte("extraData"), fieldparams.RootLength),
BaseFeePerGas: bytesutil.PadTo([]byte("baseFeePerGas"), fieldparams.RootLength),
Transactions: [][]byte{foo[:]},
Uncles: [][]byte{foo[:]},
}
status := &pb.PayloadStatus{
Status: pb.PayloadStatus_ACCEPTED,
LatestValidHash: foo[:],
ValidationError: "",
}
id := pb.PayloadIDBytes([8]byte{1, 0, 0, 0, 0, 0, 0, 0})
forkChoiceResp := &ForkchoiceUpdatedResponse{
Status: status,
PayloadId: &id,
}
transitionCfg := &pb.TransitionConfiguration{
TerminalBlockHash: params.BeaconConfig().TerminalBlockHash[:],
TerminalTotalDifficulty: params.BeaconConfig().TerminalTotalDifficulty,
TerminalBlockNumber: big.NewInt(0).Bytes(),
}
return map[string]interface{}{
"ExecutionBlock": executionBlock,
"ExecutionPayload": executionPayloadFixture,
"PayloadStatus": status,
"ForkchoiceUpdatedResponse": forkChoiceResp,
"TransitionConfiguration": transitionCfg,
}
}

type testEngineService struct{}

func (*testEngineService) NoArgsRets() {}

func (*testEngineService) GetBlockByHash(
_ context.Context, _ common.Hash, _ bool,
) *pb.ExecutionBlock {
fix := fixtures()
item, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
if !ok {
panic("not found")
}
return item
}

func (*testEngineService) GetBlockByNumber(
_ context.Context, _ string, _ bool,
) *pb.ExecutionBlock {
fix := fixtures()
item, ok := fix["ExecutionBlock"].(*pb.ExecutionBlock)
if !ok {
panic("not found")
}
return item
}

func (*testEngineService) GetPayloadV1(
_ context.Context, _ pb.PayloadIDBytes,
) *pb.ExecutionPayload {
fix := fixtures()
item, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
if !ok {
panic("not found")
}
return item
}

func (*testEngineService) ExchangeTransitionConfigurationV1(
_ context.Context, _ *pb.TransitionConfiguration,
) *pb.TransitionConfiguration {
fix := fixtures()
item, ok := fix["TransitionConfiguration"].(*pb.TransitionConfiguration)
if !ok {
panic("not found")
}
return item
}

func (*testEngineService) ForkchoiceUpdatedV1(
_ context.Context, _ *pb.ForkchoiceState, _ *pb.PayloadAttributes,
) *ForkchoiceUpdatedResponse {
fix := fixtures()
item, ok := fix["ForkchoiceUpdatedResponse"].(*ForkchoiceUpdatedResponse)
if !ok {
panic("not found")
}
return item
}

func (*testEngineService) NewPayloadV1(
_ context.Context, _ *pb.ExecutionPayload,
) *pb.PayloadStatus {
fix := fixtures()
item, ok := fix["PayloadStatus"].(*pb.PayloadStatus)
if !ok {
panic("not found")
}
return item
}
28  beacon-chain/powchain/engine-api-client/v1/errors.go  Normal file
@@ -0,0 +1,28 @@
package v1

import "github.com/pkg/errors"

var (
// ErrParse corresponds to JSON-RPC code -32700.
ErrParse = errors.New("invalid JSON was received by the server")
// ErrInvalidRequest corresponds to JSON-RPC code -32600.
ErrInvalidRequest = errors.New("JSON sent is not valid request object")
// ErrMethodNotFound corresponds to JSON-RPC code -32601.
ErrMethodNotFound = errors.New("method not found")
// ErrInvalidParams corresponds to JSON-RPC code -32602.
ErrInvalidParams = errors.New("invalid method parameter(s)")
// ErrInternal corresponds to JSON-RPC code -32603.
ErrInternal = errors.New("internal JSON-RPC error")
// ErrServer corresponds to JSON-RPC code -32000.
ErrServer = errors.New("client error while processing request")
// ErrUnknownPayload corresponds to JSON-RPC code -32001.
ErrUnknownPayload = errors.New("payload does not exist or is not available")
// ErrUnsupportedScheme for unsupported URL schemes.
ErrUnsupportedScheme = errors.New("unsupported url scheme, only http(s) and ipc are supported")
// ErrMismatchTerminalBlockHash when the terminal block hash value received via
// the API mismatches Prysm's configuration value.
ErrMismatchTerminalBlockHash = errors.New("terminal block hash mismatch")
// ErrMismatchTerminalTotalDiff when the terminal total difficulty value received via
// the API mismatches Prysm's configuration value.
ErrMismatchTerminalTotalDiff = errors.New("terminal total difficulty mismatch")
)
39  beacon-chain/powchain/engine-api-client/v1/options.go  Normal file
@@ -0,0 +1,39 @@
package v1

import (
"net/http"
)

// Option for configuring the engine API client.
type Option func(c *Client) error

type config struct {
httpClient *http.Client
}

func defaultConfig() *config {
return &config{
httpClient: &http.Client{
Timeout: DefaultTimeout,
},
}
}

// WithJWTSecret allows setting a JWT secret for authenticating
// the client via HTTP connections.
func WithJWTSecret(secret []byte) Option {
return func(c *Client) error {
if len(secret) == 0 {
return nil
}
authTransport := &jwtTransport{
underlyingTransport: http.DefaultTransport,
jwtSecret: secret,
}
c.cfg.httpClient = &http.Client{
Timeout: DefaultTimeout,
Transport: authTransport,
}
return nil
}
}
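WithJWTSecret only takes effect when a non-empty secret is supplied: it swaps the default HTTP client for one whose transport uses the secret to authenticate requests (the jwtTransport type, defined elsewhere in this package). A short usage sketch follows, assuming the New(ctx, endpoint, opts ...Option) constructor that the powchain service calls later in this diff; the endpoint URL and the secret value are placeholders.

secret := make([]byte, 32) // placeholder; in practice read from the configured JWT secret file
client, err := New(ctx, "http://localhost:8551", WithJWTSecret(secret))
if err != nil {
	return err
}
_ = client // ready to issue authenticated engine API calls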
@@ -428,27 +428,27 @@ func (s *Service) requestBatchedHeadersAndLogs(ctx context.Context) error {
}

func (s *Service) retrieveBlockHashAndTime(ctx context.Context, blkNum *big.Int) ([32]byte, uint64, error) {
hash, err := s.BlockHashByHeight(ctx, blkNum)
bHash, err := s.BlockHashByHeight(ctx, blkNum)
if err != nil {
return [32]byte{}, 0, errors.Wrap(err, "could not get eth1 block hash")
}
if hash == [32]byte{} {
if bHash == [32]byte{} {
return [32]byte{}, 0, errors.Wrap(err, "got empty block hash")
}
timeStamp, err := s.BlockTimeByHeight(ctx, blkNum)
if err != nil {
return [32]byte{}, 0, errors.Wrap(err, "could not get block timestamp")
}
return hash, timeStamp, nil
return bHash, timeStamp, nil
}

// checkBlockNumberForChainStart checks the given block number for if chainstart has occurred.
func (s *Service) checkBlockNumberForChainStart(ctx context.Context, blkNum *big.Int) error {
hash, timeStamp, err := s.retrieveBlockHashAndTime(ctx, blkNum)
bHash, timeStamp, err := s.retrieveBlockHashAndTime(ctx, blkNum)
if err != nil {
return err
}
s.checkForChainstart(ctx, hash, blkNum, timeStamp)
s.checkForChainstart(ctx, bHash, blkNum, timeStamp)
return nil
}

@@ -333,7 +333,7 @@ func TestProcessETH2GenesisLog(t *testing.T) {
err = web3Service.ProcessETH1Block(context.Background(), big.NewInt(int64(logs[len(logs)-1].BlockNumber)))
require.NoError(t, err)

cachedDeposits := web3Service.ChainStartDeposits()
cachedDeposits := web3Service.chainStartData.ChainstartDeposits
require.Equal(t, depositsReqForChainStart, len(cachedDeposits))

// Receive the chain started event.
@@ -425,7 +425,7 @@ func TestProcessETH2GenesisLog_CorrectNumOfDeposits(t *testing.T) {
err = web3Service.processPastLogs(context.Background())
require.NoError(t, err)

cachedDeposits := web3Service.ChainStartDeposits()
cachedDeposits := web3Service.chainStartData.ChainstartDeposits
requiredDepsForChainstart := depositsReqForChainStart + depositOffset
require.Equal(t, requiredDepsForChainstart, len(cachedDeposits), "Did not cache the chain start deposits correctly")

@@ -529,7 +529,7 @@ func TestProcessETH2GenesisLog_LargePeriodOfNoLogs(t *testing.T) {
err = web3Service.processPastLogs(context.Background())
require.NoError(t, err)

cachedDeposits := web3Service.ChainStartDeposits()
cachedDeposits := web3Service.chainStartData.ChainstartDeposits
require.Equal(t, totalNumOfDeposits, len(cachedDeposits), "Did not cache the chain start deposits correctly")

// Receive the chain started event.

@@ -32,6 +32,22 @@ func WithHttpEndpoints(endpointStrings []string) Option {
}
}

// WithExecutionEndpoint for the execution node JSON-RPC endpoint.
func WithExecutionEndpoint(endpoint string) Option {
return func(s *Service) error {
s.cfg.executionEndpoint = endpoint
return nil
}
}

// WithExecutionClientJWTSecret for authenticating the execution node JSON-RPC endpoint.
func WithExecutionClientJWTSecret(jwtSecret []byte) Option {
return func(s *Service) error {
s.cfg.executionEndpointJWTSecret = jwtSecret
return nil
}
}
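With the two options above, the beacon node can point the powchain service at an authenticated execution endpoint. A hedged wiring sketch, with placeholder endpoint and secret values and the surrounding node setup omitted:

svc, err := powchain.NewService(ctx,
	powchain.WithExecutionEndpoint("http://localhost:8551"), // placeholder endpoint
	powchain.WithExecutionClientJWTSecret(jwtSecret),        // placeholder secret bytes
)
if err != nil {
	return err
}
_ = svc.EngineAPIClient() // non-nil only when an execution endpoint was configured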

// WithDepositContractAddress for the deposit contract.
func WithDepositContractAddress(addr common.Address) Option {
return func(s *Service) error {

@@ -27,10 +27,13 @@ import (
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
engine "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
nativev1 "github.com/prysmaticlabs/prysm/beacon-chain/state/state-native/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/container/trie"
contracts "github.com/prysmaticlabs/prysm/contracts/deposit"
@@ -78,7 +81,6 @@ var (
// ChainStartFetcher retrieves information pertaining to the chain start event
// of the beacon chain for usage across various services.
type ChainStartFetcher interface {
ChainStartDeposits() []*ethpb.Deposit
ChainStartEth1Data() *ethpb.Eth1Data
PreGenesisState() state.BeaconState
ClearPreGenesisData()
@@ -125,16 +127,18 @@ type RPCClient interface {

// config defines a config struct for dependencies into the service.
type config struct {
depositContractAddr common.Address
beaconDB db.HeadAccessDatabase
depositCache *depositcache.DepositCache
stateNotifier statefeed.Notifier
stateGen *stategen.State
eth1HeaderReqLimit uint64
beaconNodeStatsUpdater BeaconNodeStatsUpdater
httpEndpoints []network.Endpoint
currHttpEndpoint network.Endpoint
finalizedStateAtStartup state.BeaconState
depositContractAddr common.Address
beaconDB db.HeadAccessDatabase
depositCache *depositcache.DepositCache
stateNotifier statefeed.Notifier
stateGen *stategen.State
eth1HeaderReqLimit uint64
beaconNodeStatsUpdater BeaconNodeStatsUpdater
httpEndpoints []network.Endpoint
executionEndpoint string
executionEndpointJWTSecret []byte
currHttpEndpoint network.Endpoint
finalizedStateAtStartup state.BeaconState
}

// Service fetches important information about the canonical
@@ -153,6 +157,7 @@ type Service struct {
headTicker *time.Ticker
httpLogger bind.ContractFilterer
eth1DataFetcher RPCDataFetcher
engineAPIClient *engine.Client
rpcClient RPCClient
headerCache *headerCache // cache to store block hash/block height.
latestEth1Data *ethpb.LatestETH1Data
@@ -208,6 +213,10 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
}
}

if err := s.initializeEngineAPIClient(ctx); err != nil {
return nil, errors.Wrap(err, "unable to initialize engine API client")
}

if err := s.ensureValidPowchainData(ctx); err != nil {
return nil, errors.Wrap(err, "unable to validate powchain data")
}
@@ -262,16 +271,14 @@ func (s *Service) Stop() error {
return nil
}

// ChainStartDeposits returns a slice of validator deposit data processed
// by the deposit contract and cached in the powchain service.
func (s *Service) ChainStartDeposits() []*ethpb.Deposit {
return s.chainStartData.ChainstartDeposits
}

// ClearPreGenesisData clears out the stored chainstart deposits and beacon state.
func (s *Service) ClearPreGenesisData() {
s.chainStartData.ChainstartDeposits = []*ethpb.Deposit{}
s.preGenesisState = &v1.BeaconState{}
if features.Get().EnableNativeState {
s.preGenesisState = &nativev1.BeaconState{}
} else {
s.preGenesisState = &v1.BeaconState{}
}
}

// ChainStartEth1Data returns the eth1 data at chainstart.
@@ -298,6 +305,12 @@ func (s *Service) Status() error {
return nil
}

// EngineAPIClient returns the associated engine API client to interact
// with an execution node via JSON-RPC.
func (s *Service) EngineAPIClient() *engine.Client {
return s.engineAPIClient
}

func (s *Service) updateBeaconNodeStats() {
bs := clientstats.BeaconNodeStats{}
if len(s.cfg.httpEndpoints) > 1 {
@@ -363,45 +376,6 @@ func (s *Service) ETH1ConnectionErrors() []error {
return errs
}

// DepositRoot returns the Merkle root of the latest deposit trie
// from the ETH1.0 deposit contract.
func (s *Service) DepositRoot() [32]byte {
return s.depositTrie.HashTreeRoot()
}

// DepositTrie returns the sparse Merkle trie used for storing
// deposits from the ETH1.0 deposit contract.
func (s *Service) DepositTrie() *trie.SparseMerkleTrie {
return s.depositTrie
}

// LatestBlockHeight in the ETH1.0 chain.
func (s *Service) LatestBlockHeight() *big.Int {
return big.NewInt(int64(s.latestEth1Data.BlockHeight))
}

// LatestBlockHash in the ETH1.0 chain.
func (s *Service) LatestBlockHash() common.Hash {
return bytesutil.ToBytes32(s.latestEth1Data.BlockHash)
}

// AreAllDepositsProcessed determines if all the logs from the deposit contract
// are processed.
func (s *Service) AreAllDepositsProcessed() (bool, error) {
s.processingLock.RLock()
defer s.processingLock.RUnlock()
countByte, err := s.depositContractCaller.GetDepositCount(&bind.CallOpts{})
if err != nil {
return false, errors.Wrap(err, "could not get deposit count")
}
count := bytesutil.FromBytes8(countByte)
deposits := s.cfg.depositCache.AllDeposits(s.ctx, nil)
if count != uint64(len(deposits)) {
return false, nil
}
return true, nil
}

// refers to the latest eth1 block which follows the condition: eth1_timestamp +
// SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE <= current_unix_time
func (s *Service) followBlockHeight(_ context.Context) (uint64, error) {
@@ -798,13 +772,20 @@ func (s *Service) initPOWService() {
// Handle edge case with embedded genesis state by fetching genesis header to determine
// its height.
if s.chainStartData.Chainstarted && s.chainStartData.GenesisBlock == 0 {
genHeader, err := s.eth1DataFetcher.HeaderByHash(ctx, common.BytesToHash(s.chainStartData.Eth1Data.BlockHash))
if err != nil {
log.Errorf("Unable to retrieve genesis ETH1.0 chain header: %v", err)
s.retryETH1Node(err)
continue
genHash := common.BytesToHash(s.chainStartData.Eth1Data.BlockHash)
genBlock := s.chainStartData.GenesisBlock
// In the event our provided chainstart data references a non-existent blockhash
// we assume the genesis block to be 0.
if genHash != [32]byte{} {
genHeader, err := s.eth1DataFetcher.HeaderByHash(ctx, genHash)
if err != nil {
log.Errorf("Unable to retrieve genesis ETH1.0 chain header: %v", err)
s.retryETH1Node(err)
continue
}
genBlock = genHeader.Number.Uint64()
}
s.chainStartData.GenesisBlock = genHeader.Number.Uint64()
s.chainStartData.GenesisBlock = genBlock
if err := s.savePowchainData(ctx); err != nil {
log.Errorf("Unable to save powchain data: %v", err)
}
@@ -1073,6 +1054,22 @@ func (s *Service) ensureValidPowchainData(ctx context.Context) error {
return nil
}

// Initializes a connection to the engine API if an execution provider endpoint is set.
func (s *Service) initializeEngineAPIClient(ctx context.Context) error {
if s.cfg.executionEndpoint == "" {
return nil
}
opts := []engine.Option{
engine.WithJWTSecret(s.cfg.executionEndpointJWTSecret),
}
client, err := engine.New(ctx, s.cfg.executionEndpoint, opts...)
if err != nil {
return err
}
s.engineAPIClient = client
return nil
}

func dedupEndpoints(endpoints []string) []string {
selectionMap := make(map[string]bool)
newEndpoints := make([]string, 0, len(endpoints))

@@ -16,7 +16,6 @@ go_library(
"//beacon-chain/powchain/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//container/trie:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_go_ethereum//accounts/abi/bind/backends:go_default_library",
@@ -24,5 +23,6 @@ go_library(
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

@@ -2,15 +2,14 @@ package testing

import (
"context"
"errors"
"math/big"

"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/container/trie"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)

@@ -25,11 +24,6 @@ func (_ *FaultyMockPOWChain) Eth2GenesisPowchainInfo() (uint64, *big.Int) {
return 0, big.NewInt(0)
}

// LatestBlockHeight --
func (_ *FaultyMockPOWChain) LatestBlockHeight() *big.Int {
return big.NewInt(0)
}

// BlockExists --
func (f *FaultyMockPOWChain) BlockExists(_ context.Context, _ common.Hash) (bool, *big.Int, error) {
if f.HashesByHeight == nil {
@@ -54,21 +48,6 @@ func (_ *FaultyMockPOWChain) BlockByTimestamp(_ context.Context, _ uint64) (*typ
return &types.HeaderInfo{Number: big.NewInt(0)}, nil
}

// DepositRoot --
func (_ *FaultyMockPOWChain) DepositRoot() [32]byte {
return [32]byte{}
}

// DepositTrie --
func (_ *FaultyMockPOWChain) DepositTrie() *trie.SparseMerkleTrie {
return &trie.SparseMerkleTrie{}
}

// ChainStartDeposits --
func (_ *FaultyMockPOWChain) ChainStartDeposits() []*ethpb.Deposit {
return []*ethpb.Deposit{}
}

// ChainStartEth1Data --
func (_ *FaultyMockPOWChain) ChainStartEth1Data() *ethpb.Eth1Data {
return &ethpb.Eth1Data{}
@@ -76,7 +55,11 @@ func (_ *FaultyMockPOWChain) ChainStartEth1Data() *ethpb.Eth1Data {

// PreGenesisState --
func (_ *FaultyMockPOWChain) PreGenesisState() state.BeaconState {
return &v1.BeaconState{}
s, err := v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{})
if err != nil {
panic("could not initialize state")
}
return s
}

// ClearPreGenesisData --

@@ -16,7 +16,6 @@ import (
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain/types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/container/trie"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)
@@ -58,11 +57,6 @@ func (m *POWChain) Eth2GenesisPowchainInfo() (uint64, *big.Int) {
return uint64(GenesisTime), blk
}

// DepositTrie --
func (_ *POWChain) DepositTrie() *trie.SparseMerkleTrie {
return &trie.SparseMerkleTrie{}
}

// BlockExists --
func (m *POWChain) BlockExists(_ context.Context, hash common.Hash) (bool, *big.Int, error) {
// Reverse the map of heights by hash.
@@ -107,17 +101,6 @@ func (m *POWChain) BlockByTimestamp(_ context.Context, time uint64) (*types.Head
return &types.HeaderInfo{Number: chosenNumber, Time: chosenTime}, nil
}

// DepositRoot --
func (_ *POWChain) DepositRoot() [32]byte {
root := []byte("depositroot")
return bytesutil.ToBytes32(root)
}

// ChainStartDeposits --
func (_ *POWChain) ChainStartDeposits() []*ethpb.Deposit {
return []*ethpb.Deposit{}
}

// ChainStartEth1Data --
func (m *POWChain) ChainStartEth1Data() *ethpb.Eth1Data {
return m.Eth1Data

@@ -22,6 +22,7 @@ go_library(
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/powchain/engine-api-client/v1:go_default_library",
"//beacon-chain/rpc/eth/beacon:go_default_library",
"//beacon-chain/rpc/eth/debug:go_default_library",
"//beacon-chain/rpc/eth/events:go_default_library",

@@ -31,6 +31,7 @@ func TestGetSpec(t *testing.T) {
config.HysteresisQuotient = 9
config.HysteresisDownwardMultiplier = 10
config.HysteresisUpwardMultiplier = 11
config.SafeSlotsToImportOptimistically = 128
config.SafeSlotsToUpdateJustified = 12
config.Eth1FollowDistance = 13
config.TargetAggregatorsPerCommittee = 14
@@ -51,7 +52,6 @@ func TestGetSpec(t *testing.T) {
config.BellatrixForkEpoch = 101
config.ShardingForkVersion = []byte("ShardingForkVersion")
config.ShardingForkEpoch = 102
config.MinAnchorPowBlockDifficulty = 1000
config.BLSWithdrawalPrefixByte = byte('b')
config.GenesisDelay = 24
config.SecondsPerSlot = 25
@@ -99,7 +99,7 @@ func TestGetSpec(t *testing.T) {
config.MinSyncCommitteeParticipants = 71
config.TerminalBlockHash = common.HexToHash("TerminalBlockHash")
config.TerminalBlockHashActivationEpoch = 72
config.TerminalTotalDifficulty = 73
config.TerminalTotalDifficulty = "73"
config.FeeRecipient = common.HexToAddress("FeeRecipient")

var dbp [4]byte
@@ -130,7 +130,7 @@ func TestGetSpec(t *testing.T) {
resp, err := server.GetSpec(context.Background(), &emptypb.Empty{})
require.NoError(t, err)

assert.Equal(t, 99, len(resp.Data))
assert.Equal(t, 98, len(resp.Data))
for k, v := range resp.Data {
switch k {
case "CONFIG_NAME":
@@ -341,6 +341,8 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "70", v)
case "INTERVALS_PER_SLOT":
assert.Equal(t, "3", v)
case "SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY":
assert.Equal(t, "128", v)
default:
t.Errorf("Incorrect key: %s", k)
}

@@ -275,7 +275,7 @@ func (bs *Server) SubmitVoluntaryExit(ctx context.Context, req *ethpbv1.SignedVo
return nil, status.Errorf(codes.Internal, "Could not get exiting validator: %v", err)
}
alphaExit := migration.V1ExitToV1Alpha1(req)
err = blocks.VerifyExitAndSignature(validator, headState.Slot(), headState.Fork(), alphaExit, headState.GenesisValidatorRoot())
err = blocks.VerifyExitAndSignature(validator, headState.Slot(), headState.Fork(), alphaExit, headState.GenesisValidatorsRoot())
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "Invalid voluntary exit: %v", err)
}

@@ -34,7 +34,7 @@ func (bs *Server) GetGenesis(ctx context.Context, _ *emptypb.Empty) (*ethpb.Gene
if genesisTime.IsZero() {
return nil, status.Errorf(codes.NotFound, "Chain genesis info is not yet known")
}
validatorRoot := bs.ChainInfoFetcher.GenesisValidatorRoot()
validatorRoot := bs.ChainInfoFetcher.GenesisValidatorsRoot()
if bytes.Equal(validatorRoot[:], params.BeaconConfig().ZeroHash[:]) {
return nil, status.Errorf(codes.NotFound, "Chain genesis info is not yet known")
}

@@ -56,7 +56,7 @@ func TestGetGenesis(t *testing.T) {
assert.ErrorContains(t, "Chain genesis info is not yet known", err)
})

t.Run("No genesis validator root", func(t *testing.T) {
t.Run("No genesis validators root", func(t *testing.T) {
chainService := &chainMock.ChainService{
Genesis: genesis,
ValidatorsRoot: [32]byte{},

@@ -13,8 +13,7 @@ go_library(
"//beacon-chain/db:go_default_library",
"//beacon-chain/rpc/eth/helpers:go_default_library",
"//beacon-chain/rpc/statefetcher:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//beacon-chain/state/v2:go_default_library",
"//beacon-chain/state:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
"//proto/migration:go_default_library",

@@ -4,8 +4,7 @@ import (
"context"

"github.com/prysmaticlabs/prysm/beacon-chain/rpc/eth/helpers"
statev1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
statev2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
ethpbv2 "github.com/prysmaticlabs/prysm/proto/eth/v2"
"github.com/prysmaticlabs/prysm/proto/migration"
@@ -26,11 +25,10 @@ func (ds *Server) GetBeaconState(ctx context.Context, req *ethpbv1.StateRequest)
return nil, helpers.PrepareStateFetchGRPCError(err)
}

st, ok := beaconSt.(*statev1.BeaconState)
if !ok {
return nil, status.Error(codes.Internal, "State type assertion failed")
if beaconSt.Version() != version.Phase0 {
return nil, status.Error(codes.Internal, "State has incorrect type")
}
protoSt, err := migration.BeaconStateToV1(st)
protoSt, err := migration.BeaconStateToV1(beaconSt)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not convert state to proto: %v", err)
}
@@ -69,11 +67,7 @@ func (ds *Server) GetBeaconStateV2(ctx context.Context, req *ethpbv2.StateReques
}
switch beaconSt.Version() {
case version.Phase0:
st, ok := beaconSt.(*statev1.BeaconState)
if !ok {
return nil, status.Error(codes.Internal, "State type assertion failed")
}
protoSt, err := migration.BeaconStateToV1(st)
protoSt, err := migration.BeaconStateToV1(beaconSt)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not convert state to proto: %v", err)
}
@@ -84,7 +78,7 @@ func (ds *Server) GetBeaconStateV2(ctx context.Context, req *ethpbv2.StateReques
},
}, nil
case version.Altair:
altairState, ok := beaconSt.(*statev2.BeaconState)
altairState, ok := beaconSt.(state.BeaconStateAltair)
if !ok {
return nil, status.Error(codes.Internal, "Altair state type assertion failed")
}

@@ -910,7 +910,7 @@ func TestServer_StreamIndexedAttestations_OK(t *testing.T) {
},
},
}
domain, err := signing.Domain(headState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, headState.GenesisValidatorRoot())
domain, err := signing.Domain(headState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, headState.GenesisValidatorsRoot())
require.NoError(t, err)
encoded, err := signing.ComputeSigningRoot(attExample.Data, domain)
require.NoError(t, err)

@@ -68,7 +68,7 @@ func (ns *Server) GetGenesis(ctx context.Context, _ *empty.Empty) (*ethpb.Genesi
gt = timestamppb.New(genesisTime)
}

genValRoot := ns.GenesisFetcher.GenesisValidatorRoot()
genValRoot := ns.GenesisFetcher.GenesisValidatorsRoot()
return &ethpb.Genesis{
GenesisTime: gt,
DepositContractAddress: contractAddr,
@@ -227,7 +227,11 @@ func (ns *Server) GetETH1ConnectionStatus(ctx context.Context, _ *empty.Empty) (
errs := ns.POWChainInfoFetcher.ETH1ConnectionErrors()
// Extract string version of the errors.
for _, err := range errs {
errStrs = append(errStrs, err.Error())
if err == nil {
errStrs = append(errStrs, "")
} else {
errStrs = append(errStrs, err.Error())
}
}
return &ethpb.ETH1ConnectionStatus{
CurrentAddress: ns.POWChainInfoFetcher.CurrentETH1Endpoint(),

@@ -153,8 +153,8 @@ func TestNodeServer_ListPeers(t *testing.T) {
func TestNodeServer_GetETH1ConnectionStatus(t *testing.T) {
server := grpc.NewServer()
eps := []string{"foo", "bar"}
errs := []error{fmt.Errorf("error 1"), fmt.Errorf("error 2")}
errStrs := []string{"error 1", "error 2"}
errs := []error{fmt.Errorf("error 1"), fmt.Errorf("error 2"), nil}
errStrs := []string{"error 1", "error 2", ""}
mockFetcher := &testutil.MockPOWChainInfoFetcher{
CurrEndpoint: eps[0],
CurrError: errs[0],