Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-09 21:38:05 -05:00

Compare commits: rc5...test-blob- (350 commits)
| SHA1 |
| --- |
| c17151fe5a |
| a051824d13 |
| 8c31b93b21 |
| a35074f010 |
| 3b8fa38bf5 |
| 2abfc9e29c |
| 62c3c46282 |
| 4ebc1be298 |
| 913010bc02 |
| 7e71e44d2e |
| dc8ba012e9 |
| 8dddf1eaeb |
| 66727a853f |
| 10fadd0ac3 |
| b9ec6837ab |
| 0254d31810 |
| f5bfb8e9eb |
| 25ce2ac4dc |
| 9bc4d9d666 |
| 3d9d765741 |
| df94859f1d |
| 80e9042c6b |
| a6eef7e983 |
| b00f7f4592 |
| 6b7e8ac00e |
| 191e603e28 |
| 76c729f9fa |
| ca770f12ee |
| 74bebd9244 |
| 797cc360c7 |
| 79da486508 |
| 5bb1491d8f |
| c0ee781638 |
| 447b42044a |
| 6ec8d23d4f |
| 4d5827c527 |
| 88f03967dd |
| a41d80a03d |
| fb65287ba2 |
| 0cb46eb29a |
| abe9e7fa7e |
| 3536669a70 |
| 849c1dd25b |
| d430266b70 |
| 6d52516638 |
| 2ff3a82eac |
| d403b68d76 |
| b0768e39c3 |
| cae57ee9f6 |
| 3104fd217d |
| d1b9954566 |
| 033b165c08 |
| 78175ee0fd |
| a34cc7f7bf |
| 6f22cd7963 |
| 58967e4516 |
| 947c9fbe60 |
| 8cc1e67e6c |
| 2f3deac8b0 |
| aa44ba40ab |
| 8b268d646c |
| d6ecadb471 |
| 9a8bde448e |
| ce71b3b6b1 |
| 5fa30bf73a |
| b93aba6126 |
| 51ed80df69 |
| a614c4ac8c |
| 7b777a10a5 |
| bf1ab9951f |
| 5cea2b3855 |
| ab63757fe5 |
| 736ed1e003 |
| 6ee9707fd7 |
| e40835b1a9 |
| 216a420bbc |
| 4845abecb8 |
| 08a910e44f |
| 67ba2c4fe3 |
| 7efa501bdc |
| 879a694ab3 |
| 4e3b1881ed |
| 35d3707de7 |
| 1f468bd3f5 |
| ac32098c86 |
| e0af005c42 |
| f08af1bdbf |
| 6d0420fde5 |
| 5172e6e362 |
| 42df0f70b6 |
| dace0f6a2d |
| 26a5878181 |
| db6474a3e4 |
| b84851fd0d |
| 673702c100 |
| 520eb6baca |
| e6047dc344 |
| d86a452b15 |
| 67f9d0b9c4 |
| 21cd055b84 |
| 9f3bb623ec |
| b10a95097e |
| 4561f5cacb |
| 50b672a4db |
| ffbb73a59b |
| 649974f14d |
| 9ec0bc0734 |
| 9649e49658 |
| 49fdcb7347 |
| cd6ee956ed |
| ef95fd33f8 |
| 1a488241b0 |
| 5fdd3a3d66 |
| b6a32c050f |
| 055e225093 |
| 144218cb1b |
| 13b575a609 |
| b5a414eae9 |
| b94b347ace |
| f5ee225819 |
| 9cb48be14f |
| 85fa9951eb |
| ec72575fc9 |
| d9d1bb6d3d |
| ffcdc26618 |
| 96981a07b9 |
| 6b2721b239 |
| c79151a574 |
| 4b20234801 |
| 911048aa6d |
| 255e9693ee |
| 61c1216e3d |
| 17e1eaf0f3 |
| 9940943595 |
| 9a0f941870 |
| 5d0f54d332 |
| d602c94b7b |
| 6a5ecbd68f |
| 29dfcab505 |
| 16e5c903cc |
| 66682cb4e5 |
| 52faea8b7d |
| 8a78315682 |
| cab42a4ae3 |
| a5bdd42bdd |
| a26197f919 |
| 8b9cab457e |
| 080ce31395 |
| 7866e8a196 |
| d5d17e00b3 |
| 9c6a1331cf |
| d89c97634c |
| 7e95ca3705 |
| abd46b01b7 |
| 8629ac8417 |
| 304925aabf |
| 16d93e47a5 |
| 6dcb2bbf0d |
| deb138959a |
| 45e6f3bd00 |
| 55a9e0d51a |
| 3ddae600fb |
| 861ede8945 |
| 93f11f9047 |
| 56503110dd |
| f67d35dffd |
| efbca1b5b7 |
| 2de0ebaf8d |
| 0815ef94a3 |
| 092ffa99e5 |
| b05b67b264 |
| a5c6518c20 |
| da048395ce |
| f31f7be310 |
| e1a2267f86 |
| 3c9e4ee7f7 |
| 9ba32c9acd |
| d23008452e |
| f397cba1e0 |
| 3eecbb5b1a |
| 1583e93b48 |
| 849457df81 |
| 903cab75ee |
| ee108d4aff |
| 49bcc58762 |
| a08baf4a14 |
| 8c56dfdd46 |
| dcdd9af9db |
| a464cf5c60 |
| cc55c754dc |
| 2d483ab09f |
| d64e10a337 |
| 1e9ee10674 |
| 3ac395b39e |
| 6e26a6f128 |
| b512b92a8a |
| 5ff601a1b9 |
| 5823054519 |
| 3d196662bc |
| b0601580ef |
| c1f29ea651 |
| 881d1d435a |
| d1aae0c941 |
| 468cc23876 |
| d9646a9183 |
| 279cee42f1 |
| 57bdb907cc |
| 15d683c78f |
| bf6c8ced7d |
| 78fb685027 |
| a87536eba0 |
| 3f05395a00 |
| 85fc57d41e |
| 1e5976d5ce |
| 98c0b23350 |
| 039a0fffba |
| 90ec640e7a |
| 10acd31d25 |
| 4224014fad |
| df1e8b33d8 |
| cdb4ee42cc |
| d29baec77e |
| 53c189da9b |
| 277fbce61b |
| 0adc54b7ff |
| 1cbd7e9888 |
| 0a9e1658dd |
| 31d4a4cd11 |
| fbc4e73d31 |
| c1d4eaa79d |
| 760af6428e |
| dfa0ccf626 |
| 7a142cf324 |
| 1a51fdbd58 |
| 368a99ec8d |
| 1c7e734918 |
| 764d1325bf |
| 0cf30e9022 |
| 227b20f368 |
| d7d70bc25b |
| 82f6ddb693 |
| 9e4e82d2c5 |
| 9838369fe9 |
| 6085ad1bfa |
| d3851b27df |
| d6100dfdcb |
| c2144dac86 |
| a47ff569a8 |
| f8be022ef2 |
| 4f39e6b685 |
| c67b000633 |
| 02f7443586 |
| 6275e7df4e |
| 1b6b52fda1 |
| 5fa1fd84b9 |
| bd0c9f9e8d |
| 2532bb370c |
| 12efc6c2c1 |
| a6cc9ac9c5 |
| 031f5845a2 |
| b88559726c |
| ca6ddf4490 |
| 3ebb2fce94 |
| 62f6b07cba |
| f956f1ed6e |
| 1c0fa95053 |
| 04bf4a1060 |
| ae276fd371 |
| 104bdaed12 |
| 089a5d6ac2 |
| 16b0820193 |
| 4b02267e96 |
| 746584c453 |
| b56daaaca2 |
| b7a6fe88ee |
| 22d1c37b92 |
| 78a393f825 |
| ac8290c1bf |
| 5d0662b415 |
| 931e5e10c3 |
| c172f838b1 |
| c07ae29cd9 |
| 214c9bfd8b |
| 716140d64d |
| 088cb4ef59 |
| fa33e93a8e |
| d1472fc351 |
| 5c8c0c31d8 |
| 7f3c00c7a2 |
| c180dab791 |
| f24acc21c7 |
| 40b637849d |
| e7db1685df |
| eccbfd1011 |
| 90211f6769 |
| edc32ac18e |
| fe68e020e3 |
| 81e1e3544d |
| 09372d5c35 |
| 078a89e4ca |
| dbc6ae26a6 |
| b6f429867a |
| 09f50660ce |
| 189825b495 |
| 441cad58d4 |
| 1277d08f9e |
| e03de47db7 |
| 764b7ff610 |
| 307be7694e |
| c76ae1ef39 |
| d499db7f0e |
| a894b9f29a |
| 902e6b3905 |
| ed2d1c7bf9 |
| 14b73cbd47 |
| a39c7aa864 |
| 170bc9c8ec |
| 365c01fc29 |
| 3124785a08 |
| 60e6306107 |
| 42ccb7830a |
| 0bb03b9292 |
| ed6fbf1480 |
| 477cec6021 |
| 924500d111 |
| 0677504ef1 |
| ca2a7c4d9c |
| 28606629ad |
| c817279464 |
| 009d6ed8ed |
| 5cec1282a9 |
| 340170fd29 |
| 7ed0cc139a |
| 2c822213eb |
| 0894b9591c |
| f0ca45f9a2 |
| afc48c6485 |
| 93dce8a0cb |
| 149ccdaf39 |
| c08bb39ffe |
| 5083d8ab34 |
| 7552a5dd07 |
| c93d68f853 |
| 2b74db2dce |
| cc6c91415d |
| 6d7d7e0adc |
| 2105d777f0 |
| 14338afbdb |
| 3e8aa4023d |
| b443875e66 |
@@ -21,7 +21,7 @@ linters:
 linters-settings:
   gocognit:
     # TODO: We should target for < 50
-    min-complexity: 65
+    min-complexity: 69

 output:
   print-issued-lines: true
@@ -2,8 +2,8 @@

 [](https://buildkite.com/prysmatic-labs/prysm)
 [](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
-[](https://github.com/ethereum/consensus-specs/tree/v1.2.0)
-[](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.1/src/engine)
+[](https://github.com/ethereum/consensus-specs/tree/v1.3.0)
+[](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.2/src/engine)
 [](https://discord.gg/CTYGPUJ)
 [](https://www.gitpoap.io/gh/prysmaticlabs/prysm)
WORKSPACE (10 changed lines)

@@ -205,7 +205,7 @@ filegroup(
     url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
 )

-consensus_spec_version = "v1.3.0-rc.4"
+consensus_spec_version = "v1.3.0-rc.5"

 bls_test_version = "v0.1.1"

@@ -221,7 +221,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "519da3cbb181fe927e41b0d13c3aaad5f5f38fe0ba87ca51bd09a661c738bd6c",
+    sha256 = "266006512e71e62396e8f31be01639560c9d59a93c38220fd8f51fabefc8f5f3",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
 )

@@ -237,7 +237,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "894404302d3d4b0f3080d3221204c19de4e837f1b129f468a66747103174412e",
+    sha256 = "2ebf483830165909cb7961562fd369dedf079997a4832cc215a543898a73aa46",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
 )

@@ -253,7 +253,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "ca7a594a2f4be1103e01b5a1416f75a328b7555eae8b26308c07f80fa6d0f255",
+    sha256 = "333718ba5c907e0a99580caa8d28dd710543b3b271e4251581006d0e101fbce9",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
 )

@@ -268,7 +268,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "b4ed6c077c5f0857361412515b319fc8b26730c7d701d3245b5e6849b3974a4f",
+    sha256 = "78b6925b5a4208e32385fa4387d2c27b381a8ddd18d66d5a7787e7846b86bfc8",
     strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
     url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
 )
@@ -483,7 +483,7 @@ func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, e
         version := bytesutil.ToBytes4(vSlice)
         ofs = append(ofs, forks.ForkScheduleEntry{
             Version: version,
-            Epoch:   primitives.Epoch(uint64(epoch)),
+            Epoch:   primitives.Epoch(epoch),
         })
     }
     sort.Sort(ofs)
@@ -4,6 +4,7 @@ go_library(
     name = "go_default_library",
     srcs = [
         "chain_info.go",
+        "chain_info_forkchoice.go",
         "error.go",
         "execution_engine.go",
         "forkchoice_update_execution.go",
@@ -7,7 +7,6 @@ import (

     "github.com/pkg/errors"
     "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
-    "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
     doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
     "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
     fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
@@ -15,6 +14,7 @@ import (
     "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
     "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
     "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
+    ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
     ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
     "github.com/prysmaticlabs/prysm/v4/time/slots"
     "go.opencensus.io/trace"
@@ -28,12 +28,25 @@ type ChainInfoFetcher interface {
     CanonicalFetcher
     ForkFetcher
     HeadDomainFetcher
+    ForkchoiceFetcher
 }

-// HeadUpdater defines a common interface for methods in blockchain service
-// which allow to update the head info
-type HeadUpdater interface {
+// ForkchoiceFetcher defines a common interface for methods that access directly
+// forkchoice information. These typically require a lock and external callers
+// are requested to call methods within this blockchain package that takes care
+// of locking forkchoice
+type ForkchoiceFetcher interface {
+    Ancestor(context.Context, []byte, primitives.Slot) ([]byte, error)
+    CachedHeadRoot() [32]byte
+    GetProposerHead() [32]byte
+    SetForkChoiceGenesisTime(uint64)
     UpdateHead(context.Context, primitives.Slot)
+    HighestReceivedBlockSlot() primitives.Slot
+    ReceivedBlocksLastEpoch() (uint64, error)
+    InsertNode(context.Context, state.BeaconState, [32]byte) error
+    ForkChoiceDump(context.Context) (*ethpbv1.ForkChoiceDump, error)
+    NewSlot(context.Context, primitives.Slot) error
+    ProposerBoost() [32]byte
 }

 // TimeFetcher retrieves the Ethereum consensus data that's related to time.
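Because the new `ForkchoiceFetcher` is embedded in `ChainInfoFetcher`, `*Service` must satisfy every method listed above; the locked wrappers in the new `chain_info_forkchoice.go` further down provide them. A minimal sketch, not part of this changeset, of the idiomatic compile-time check for such a contract:

```go
// Sketch only, not in the diff: assert at compile time that *Service
// implements the new interface. The blank var exists purely for the type check.
var _ ForkchoiceFetcher = (*Service)(nil)
```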
@@ -67,7 +80,6 @@ type HeadFetcher interface {

 // ForkFetcher retrieves the current fork information of the Ethereum beacon chain.
 type ForkFetcher interface {
-    ForkChoicer() forkchoice.ForkChoicer
     CurrentFork() *ethpb.Fork
     GenesisFetcher
     TimeFetcher
@@ -96,25 +108,25 @@ type OptimisticModeFetcher interface {

 // FinalizedCheckpt returns the latest finalized checkpoint from chain store.
 func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
-    s.ForkChoicer().RLock()
-    defer s.ForkChoicer().RUnlock()
-    cp := s.ForkChoicer().FinalizedCheckpoint()
+    s.cfg.ForkChoiceStore.RLock()
+    defer s.cfg.ForkChoiceStore.RUnlock()
+    cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
     return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
 }

 // PreviousJustifiedCheckpt returns the current justified checkpoint from chain store.
 func (s *Service) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
-    s.ForkChoicer().RLock()
-    defer s.ForkChoicer().RUnlock()
-    cp := s.ForkChoicer().PreviousJustifiedCheckpoint()
+    s.cfg.ForkChoiceStore.RLock()
+    defer s.cfg.ForkChoiceStore.RUnlock()
+    cp := s.cfg.ForkChoiceStore.PreviousJustifiedCheckpoint()
     return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
 }

 // CurrentJustifiedCheckpt returns the current justified checkpoint from chain store.
 func (s *Service) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
-    s.ForkChoicer().RLock()
-    defer s.ForkChoicer().RUnlock()
-    cp := s.ForkChoicer().JustifiedCheckpoint()
+    s.cfg.ForkChoiceStore.RLock()
+    defer s.cfg.ForkChoiceStore.RUnlock()
+    cp := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
     return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
 }
@@ -277,8 +289,8 @@ func (s *Service) CurrentFork() *ethpb.Fork {

 // IsCanonical returns true if the input block root is part of the canonical chain.
 func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error) {
-    s.ForkChoicer().RLock()
-    defer s.ForkChoicer().RUnlock()
+    s.cfg.ForkChoiceStore.RLock()
+    defer s.cfg.ForkChoiceStore.RUnlock()
     // If the block has not been finalized, check fork choice store to see if the block is canonical
     if s.cfg.ForkChoiceStore.HasNode(blockRoot) {
         return s.cfg.ForkChoiceStore.IsCanonical(blockRoot), nil
@@ -288,14 +300,6 @@ func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, er
     return s.cfg.BeaconDB.IsFinalizedBlock(ctx, blockRoot), nil
 }

-// ChainHeads returns all possible chain heads (leaves of fork choice tree).
-// Heads roots and heads slots are returned.
-func (s *Service) ChainHeads() ([][32]byte, []primitives.Slot) {
-    s.ForkChoicer().RLock()
-    defer s.ForkChoicer().RUnlock()
-    return s.cfg.ForkChoiceStore.Tips()
-}
-
 // HeadPublicKeyToValidatorIndex returns the validator index of the `pubkey` in current head state.
 func (s *Service) HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool) {
     s.headLock.RLock()
@@ -320,11 +324,6 @@ func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index primiti
     return v.PublicKey(), nil
 }

-// ForkChoicer returns the forkchoice interface.
-func (s *Service) ForkChoicer() forkchoice.ForkChoicer {
-    return s.cfg.ForkChoiceStore
-}
-
 // IsOptimistic returns true if the current head is optimistic.
 func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
     if slots.ToEpoch(s.CurrentSlot()) < params.BeaconConfig().BellatrixForkEpoch {
@@ -334,8 +333,8 @@ func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
     headRoot := s.head.root
     s.headLock.RUnlock()

-    s.ForkChoicer().RLock()
-    defer s.ForkChoicer().RUnlock()
+    s.cfg.ForkChoiceStore.RLock()
+    defer s.cfg.ForkChoiceStore.RUnlock()
     optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(headRoot)
     if err == nil {
         return optimistic, nil
@@ -351,14 +350,14 @@ func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
 // IsFinalized returns true if the input root is finalized.
 // It first checks latest finalized root then checks finalized root index in DB.
 func (s *Service) IsFinalized(ctx context.Context, root [32]byte) bool {
-    s.ForkChoicer().RLock()
-    defer s.ForkChoicer().RUnlock()
-    if s.ForkChoicer().FinalizedCheckpoint().Root == root {
+    s.cfg.ForkChoiceStore.RLock()
+    defer s.cfg.ForkChoiceStore.RUnlock()
+    if s.cfg.ForkChoiceStore.FinalizedCheckpoint().Root == root {
         return true
     }
     // If node exists in our store, then it is not
     // finalized.
-    if s.ForkChoicer().HasNode(root) {
+    if s.cfg.ForkChoiceStore.HasNode(root) {
         return false
     }
     return s.cfg.BeaconDB.IsFinalizedBlock(ctx, root)
@@ -368,17 +367,17 @@ func (s *Service) IsFinalized(ctx context.Context, root [32]byte) bool {
 // This in particular means that the blockroot is a descendant of the
 // finalized checkpoint
 func (s *Service) InForkchoice(root [32]byte) bool {
-    s.ForkChoicer().RLock()
-    defer s.ForkChoicer().RUnlock()
-    return s.ForkChoicer().HasNode(root)
+    s.cfg.ForkChoiceStore.RLock()
+    defer s.cfg.ForkChoiceStore.RUnlock()
+    return s.cfg.ForkChoiceStore.HasNode(root)
 }

 // IsOptimisticForRoot takes the root as argument instead of the current head
 // and returns true if it is optimistic.
 func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
-    s.ForkChoicer().RLock()
+    s.cfg.ForkChoiceStore.RLock()
     optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(root)
-    s.ForkChoicer().RUnlock()
+    s.cfg.ForkChoiceStore.RUnlock()
     if err == nil {
         return optimistic, nil
     }
@@ -436,12 +435,42 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
     return !isCanonical, nil
 }

+// Ancestor returns the block root of an ancestry block from the input block root.
+//
+// Spec pseudocode definition:
+//
+//    def get_ancestor(store: Store, root: Root, slot: Slot) -> Root:
+//        block = store.blocks[root]
+//        if block.slot > slot:
+//            return get_ancestor(store, block.parent_root, slot)
+//        elif block.slot == slot:
+//            return root
+//        else:
+//            # root is older than queried slot, thus a skip slot. Return most recent root prior to slot
+//            return root
+func (s *Service) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
+    ctx, span := trace.StartSpan(ctx, "blockChain.ancestor")
+    defer span.End()
+
+    r := bytesutil.ToBytes32(root)
+    // Get ancestor root from fork choice store instead of recursively looking up blocks in DB.
+    // This is most optimal outcome.
+    s.cfg.ForkChoiceStore.RLock()
+    ar, err := s.cfg.ForkChoiceStore.AncestorRoot(ctx, r, slot)
+    s.cfg.ForkChoiceStore.RUnlock()
+    if err != nil {
+        // Try getting ancestor root from DB when failed to retrieve from fork choice store.
+        // This is the second line of defense for retrieving ancestor root.
+        ar, err = s.ancestorByDB(ctx, r, slot)
+        if err != nil {
+            return nil, err
+        }
+    }
+
+    return ar[:], nil
+}
+
 // SetGenesisTime sets the genesis time of beacon chain.
 func (s *Service) SetGenesisTime(t time.Time) {
     s.genesisTime = t
 }

-// ForkChoiceStore returns the fork choice store in the service.
-func (s *Service) ForkChoiceStore() forkchoice.ForkChoicer {
-    return s.cfg.ForkChoiceStore
-}
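The exported `Ancestor` keeps the two-tier lookup of the old unexported `ancestor` (deleted from process_block.go further down): consult forkchoice's in-memory store under a read lock first, and fall back to the recursive DB walk only on error. A minimal sketch of that fallback shape, with hypothetical function values:

```go
// Sketch, assuming hypothetical fast/slow lookups: answer from memory when
// possible, and use the slower persistent path as the second line of defense.
func lookupWithFallback(fast, slow func() ([32]byte, error)) ([32]byte, error) {
	if root, err := fast(); err == nil {
		return root, nil // common case: served by the in-memory store
	}
	return slow() // fallback: recursive database walk
}
```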
beacon-chain/blockchain/chain_info_forkchoice.go (new file, 80 lines)

@@ -0,0 +1,80 @@
+package blockchain
+
+import (
+    "context"
+
+    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
+    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
+    ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
+)
+
+// CachedHeadRoot returns the corresponding value from Forkchoice
+func (s *Service) CachedHeadRoot() [32]byte {
+    s.cfg.ForkChoiceStore.RLock()
+    defer s.cfg.ForkChoiceStore.RUnlock()
+    return s.cfg.ForkChoiceStore.CachedHeadRoot()
+}
+
+// GetProposerHead returns the corresponding value from forkchoice
+func (s *Service) GetProposerHead() [32]byte {
+    s.cfg.ForkChoiceStore.RLock()
+    defer s.cfg.ForkChoiceStore.RUnlock()
+    return s.cfg.ForkChoiceStore.GetProposerHead()
+}
+
+// SetForkChoiceGenesisTime sets the genesis time in Forkchoice
+func (s *Service) SetForkChoiceGenesisTime(timestamp uint64) {
+    s.cfg.ForkChoiceStore.Lock()
+    defer s.cfg.ForkChoiceStore.Unlock()
+    s.cfg.ForkChoiceStore.SetGenesisTime(timestamp)
+}
+
+// HighestReceivedBlockSlot returns the corresponding value from forkchoice
+func (s *Service) HighestReceivedBlockSlot() primitives.Slot {
+    s.cfg.ForkChoiceStore.RLock()
+    defer s.cfg.ForkChoiceStore.RUnlock()
+    return s.cfg.ForkChoiceStore.HighestReceivedBlockSlot()
+}
+
+// ReceivedBlocksLastEpoch returns the corresponding value from forkchoice
+func (s *Service) ReceivedBlocksLastEpoch() (uint64, error) {
+    s.cfg.ForkChoiceStore.RLock()
+    defer s.cfg.ForkChoiceStore.RUnlock()
+    return s.cfg.ForkChoiceStore.ReceivedBlocksLastEpoch()
+}
+
+// InsertNode is a wrapper for node insertion which is self locked
+func (s *Service) InsertNode(ctx context.Context, st state.BeaconState, root [32]byte) error {
+    s.cfg.ForkChoiceStore.Lock()
+    defer s.cfg.ForkChoiceStore.Unlock()
+    return s.cfg.ForkChoiceStore.InsertNode(ctx, st, root)
+}
+
+// ForkChoiceDump returns the corresponding value from forkchoice
+func (s *Service) ForkChoiceDump(ctx context.Context) (*ethpbv1.ForkChoiceDump, error) {
+    s.cfg.ForkChoiceStore.RLock()
+    defer s.cfg.ForkChoiceStore.RUnlock()
+    return s.cfg.ForkChoiceStore.ForkChoiceDump(ctx)
+}
+
+// NewSlot returns the corresponding value from forkchoice
+func (s *Service) NewSlot(ctx context.Context, slot primitives.Slot) error {
+    s.cfg.ForkChoiceStore.Lock()
+    defer s.cfg.ForkChoiceStore.Unlock()
+    return s.cfg.ForkChoiceStore.NewSlot(ctx, slot)
+}
+
+// ProposerBoost wraps the corresponding method from forkchoice
+func (s *Service) ProposerBoost() [32]byte {
+    s.cfg.ForkChoiceStore.Lock()
+    defer s.cfg.ForkChoiceStore.Unlock()
+    return s.cfg.ForkChoiceStore.ProposerBoost()
+}
+
+// ChainHeads returns all possible chain heads (leaves of fork choice tree).
+// Heads roots and heads slots are returned.
+func (s *Service) ChainHeads() ([][32]byte, []primitives.Slot) {
+    s.cfg.ForkChoiceStore.RLock()
+    defer s.cfg.ForkChoiceStore.RUnlock()
+    return s.cfg.ForkChoiceStore.Tips()
+}
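Every wrapper in this new file follows one pattern: acquire the appropriate lock on `cfg.ForkChoiceStore`, delegate, and release via `defer`. Read-only accessors take the shared `RLock`, while calls that can mutate forkchoice (`InsertNode`, `NewSlot`, `SetForkChoiceGenesisTime`) take the exclusive `Lock`. A self-contained sketch of the pattern, with hypothetical store and service types:

```go
package main

import (
	"fmt"
	"sync"
)

// store stands in for the forkchoice store: state guarded by an RWMutex.
type store struct {
	sync.RWMutex
	head [32]byte
}

// service mirrors the wrapper style above: exported methods own the locking,
// so callers never touch the store's mutex directly.
type service struct{ fc *store }

// CachedHead is a read, so the shared read lock suffices.
func (s *service) CachedHead() [32]byte {
	s.fc.RLock()
	defer s.fc.RUnlock()
	return s.fc.head
}

// SetHead mutates the store, so it takes the exclusive lock.
func (s *service) SetHead(root [32]byte) {
	s.fc.Lock()
	defer s.fc.Unlock()
	s.fc.head = root
}

func main() {
	s := &service{fc: &store{}}
	s.SetHead([32]byte{1})
	h := s.CachedHead()
	fmt.Printf("%x\n", h[:1])
}
```

Centralizing the locking in these wrappers means packages outside blockchain interact only with the methods, never with the store's mutex.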
@@ -71,12 +71,6 @@ func TestHeadRoot_Nil(t *testing.T) {
     assert.DeepEqual(t, params.BeaconConfig().ZeroHash[:], headRoot, "Incorrect pre chain start value")
 }

-func TestService_ForkChoiceStore(t *testing.T) {
-    c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
-    p := c.ForkChoiceStore()
-    require.Equal(t, primitives.Epoch(0), p.FinalizedCheckpoint().Epoch)
-}
-
 func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
     ctx := context.Background()
     beaconDB := testDB.SetupDB(t)

@@ -559,7 +553,7 @@ func TestService_IsFinalized(t *testing.T) {
     ctx := context.Background()
     c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}}
     r1 := [32]byte{'a'}
-    require.NoError(t, c.ForkChoiceStore().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
+    require.NoError(t, c.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
         Root: r1,
     }))
     b := util.NewBeaconBlock()
@@ -60,8 +60,8 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
         log.WithError(err).Error("Could not get execution payload for head block")
         return nil, nil
     }
-    finalizedHash := s.ForkChoicer().FinalizedPayloadBlockHash()
-    justifiedHash := s.ForkChoicer().JustifiedPayloadBlockHash()
+    finalizedHash := s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
+    justifiedHash := s.cfg.ForkChoiceStore.JustifiedPayloadBlockHash()
     fcs := &enginev1.ForkchoiceState{
         HeadBlockHash: headPayload.BlockHash(),
         SafeBlockHash: justifiedHash[:],

@@ -88,7 +88,7 @@
     if len(lastValidHash) == 0 {
         lastValidHash = defaultLatestValidHash
     }
-    invalidRoots, err := s.ForkChoicer().SetOptimisticToInvalid(ctx, headRoot, headBlk.ParentRoot(), bytesutil.ToBytes32(lastValidHash))
+    invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, headRoot, headBlk.ParentRoot(), bytesutil.ToBytes32(lastValidHash))
     if err != nil {
         log.WithError(err).Error("Could not set head root to invalid")
         return nil, nil

@@ -224,7 +224,7 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
     if err != nil {
         return false, err
     }
-    invalidRoots, err := s.ForkChoicer().SetOptimisticToInvalid(ctx, root, blk.Block().ParentRoot(), bytesutil.ToBytes32(lastValidHash))
+    invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, blk.Block().ParentRoot(), bytesutil.ToBytes32(lastValidHash))
     if err != nil {
         return false, err
     }

@@ -301,7 +301,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,

     var attr payloadattribute.Attributer
     switch st.Version() {
-    case version.Capella:
+    case version.Capella, version.Deneb:
         withdrawals, err := st.ExpectedWithdrawals()
         if err != nil {
             log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
@@ -89,13 +89,13 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, newHeadRoot
 // shouldOverrideFCU checks whether the incoming block is still subject to being
 // reorged or not by the next proposer.
 func (s *Service) shouldOverrideFCU(newHeadRoot [32]byte, proposingSlot primitives.Slot) bool {
-    headWeight, err := s.ForkChoicer().Weight(newHeadRoot)
+    headWeight, err := s.cfg.ForkChoiceStore.Weight(newHeadRoot)
     if err != nil {
         log.WithError(err).WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
     }
     currentSlot := s.CurrentSlot()
     if proposingSlot == currentSlot {
-        proposerHead := s.ForkChoicer().GetProposerHead()
+        proposerHead := s.cfg.ForkChoiceStore.GetProposerHead()
         if proposerHead != newHeadRoot {
             return true
         }
@@ -106,7 +106,7 @@
             params.BeaconConfig().SecondsPerSlot)
         lateBlockFailedAttemptSecondThreshold.Inc()
     } else {
-        if s.ForkChoicer().ShouldOverrideFCU() {
+        if s.cfg.ForkChoiceStore.ShouldOverrideFCU() {
             return true
         }
         secs, err := slots.SecondsSinceSlotStart(currentSlot,
@@ -28,6 +28,8 @@ import (
 // UpdateAndSaveHeadWithBalances updates the beacon state head after getting justified balanced from cache.
 // This function is only used in spec-tests, it does save the head after updating it.
 func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
+    s.cfg.ForkChoiceStore.Lock()
+    defer s.cfg.ForkChoiceStore.Unlock()
     headRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
     if err != nil {
         return errors.Wrap(err, "could not update head")

@@ -94,18 +96,18 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
     }
     oldHeadRoot := bytesutil.ToBytes32(r)
     if headBlock.Block().ParentRoot() != oldHeadRoot {
-        commonRoot, forkSlot, err := s.ForkChoicer().CommonAncestor(ctx, oldHeadRoot, newHeadRoot)
+        commonRoot, forkSlot, err := s.cfg.ForkChoiceStore.CommonAncestor(ctx, oldHeadRoot, newHeadRoot)
         if err != nil {
             log.WithError(err).Error("Could not find common ancestor root")
             commonRoot = params.BeaconConfig().ZeroHash
         }
         dis := headSlot + newHeadSlot - 2*forkSlot
         dep := math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot))
-        oldWeight, err := s.ForkChoicer().Weight(oldHeadRoot)
+        oldWeight, err := s.cfg.ForkChoiceStore.Weight(oldHeadRoot)
         if err != nil {
             log.WithField("root", fmt.Sprintf("%#x", oldHeadRoot)).Warn("could not determine node weight")
         }
-        newWeight, err := s.ForkChoicer().Weight(newHeadRoot)
+        newWeight, err := s.cfg.ForkChoiceStore.Weight(newHeadRoot)
         if err != nil {
             log.WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
         }

@@ -123,7 +125,7 @@
         reorgDistance.Observe(float64(dis))
         reorgDepth.Observe(float64(dep))

-    isOptimistic, err := s.ForkChoicer().IsOptimistic(newHeadRoot)
+    isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(newHeadRoot)
     if err != nil {
         return errors.Wrap(err, "could not check if node is optimistically synced")
     }

@@ -363,7 +365,7 @@ func (s *Service) notifyNewHeadEvent(
 // This saves the Attestations and BLSToExecChanges between `orphanedRoot` and the common ancestor root that is derived using `newHeadRoot`.
 // It also filters out the attestations that is one epoch older as a defense so invalid attestations don't flow into the attestation pool.
 func (s *Service) saveOrphanedOperations(ctx context.Context, orphanedRoot [32]byte, newHeadRoot [32]byte) error {
-    commonAncestorRoot, _, err := s.ForkChoicer().CommonAncestor(ctx, newHeadRoot, orphanedRoot)
+    commonAncestorRoot, _, err := s.cfg.ForkChoiceStore.CommonAncestor(ctx, newHeadRoot, orphanedRoot)
     switch {
     // Exit early if there's no common ancestor and root doesn't exist, there would be nothing to save.
     case errors.Is(err, forkchoice.ErrUnknownCommonAncestor):
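In the `saveHead` hunk above, the reorg metrics are plain slot arithmetic: `dis` sums the slots walked down both branches to the common ancestor, and `dep` is the longer of the two walks. A worked example with hypothetical slot numbers:

```go
package main

import "fmt"

func main() {
	// Hypothetical slots: old head at 10, new head at 11, fork point at 8.
	headSlot, newHeadSlot, forkSlot := uint64(10), uint64(11), uint64(8)

	// dis = headSlot + newHeadSlot - 2*forkSlot: slots traversed on both
	// branches combined; here (10-8) + (11-8) = 5.
	dis := headSlot + newHeadSlot - 2*forkSlot

	// dep = the longer of the two branch lengths; here max(2, 3) = 3.
	dep := headSlot - forkSlot
	if newHeadSlot-forkSlot > dep {
		dep = newHeadSlot - forkSlot
	}
	fmt.Println(dis, dep) // prints: 5 3
}
```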
@@ -308,7 +308,7 @@ func TestSaveOrphanedAtts(t *testing.T) {
         require.NoError(t, err)
         state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
         require.NoError(t, err)
-        require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
+        require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
         util.SaveBlock(t, ctx, beaconDB, blk)
     }

@@ -373,7 +373,7 @@ func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
         require.NoError(t, err)
         state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
         require.NoError(t, err)
-        require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
+        require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
         util.SaveBlock(t, ctx, beaconDB, blk)
     }

@@ -431,7 +431,7 @@ func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
         require.NoError(t, err)
         state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
         require.NoError(t, err)
-        require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
+        require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
         util.SaveBlock(t, ctx, beaconDB, blk)
     }

@@ -490,7 +490,7 @@ func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
         require.NoError(t, err)
         state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
         require.NoError(t, err)
-        require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
+        require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
         util.SaveBlock(t, ctx, beaconDB, blk)
     }
@@ -60,7 +60,13 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
             log = log.WithField("txCount", len(txs))
             txsPerSlotCount.Set(float64(len(txs)))
         }
-
     }
+    if b.Version() >= version.Deneb {
+        k, err := b.Body().BlobKzgCommitments()
+        if err != nil {
+            return err
+        }
+        log = log.WithField("blobCount", len(k))
+    }
     log.Info("Finished applying state transition")
     return nil

@@ -96,6 +102,7 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
             "finalizedEpoch": finalized.Epoch,
             "finalizedRoot":  fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
             "epoch":          slots.ToEpoch(block.Slot()),
+            "version":        version.String(block.Version()),
         }).Info("Synced new block")
     }
     return nil
@@ -108,13 +108,13 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
     }

     // Verify that the parent block is in forkchoice
-    if !s.ForkChoicer().HasNode(b.ParentRoot()) {
+    if !s.cfg.ForkChoiceStore.HasNode(b.ParentRoot()) {
         return ErrNotDescendantOfFinalized
     }

     // Save current justified and finalized epochs for future use.
-    currStoreJustifiedEpoch := s.ForkChoicer().JustifiedCheckpoint().Epoch
-    currStoreFinalizedEpoch := s.ForkChoicer().FinalizedCheckpoint().Epoch
+    currStoreJustifiedEpoch := s.cfg.ForkChoiceStore.JustifiedCheckpoint().Epoch
+    currStoreFinalizedEpoch := s.cfg.ForkChoiceStore.FinalizedCheckpoint().Epoch
     preStateFinalizedEpoch := preState.FinalizedCheckpoint().Epoch
     preStateJustifiedEpoch := preState.CurrentJustifiedCheckpoint().Epoch

@@ -187,18 +187,18 @@
         }()
     }

-    justified := s.ForkChoicer().JustifiedCheckpoint()
+    justified := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
     start := time.Now()
     headRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
     if err != nil {
         log.WithError(err).Warn("Could not update head")
     }
     if blockRoot != headRoot {
-        receivedWeight, err := s.ForkChoicer().Weight(blockRoot)
+        receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
         if err != nil {
             log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Warn("could not determine node weight")
         }
-        headWeight, err := s.ForkChoicer().Weight(headRoot)
+        headWeight, err := s.cfg.ForkChoiceStore.Weight(headRoot)
         if err != nil {
             log.WithField("root", fmt.Sprintf("%#x", headRoot)).Warn("could not determine node weight")
         }

@@ -252,7 +252,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB

     // Save finalized check point to db and more.
     postStateFinalizedEpoch := postState.FinalizedCheckpoint().Epoch
-    finalized := s.ForkChoicer().FinalizedCheckpoint()
+    finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
     if finalized.Epoch > currStoreFinalizedEpoch || (finalized.Epoch == postStateFinalizedEpoch && finalized.Epoch > preStateFinalizedEpoch) {
         if err := s.updateFinalized(ctx, &ethpb.Checkpoint{Epoch: finalized.Epoch, Root: finalized.Root[:]}); err != nil {
             return err

@@ -576,7 +576,7 @@ func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashing
     for _, slashing := range slashings {
         indices := blocks.SlashableAttesterIndices(slashing)
         for _, index := range indices {
-            s.ForkChoicer().InsertSlashedIndex(ctx, primitives.ValidatorIndex(index))
+            s.cfg.ForkChoiceStore.InsertSlashedIndex(ctx, primitives.ValidatorIndex(index))
         }
     }
 }
@@ -85,7 +85,7 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b interfaces.ReadOnlyBe
 // verifyBlkFinalizedSlot validates input block is not less than or equal
 // to current finalized slot.
 func (s *Service) verifyBlkFinalizedSlot(b interfaces.ReadOnlyBeaconBlock) error {
-    finalized := s.ForkChoicer().FinalizedCheckpoint()
+    finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
     finalizedSlot, err := slots.EpochStart(finalized.Epoch)
     if err != nil {
         return err

@@ -146,39 +146,6 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err
     return nil
 }

-// ancestor returns the block root of an ancestry block from the input block root.
-//
-// Spec pseudocode definition:
-//
-//    def get_ancestor(store: Store, root: Root, slot: Slot) -> Root:
-//        block = store.blocks[root]
-//        if block.slot > slot:
-//            return get_ancestor(store, block.parent_root, slot)
-//        elif block.slot == slot:
-//            return root
-//        else:
-//            # root is older than queried slot, thus a skip slot. Return most recent root prior to slot
-//            return root
-func (s *Service) ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
-    ctx, span := trace.StartSpan(ctx, "blockChain.ancestor")
-    defer span.End()
-
-    r := bytesutil.ToBytes32(root)
-    // Get ancestor root from fork choice store instead of recursively looking up blocks in DB.
-    // This is most optimal outcome.
-    ar, err := s.cfg.ForkChoiceStore.AncestorRoot(ctx, r, slot)
-    if err != nil {
-        // Try getting ancestor root from DB when failed to retrieve from fork choice store.
-        // This is the second line of defense for retrieving ancestor root.
-        ar, err = s.ancestorByDB(ctx, r, slot)
-        if err != nil {
-            return nil, err
-        }
-    }
-
-    return ar[:], nil
-}
-
 // This retrieves an ancestor root using DB. The look up is recursively looking up DB. Slower than `ancestorByForkChoiceStore`.
 func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot primitives.Slot) (root [32]byte, err error) {
     ctx, span := trace.StartSpan(ctx, "blockChain.ancestorByDB")

@@ -210,7 +177,7 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
     pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, 0)

     // Fork choice only matters from last finalized slot.
-    finalized := s.ForkChoicer().FinalizedCheckpoint()
+    finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
     fSlot, err := slots.EpochStart(finalized.Epoch)
     if err != nil {
         return err

@@ -236,7 +203,7 @@
     if len(pendingNodes) == 1 {
         return nil
     }
-    if root != s.ensureRootNotZeros(finalized.Root) && !s.ForkChoicer().HasNode(root) {
+    if root != s.ensureRootNotZeros(finalized.Root) && !s.cfg.ForkChoiceStore.HasNode(root) {
         return ErrNotDescendantOfFinalized
     }
     return s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes)
@@ -139,7 +139,7 @@ func TestStore_OnBlock(t *testing.T) {
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
             fRoot := bytesutil.ToBytes32(roots[0])
-            require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: fRoot}))
+            require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: fRoot}))
             root, err := tt.blk.Block.HashTreeRoot()
             assert.NoError(t, err)
             wsb, err := consensusblocks.NewSignedBeaconBlock(tt.blk)
@@ -288,9 +288,9 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
     r0 := bytesutil.ToBytes32(roots[0])
     state, blkRoot, err := prepareForkchoiceState(ctx, 0, r0, service.originBlockRoot, [32]byte{}, fcp, fcp)
     require.NoError(t, err)
-    require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
+    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
     fcp2 := &forkchoicetypes.Checkpoint{Epoch: 0, Root: r0}
-    require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(fcp2))
+    require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fcp2))

     err = service.fillInForkChoiceMissingBlocks(
         context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
@@ -339,9 +339,9 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
     r0 := bytesutil.ToBytes32(roots[0])
     state, blkRoot, err := prepareForkchoiceState(ctx, 0, r0, service.originBlockRoot, [32]byte{}, fcp, fcp)
     require.NoError(t, err)
-    require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
+    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
     fcp2 := &forkchoicetypes.Checkpoint{Epoch: 0, Root: r0}
-    require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(fcp2))
+    require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fcp2))

     err = service.fillInForkChoiceMissingBlocks(
         context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
@@ -406,7 +406,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
     beaconState, _ := util.DeterministicGenesisState(t, 32)

     // Set finalized epoch to 2.
-    require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 2, Root: r64}))
+    require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 2, Root: r64}))
     err = service.fillInForkChoiceMissingBlocks(
         context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
     require.NoError(t, err)
@@ -600,14 +600,14 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
     }

     // Slots 100 to 200 are skip slots. Requesting root at 150 will yield root at 100. The last physical block.
-    r, err := service.ancestor(context.Background(), r200[:], 150)
+    r, err := service.Ancestor(context.Background(), r200[:], 150)
     require.NoError(t, err)
     if bytesutil.ToBytes32(r) != r100 {
         t.Error("Did not get correct root")
     }

     // Slots 1 to 100 are skip slots. Requesting root at 50 will yield root at 1. The last physical block.
-    r, err = service.ancestor(context.Background(), r200[:], 50)
+    r, err = service.Ancestor(context.Background(), r200[:], 50)
     require.NoError(t, err)
     if bytesutil.ToBytes32(r) != r1 {
         t.Error("Did not get correct root")
@@ -648,7 +648,7 @@ func TestAncestor_CanUseForkchoice(t *testing.T) {
         require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
     }

-    r, err := service.ancestor(context.Background(), r200[:], 150)
+    r, err := service.Ancestor(context.Background(), r200[:], 150)
     require.NoError(t, err)
     if bytesutil.ToBytes32(r) != r100 {
         t.Error("Did not get correct root")
@@ -696,7 +696,7 @@ func TestAncestor_CanUseDB(t *testing.T) {
     require.NoError(t, err)
     require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))

-    r, err := service.ancestor(context.Background(), r200[:], 150)
+    r, err := service.Ancestor(context.Background(), r200[:], 150)
     require.NoError(t, err)
     if bytesutil.ToBytes32(r) != r100 {
         t.Error("Did not get correct root")
@@ -1363,7 +1363,7 @@ func Test_verifyBlkFinalizedSlot_invalidBlock(t *testing.T) {
     }
     service, err := NewService(ctx, opts...)
     require.NoError(t, err)
-    require.NoError(t, service.ForkChoicer().UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1}))
+    require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 1}))
     blk := util.HydrateBeaconBlock(&ethpb.BeaconBlock{Slot: 1})
     wb, err := consensusblocks.NewBeaconBlock(blk)
     require.NoError(t, err)
@@ -1459,7 +1459,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
         require.NoError(t, err)
     }
     // Check that we haven't justified the second epoch yet
-    jc := service.ForkChoicer().JustifiedCheckpoint()
+    jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
     require.Equal(t, primitives.Epoch(0), jc.Epoch)

     // import a block that justifies the second epoch

@@ -1474,14 +1474,14 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
     require.NoError(t, err)
     err = service.onBlock(ctx, wsb, firstInvalidRoot)
     require.NoError(t, err)
-    jc = service.ForkChoicer().JustifiedCheckpoint()
+    jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
     require.Equal(t, primitives.Epoch(2), jc.Epoch)

     sjc := validHeadState.CurrentJustifiedCheckpoint()
     require.Equal(t, primitives.Epoch(0), sjc.Epoch)
     lvh := b.Block.Body.ExecutionPayload.ParentHash
     // check our head
-    require.Equal(t, firstInvalidRoot, service.ForkChoicer().CachedHeadRoot())
+    require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())

     // import another block to find out that it was invalid
     mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: lvh}

@@ -1500,7 +1500,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
     // Check that forkchoice's head is the last invalid block imported. The
     // store's headroot is the previous head (since the invalid block did
    // not finish importing) one and that the node is optimistic
-    require.Equal(t, root, service.ForkChoicer().CachedHeadRoot())
+    require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
     headRoot, err := service.HeadRoot(ctx)
     require.NoError(t, err)
     require.Equal(t, firstInvalidRoot, bytesutil.ToBytes32(headRoot))

@@ -1522,7 +1522,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
     require.NoError(t, err)
     // Check the newly imported block is head, it justified the right
     // checkpoint and the node is no longer optimistic
-    require.Equal(t, root, service.ForkChoicer().CachedHeadRoot())
+    require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
     sjc = service.CurrentJustifiedCheckpt()
     require.Equal(t, jc.Epoch, sjc.Epoch)
     require.Equal(t, jc.Root, bytesutil.ToBytes32(sjc.Root))
@@ -1619,7 +1619,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
         require.NoError(t, err)
     }
     // Check that we haven't justified the second epoch yet
-    jc := service.ForkChoicer().JustifiedCheckpoint()
+    jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
     require.Equal(t, primitives.Epoch(0), jc.Epoch)

     // import a block that justifies the second epoch

@@ -1634,14 +1634,14 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
     require.NoError(t, err)
     err = service.onBlock(ctx, wsb, firstInvalidRoot)
     require.NoError(t, err)
-    jc = service.ForkChoicer().JustifiedCheckpoint()
+    jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
     require.Equal(t, primitives.Epoch(2), jc.Epoch)

     sjc := validHeadState.CurrentJustifiedCheckpoint()
     require.Equal(t, primitives.Epoch(0), sjc.Epoch)
     lvh := b.Block.Body.ExecutionPayload.ParentHash
     // check our head
-    require.Equal(t, firstInvalidRoot, service.ForkChoicer().CachedHeadRoot())
+    require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())

     // import another block to find out that it was invalid
     mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrInvalidPayloadStatus, NewPayloadResp: lvh}

@@ -1660,7 +1660,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
     // Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
     // not finish importing and it was never imported to forkchoice). Check
     // also that the node is optimistic
-    require.Equal(t, firstInvalidRoot, service.ForkChoicer().CachedHeadRoot())
+    require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
     headRoot, err := service.HeadRoot(ctx)
     require.NoError(t, err)
     require.Equal(t, firstInvalidRoot, bytesutil.ToBytes32(headRoot))

@@ -1682,7 +1682,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
     require.NoError(t, err)
     // Check the newly imported block is head, it justified the right
     // checkpoint and the node is no longer optimistic
-    require.Equal(t, root, service.ForkChoicer().CachedHeadRoot())
+    require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
     sjc = service.CurrentJustifiedCheckpt()
     require.Equal(t, jc.Epoch, sjc.Epoch)
     require.Equal(t, jc.Root, bytesutil.ToBytes32(sjc.Root))
@@ -1802,9 +1802,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
         require.NoError(t, err)
     }
     // Check that we have justified the second epoch
-    jc := service.ForkChoicer().JustifiedCheckpoint()
+    jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
     require.Equal(t, primitives.Epoch(2), jc.Epoch)
-    invalidHeadRoot := service.ForkChoicer().CachedHeadRoot()
+    invalidHeadRoot := service.cfg.ForkChoiceStore.CachedHeadRoot()

     // import block 19 to find out that the whole chain 13--18 was in fact
     // invalid

@@ -1825,7 +1825,7 @@
     // Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
     // not finish importing and it was never imported to forkchoice). Check
     // also that the node is optimistic
-    require.Equal(t, invalidHeadRoot, service.ForkChoicer().CachedHeadRoot())
+    require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
     headRoot, err := service.HeadRoot(ctx)
     require.NoError(t, err)
     require.Equal(t, invalidHeadRoot, bytesutil.ToBytes32(headRoot))

@@ -1855,7 +1855,7 @@
     require.NoError(t, err)
     require.NoError(t, service.onBlock(ctx, wsb, root))
     // Check that the head is still INVALID and the node is still optimistic
-    require.Equal(t, invalidHeadRoot, service.ForkChoicer().CachedHeadRoot())
+    require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
     optimistic, err = service.IsOptimistic(ctx)
     require.NoError(t, err)
     require.Equal(t, true, optimistic)

@@ -1877,7 +1877,7 @@
         require.NoError(t, err)
     }
     // Head should still be INVALID and the node optimistic
-    require.Equal(t, invalidHeadRoot, service.ForkChoicer().CachedHeadRoot())
+    require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
     optimistic, err = service.IsOptimistic(ctx)
     require.NoError(t, err)
     require.Equal(t, true, optimistic)

@@ -1893,7 +1893,7 @@
     require.NoError(t, err)
     err = service.onBlock(ctx, wsb, root)
     require.NoError(t, err)
-    require.Equal(t, root, service.ForkChoicer().CachedHeadRoot())
+    require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
     sjc = service.CurrentJustifiedCheckpt()
     require.Equal(t, primitives.Epoch(4), sjc.Epoch)
     optimistic, err = service.IsOptimistic(ctx)
@@ -2011,7 +2011,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
         require.NoError(t, service.onBlock(ctx, wsb, root))
     }
     // Check that we have justified the second epoch
-    jc := service.ForkChoicer().JustifiedCheckpoint()
+    jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
     require.Equal(t, primitives.Epoch(2), jc.Epoch)

     // import block 19 to find out that the whole chain 13--18 was in fact

@@ -2045,7 +2045,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
     require.NoError(t, service.StartFromSavedState(genesisState))

     // Forkchoice has the genesisRoot loaded at startup
-    require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.ForkChoicer().CachedHeadRoot()))
+    require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot()))
     // Service's store has the finalized state as headRoot
     headRoot, err := service.HeadRoot(ctx)
     require.NoError(t, err)

@@ -2072,7 +2072,7 @@
     // We use onBlockBatch here because the valid chain is missing in forkchoice
     require.NoError(t, service.onBlockBatch(ctx, []interfaces.ReadOnlySignedBeaconBlock{wsb}, [][32]byte{root}))
     // Check that the head is now VALID and the node is not optimistic
-    require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.ForkChoicer().CachedHeadRoot()))
+    require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot()))
     headRoot, err = service.HeadRoot(ctx)
     require.NoError(t, err)
     require.Equal(t, root, bytesutil.ToBytes32(headRoot))
@@ -56,7 +56,7 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestat
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r, err := s.ancestor(ctx, a.Data.BeaconBlockRoot, targetSlot)
|
||||
r, err := s.Ancestor(ctx, a.Data.BeaconBlockRoot, targetSlot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -100,17 +100,15 @@ func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
case <-s.ctx.Done():
return
case <-pat.C():
-s.ForkChoicer().Lock()
s.UpdateHead(s.ctx, s.CurrentSlot()+1)
-s.ForkChoicer().Unlock()
case <-st.C():
-s.ForkChoicer().Lock()
-if err := s.ForkChoicer().NewSlot(s.ctx, s.CurrentSlot()); err != nil {
+s.cfg.ForkChoiceStore.Lock()
+if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, s.CurrentSlot()); err != nil {
log.WithError(err).Error("could not process new slot")
}
+s.cfg.ForkChoiceStore.Unlock()

s.UpdateHead(s.ctx, s.CurrentSlot())
-s.ForkChoicer().Unlock()
}
}
}()
@@ -120,7 +118,8 @@ func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
-// The caller of this function MUST hold a lock in forkchoice
func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot) {
start := time.Now()

+s.cfg.ForkChoiceStore.Lock()
+defer s.cfg.ForkChoiceStore.Unlock()
// This function is only called at 10 seconds or 0 seconds into the slot
disparity := params.BeaconNetworkConfig().MaximumGossipClockDisparity
if !features.Get().DisableReorgLateBlocks {
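The two hunks above move forkchoice locking out of the callers and into UpdateHead itself. A minimal sketch of the before/after lock-ownership pattern; the type and method names are illustrative, not Prysm's actual forkchoice types:

package main

import "sync"

// store stands in for the forkchoice store.
type store struct{ mu sync.Mutex }

func (s *store) Lock()   { s.mu.Lock() }
func (s *store) Unlock() { s.mu.Unlock() }

func updateHeadRequiresLock(_ *store) { /* head update work */ }

// Before: every call site must remember to bracket the call with Lock/Unlock.
func updateHeadCallerLocked(s *store) {
	s.Lock()
	updateHeadRequiresLock(s)
	s.Unlock()
}

// After: the method owns its critical section, so call sites cannot forget
// the lock or release it in the wrong order.
func updateHeadSelfLocked(s *store) {
	s.Lock()
	defer s.Unlock()
	/* head update work */
}

func main() {
	s := &store{}
	updateHeadCallerLocked(s)
	updateHeadSelfLocked(s)
}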
@@ -67,12 +67,12 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
}

// Reports on block and fork choice metrics.
-cp := s.ForkChoicer().FinalizedCheckpoint()
+cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
finalized := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)

// Log block sync status.
-cp = s.ForkChoicer().JustifiedCheckpoint()
+cp = s.cfg.ForkChoiceStore.JustifiedCheckpoint()
justified := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
if err := logBlockSyncStatus(blockCopy.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix())); err != nil {
log.WithError(err).Error("Unable to log block sync status")
@@ -123,7 +123,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Rea
})

// Reports on blockCopy and fork choice metrics.
-cp := s.ForkChoicer().FinalizedCheckpoint()
+cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
finalized := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
}
@@ -131,7 +131,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Rea
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
-finalized := s.ForkChoicer().FinalizedCheckpoint()
+finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
if finalized == nil {
return errNilFinalizedInStore
}
@@ -152,8 +152,8 @@ func (s *Service) HasBlock(ctx context.Context, root [32]byte) bool {

// ReceiveAttesterSlashing receives an attester slashing and inserts it to forkchoice
func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing *ethpb.AttesterSlashing) {
-s.ForkChoicer().Lock()
-defer s.ForkChoicer().Unlock()
+s.cfg.ForkChoiceStore.Lock()
+defer s.cfg.ForkChoiceStore.Unlock()
s.InsertSlashingsToForkChoiceStore(ctx, []*ethpb.AttesterSlashing{slashing})
}
@@ -207,7 +207,7 @@ func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
currentEpoch := slots.ToEpoch(s.CurrentSlot())
// Prevent `sinceFinality` going underflow.
var sinceFinality primitives.Epoch
-finalized := s.ForkChoicer().FinalizedCheckpoint()
+finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
if finalized == nil {
return errNilFinalizedInStore
}
@@ -57,6 +57,11 @@ type mockBroadcaster struct {
broadcastCalled bool
}

+func (mb *mockBroadcaster) BroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.SignedBlobSidecar) error {
+//TODO implement me
+panic("implement me")
+}
+
func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
mb.broadcastCalled = true
return nil
@@ -27,6 +27,7 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
+"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
@@ -26,6 +26,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
+enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/sirupsen/logrus"
)
@@ -67,11 +68,12 @@ type ChainService struct {
ReceiveBlockMockErr error
OptimisticCheckRootReceived [32]byte
FinalizedRoots map[[32]byte]bool
+OptimisticRoots map[[32]byte]bool
}

-// ForkChoicer mocks the same method in the chain service
-func (s *ChainService) ForkChoicer() forkchoice.ForkChoicer {
-return s.ForkChoiceStore
+func (s *ChainService) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
+r, err := s.ForkChoiceStore.AncestorRoot(ctx, bytesutil.ToBytes32(root), slot)
+return r[:], err
}

// StateNotifier mocks the same method in the chain service.
@@ -457,7 +459,7 @@ func (s *ChainService) InForkchoice(_ [32]byte) bool {
// IsOptimisticForRoot mocks the same method in the chain service.
func (s *ChainService) IsOptimisticForRoot(_ context.Context, root [32]byte) (bool, error) {
s.OptimisticCheckRootReceived = root
-return s.Optimistic, nil
+return s.OptimisticRoots[root], nil
}

// UpdateHead mocks the same method in the chain service.
@@ -467,7 +469,7 @@ func (s *ChainService) UpdateHead(ctx context.Context, slot primitives.Slot) {
if err != nil {
logrus.WithError(err).Error("could not update head")
}
-err = s.ForkChoicer().InsertNode(ctx, st, root)
+err = s.ForkChoiceStore.InsertNode(ctx, st, root)
if err != nil {
logrus.WithError(err).Error("could not insert node to forkchoice")
}
@@ -514,3 +516,74 @@ func prepareForkchoiceState(
st, err := state_native.InitializeFromProtoBellatrix(base)
return st, blockRoot, err
}
+
+// CachedHeadRoot mocks the same method in the chain service
+func (s *ChainService) CachedHeadRoot() [32]byte {
+if s.ForkChoiceStore != nil {
+return s.ForkChoiceStore.CachedHeadRoot()
+}
+return [32]byte{}
+}
+
+// GetProposerHead mocks the same method in the chain service
+func (s *ChainService) GetProposerHead() [32]byte {
+if s.ForkChoiceStore != nil {
+return s.ForkChoiceStore.GetProposerHead()
+}
+return [32]byte{}
+}
+
+// SetForkChoiceGenesisTime mocks the same method in the chain service
+func (s *ChainService) SetForkChoiceGenesisTime(timestamp uint64) {
+if s.ForkChoiceStore != nil {
+s.ForkChoiceStore.SetGenesisTime(timestamp)
+}
+}
+
+// ReceivedBlocksLastEpoch mocks the same method in the chain service
+func (s *ChainService) ReceivedBlocksLastEpoch() (uint64, error) {
+if s.ForkChoiceStore != nil {
+return s.ForkChoiceStore.ReceivedBlocksLastEpoch()
+}
+return 0, nil
+}
+
+// HighestReceivedBlockSlot mocks the same method in the chain service
+func (s *ChainService) HighestReceivedBlockSlot() primitives.Slot {
+if s.ForkChoiceStore != nil {
+return s.ForkChoiceStore.HighestReceivedBlockSlot()
+}
+return 0
+}
+
+// InsertNode mocks the same method in the chain service
+func (s *ChainService) InsertNode(ctx context.Context, st state.BeaconState, root [32]byte) error {
+if s.ForkChoiceStore != nil {
+return s.ForkChoiceStore.InsertNode(ctx, st, root)
+}
+return nil
+}
+
+// ForkChoiceDump mocks the same method in the chain service
+func (s *ChainService) ForkChoiceDump(ctx context.Context) (*ethpbv1.ForkChoiceDump, error) {
+if s.ForkChoiceStore != nil {
+return s.ForkChoiceStore.ForkChoiceDump(ctx)
+}
+return nil, nil
+}
+
+// NewSlot mocks the same method in the chain service
+func (s *ChainService) NewSlot(ctx context.Context, slot primitives.Slot) error {
+if s.ForkChoiceStore != nil {
+return s.ForkChoiceStore.NewSlot(ctx, slot)
+}
+return nil
+}
+
+// ProposerBoost mocks the same method in the chain service
+func (s *ChainService) ProposerBoost() [32]byte {
+if s.ForkChoiceStore != nil {
+return s.ForkChoiceStore.ProposerBoost()
+}
+return [32]byte{}
+}
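The nil guards above let tests construct the mock with or without a backing forkchoice store. A hedged sketch of exercising the fallback paths; the import path for the mock package is an assumption:

package blockchain_test

import (
	"testing"

	mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
)

// Exercises only the nil-guard fallbacks defined above: with no
// ForkChoiceStore set, every accessor returns its zero value.
func TestMockFallsBackToZeroValues(t *testing.T) {
	svc := &mock.ChainService{}
	require.Equal(t, [32]byte{}, svc.CachedHeadRoot())
	require.Equal(t, [32]byte{}, svc.ProposerBoost())
	require.Equal(t, primitives.Slot(0), svc.HighestReceivedBlockSlot())
}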
@@ -78,7 +78,7 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
wsVerifier: wv,
}
require.NoError(t, fcs.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: tt.finalizedEpoch}))
-cp := s.ForkChoicer().FinalizedCheckpoint()
+cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
err = s.wsVerifier.VerifyWeakSubjectivity(context.Background(), cp.Epoch)
if tt.wantErr == nil {
require.NoError(t, err)
@@ -9,6 +9,110 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

+// UpgradeToDeneb updates a generic state to return the version Deneb state.
+func UpgradeToDeneb(state state.BeaconState) (state.BeaconState, error) {
+epoch := time.CurrentEpoch(state)
+
+currentSyncCommittee, err := state.CurrentSyncCommittee()
+if err != nil {
+return nil, err
+}
+nextSyncCommittee, err := state.NextSyncCommittee()
+if err != nil {
+return nil, err
+}
+prevEpochParticipation, err := state.PreviousEpochParticipation()
+if err != nil {
+return nil, err
+}
+currentEpochParticipation, err := state.CurrentEpochParticipation()
+if err != nil {
+return nil, err
+}
+inactivityScores, err := state.InactivityScores()
+if err != nil {
+return nil, err
+}
+payloadHeader, err := state.LatestExecutionPayloadHeader()
+if err != nil {
+return nil, err
+}
+txRoot, err := payloadHeader.TransactionsRoot()
+if err != nil {
+return nil, err
+}
+wdRoot, err := payloadHeader.WithdrawalsRoot()
+if err != nil {
+return nil, err
+}
+wi, err := state.NextWithdrawalIndex()
+if err != nil {
+return nil, err
+}
+vi, err := state.NextWithdrawalValidatorIndex()
+if err != nil {
+return nil, err
+}
+summaries, err := state.HistoricalSummaries()
+if err != nil {
+return nil, err
+}
+
+s := &ethpb.BeaconStateDeneb{
+GenesisTime: state.GenesisTime(),
+GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
+Slot: state.Slot(),
+Fork: &ethpb.Fork{
+PreviousVersion: state.Fork().CurrentVersion,
+CurrentVersion: params.BeaconConfig().DenebForkVersion,
+Epoch: epoch,
+},
+LatestBlockHeader: state.LatestBlockHeader(),
+BlockRoots: state.BlockRoots(),
+StateRoots: state.StateRoots(),
+HistoricalRoots: [][]byte{},
+Eth1Data: state.Eth1Data(),
+Eth1DataVotes: state.Eth1DataVotes(),
+Eth1DepositIndex: state.Eth1DepositIndex(),
+Validators: state.Validators(),
+Balances: state.Balances(),
+RandaoMixes: state.RandaoMixes(),
+Slashings: state.Slashings(),
+PreviousEpochParticipation: prevEpochParticipation,
+CurrentEpochParticipation: currentEpochParticipation,
+JustificationBits: state.JustificationBits(),
+PreviousJustifiedCheckpoint: state.PreviousJustifiedCheckpoint(),
+CurrentJustifiedCheckpoint: state.CurrentJustifiedCheckpoint(),
+FinalizedCheckpoint: state.FinalizedCheckpoint(),
+InactivityScores: inactivityScores,
+CurrentSyncCommittee: currentSyncCommittee,
+NextSyncCommittee: nextSyncCommittee,
+LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderDeneb{
+ParentHash: payloadHeader.ParentHash(),
+FeeRecipient: payloadHeader.FeeRecipient(),
+StateRoot: payloadHeader.StateRoot(),
+ReceiptsRoot: payloadHeader.ReceiptsRoot(),
+LogsBloom: payloadHeader.LogsBloom(),
+PrevRandao: payloadHeader.PrevRandao(),
+BlockNumber: payloadHeader.BlockNumber(),
+GasLimit: payloadHeader.GasLimit(),
+GasUsed: payloadHeader.GasUsed(),
+Timestamp: payloadHeader.Timestamp(),
+ExtraData: payloadHeader.ExtraData(),
+BaseFeePerGas: payloadHeader.BaseFeePerGas(),
+BlockHash: payloadHeader.BlockHash(),
+ExcessDataGas: make([]byte, 32),
+TransactionsRoot: txRoot,
+WithdrawalsRoot: wdRoot,
+},
+NextWithdrawalIndex: wi,
+NextWithdrawalValidatorIndex: vi,
+HistoricalSummaries: summaries,
+}
+
+return state_native.InitializeFromProtoUnsafeDeneb(s)
+}
+
// UpgradeToCapella updates a generic state to return the version Capella state.
func UpgradeToCapella(state state.BeaconState) (state.BeaconState, error) {
epoch := time.CurrentEpoch(state)
@@ -353,7 +353,7 @@ func ProcessRandaoMixesReset(state state.BeaconState) (state.BeaconState, error)
}

// ProcessHistoricalDataUpdate processes the updates to historical data during epoch processing.
-// From Capella onward, per spec,state's historical summaries are updated instead of historical roots.
+// From Capella onward, per spec, state's historical summaries are updated instead of historical roots.
func ProcessHistoricalDataUpdate(state state.BeaconState) (state.BeaconState, error) {
currentEpoch := time.CurrentEpoch(state)
nextEpoch := currentEpoch + 1
@@ -81,6 +81,15 @@ func CanUpgradeToCapella(slot primitives.Slot) bool {
return epochStart && capellaEpoch
}

+// CanUpgradeToDeneb returns true if the input `slot` can upgrade to Deneb.
+// Spec code:
+// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == DENEB_FORK_EPOCH
+func CanUpgradeToDeneb(slot primitives.Slot) bool {
+epochStart := slots.IsEpochStart(slot)
+denebEpoch := slots.ToEpoch(slot) == params.BeaconConfig().DenebForkEpoch
+return epochStart && denebEpoch
+}
+
// CanProcessEpoch checks the eligibility to process epoch.
// The epoch can be processed at the end of the last slot of every epoch.
//
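A worked example of the check above; the constants are illustrative (mainnet's 32 slots per epoch, a hypothetical Deneb fork epoch of 5):

package main

import "fmt"

const slotsPerEpoch = 32  // mainnet preset
const denebForkEpoch = 5  // hypothetical fork epoch for illustration

// canUpgradeToDeneb mirrors the logic above: the slot must open an epoch,
// and that epoch must be exactly the configured fork epoch.
func canUpgradeToDeneb(slot uint64) bool {
	epochStart := slot%slotsPerEpoch == 0
	denebEpoch := slot/slotsPerEpoch == denebForkEpoch
	return epochStart && denebEpoch
}

func main() {
	fmt.Println(canUpgradeToDeneb(160)) // true: 160 = 5*32, first slot of epoch 5
	fmt.Println(canUpgradeToDeneb(161)) // false: inside epoch 5, not its first slot
	fmt.Println(canUpgradeToDeneb(192)) // false: epoch 6, past the fork epoch
}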
@@ -23,7 +23,6 @@ go_library(
"//beacon-chain/core/execution:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/time:go_default_library",
-"//beacon-chain/core/transition/interop:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
@@ -46,6 +45,7 @@ go_library(
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
+"@com_github_protolambda_go_kzg//eth:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
@@ -302,6 +302,14 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot primitives.
return nil, err
}
}
+
+if time.CanUpgradeToDeneb(state.Slot()) {
+state, err = capella.UpgradeToDeneb(state)
+if err != nil {
+tracing.AnnotateError(span, err)
+return nil, err
+}
+}
}

if highestSlot < state.Slot() {
@@ -6,16 +6,18 @@ import (
"fmt"

"github.com/pkg/errors"
+"github.com/protolambda/go-kzg/eth"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
b "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
-"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition/interop"
v "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/validators"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
+"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
"github.com/prysmaticlabs/prysm/v4/runtime/version"

"go.opencensus.io/trace"
)
@@ -57,9 +59,6 @@ func ExecuteStateTransitionNoVerifyAnySig(
defer span.End()
var err error

-interop.WriteBlockToDisk(signed, false /* Has the block failed */)
-interop.WriteStateToDisk(st)
-
parentRoot := signed.Block().ParentRoot()
st, err = ProcessSlotsUsingNextSlotCache(ctx, st, parentRoot[:], signed.Block().Slot())
if err != nil {
@@ -256,7 +255,7 @@ func ProcessOperationsNoVerifyAttsSigs(
if err != nil {
return nil, err
}
-case version.Altair, version.Bellatrix, version.Capella:
+case version.Altair, version.Bellatrix, version.Capella, version.Deneb:
state, err = altairOperations(ctx, state, signedBeaconBlock)
if err != nil {
return nil, err
@@ -356,9 +355,48 @@ func ProcessBlockForStateRoot(
return nil, errors.Wrap(err, "process_sync_aggregate failed")
}

+if signed.Block().Version() == version.Deneb {
+err := ValidateBlobKzgs(ctx, signed.Block().Body())
+if err != nil {
+return nil, errors.Wrap(err, "could not validate blob kzgs")
+}
+}
+
return state, nil
}

+// ValidateBlobKzgs validates the blob kzgs in the beacon block.
+//
+// Spec code:
+// def process_blob_kzg_commitments(state: BeaconState, body: BeaconBlockBody):
+//
+//	assert verify_kzg_commitments_against_transactions(body.execution_payload.transactions, body.blob_kzg_commitments)
+func ValidateBlobKzgs(ctx context.Context, body interfaces.ReadOnlyBeaconBlockBody) error {
+_, span := trace.StartSpan(ctx, "core.state.ValidateBlobKzgs")
+defer span.End()
+
+payload, err := body.Execution()
+if err != nil {
+return errors.Wrap(err, "could not get execution payload from block")
+}
+blkKzgs, err := body.BlobKzgCommitments()
+if err != nil {
+return errors.Wrap(err, "could not get blob kzg commitments from block")
+}
+kzgs := make(eth.KZGCommitmentSequenceImpl, len(blkKzgs))
+for i := range blkKzgs {
+kzgs[i] = bytesutil.ToBytes48(blkKzgs[i])
+}
+txs, err := payload.Transactions()
+if err != nil {
+return errors.Wrap(err, "could not get transactions from payload")
+}
+if err := eth.VerifyKZGCommitmentsAgainstTransactions(txs, kzgs); err != nil {
+return err
+}
+return nil
+}
+
// This calls altair block operations.
func altairOperations(
ctx context.Context,
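The spec function referenced above matches each blob KZG commitment against the versioned hashes carried by the payload's blob transactions. A standalone sketch of the EIP-4844 hashing rule that matching relies on (versioned hash = a 0x01 version byte followed by the tail of the SHA-256 of the commitment); the zero commitment is purely illustrative:

package main

import (
	"crypto/sha256"
	"fmt"
)

const blobCommitmentVersionKZG = 0x01 // version byte defined by EIP-4844

// kzgToVersionedHash computes the versioned hash for a 48-byte KZG commitment:
// sha256(commitment) with its first byte replaced by the version byte.
func kzgToVersionedHash(commitment [48]byte) [32]byte {
	h := sha256.Sum256(commitment[:])
	h[0] = blobCommitmentVersionKZG
	return h
}

func main() {
	var commitment [48]byte
	fmt.Printf("%x\n", kzgToVersionedHash(commitment))
}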
@@ -54,6 +54,11 @@ type ReadOnlyDatabase interface {
// Fee recipients operations.
FeeRecipientByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (common.Address, error)
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
+
+// Blob operations.
+BlobSidecarsByRoot(ctx context.Context, beaconBlockRoot [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error)
+BlobSidecarsBySlot(ctx context.Context, slot primitives.Slot, indices ...uint64) ([]*ethpb.BlobSidecar, error)
+
// origin checkpoint sync support
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
BackfillBlockRoot(ctx context.Context) ([32]byte, error)
@@ -89,6 +94,10 @@ type NoHeadAccessDatabase interface {
SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, addrs []common.Address) error
SaveRegistrationsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, regs []*ethpb.ValidatorRegistrationV1) error
+
+// Blob operations.
+SaveBlobSidecar(ctx context.Context, sidecars []*ethpb.BlobSidecar) error
+DeleteBlobSidecar(ctx context.Context, beaconBlockRoot [32]byte) error

CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
}
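A hedged sketch of the intended call pattern for the interface additions above, using local stand-in types rather than the real db and ethpb packages:

package main

import "context"

// Minimal stand-ins; the real types live in Prysm's db and ethpb packages.
type BlobSidecar struct{ Index uint64 }

type BlobDB interface {
	SaveBlobSidecar(ctx context.Context, sidecars []*BlobSidecar) error
	BlobSidecarsByRoot(ctx context.Context, root [32]byte, indices ...uint64) ([]*BlobSidecar, error)
	DeleteBlobSidecar(ctx context.Context, root [32]byte) error
}

// storeAndPrune shows the intended lifecycle: write the full sidecar set for a
// block, read back a subset by index, and delete by root when pruning.
func storeAndPrune(ctx context.Context, db BlobDB, root [32]byte, scs []*BlobSidecar) error {
	if err := db.SaveBlobSidecar(ctx, scs); err != nil {
		return err
	}
	if _, err := db.BlobSidecarsByRoot(ctx, root, 0); err != nil { // just index 0
		return err
	}
	return db.DeleteBlobSidecar(ctx, root)
}

func main() {}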
@@ -5,6 +5,7 @@ go_library(
srcs = [
"archived_point.go",
"backup.go",
+"blob.go",
"blocks.go",
"checkpoint.go",
"deposit_contract.go",
@@ -74,6 +75,7 @@ go_test(
srcs = [
"archived_point_test.go",
"backup_test.go",
+"blob_test.go",
"blocks_test.go",
"checkpoint_test.go",
"deposit_contract_test.go",
@@ -107,9 +109,11 @@ go_test(
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
+"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/testing:go_default_library",
"//testing/assert:go_default_library",
+"//testing/assertions:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
beacon-chain/db/kv/blob.go (new file, 182 lines)
@@ -0,0 +1,182 @@
package kv

import (
"bytes"
"context"
"fmt"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/config/params"
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)

// SaveBlobSidecar saves the blobs for a given epoch in the sidecar bucket. When we receive a blob:
//
// 1. Convert slot using a modulo operator to [0, maxSlots] where maxSlots = MAX_BLOB_EPOCHS*SLOTS_PER_EPOCH
//
// 2. Compute key for blob as bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
//
// 3. Begin the save algorithm: If the incoming blob has a slot bigger than the saved slot at the spot
// in the rotating keys buffer, we overwrite all elements for that slot.
func (s *Store) SaveBlobSidecar(ctx context.Context, scs []*ethpb.BlobSidecar) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlobSidecar")
defer span.End()

if len(scs) == 0 {
return errors.New("nil or empty blob sidecars")
}
slot := scs[0].Slot

return s.db.Update(func(tx *bolt.Tx) error {
encodedBlobSidecar, err := encode(ctx, &ethpb.BlobSidecars{Sidecars: scs})
if err != nil {
return err
}
bkt := tx.Bucket(blobsBucket)
c := bkt.Cursor()
newKey := blobSidecarKey(scs[0])
rotatingBufferPrefix := newKey[0:8]
var replacingKey []byte
for k, _ := c.Seek(rotatingBufferPrefix); bytes.HasPrefix(k, rotatingBufferPrefix); k, _ = c.Next() {
if len(k) != 0 {
replacingKey = k
oldSlotBytes := replacingKey[8:16]
oldSlot := bytesutil.BytesToSlotBigEndian(oldSlotBytes)
if oldSlot >= slot {
return fmt.Errorf("attempted to save blob with slot %d but already have older blob with slot %d", slot, oldSlot)
}
break
}
}
// If there is no element stored at blob.slot % MAX_SLOTS_TO_PERSIST_BLOBS, then we simply
// store the blob by key and exit early.
if len(replacingKey) == 0 {
return bkt.Put(newKey, encodedBlobSidecar)
}

if err := bkt.Delete(replacingKey); err != nil {
log.WithError(err).Warnf("Could not delete blob with key %#x", replacingKey)
}
return bkt.Put(newKey, encodedBlobSidecar)
})
}

// BlobSidecarsByRoot retrieves the blobs for the given beacon block root.
// If the `indices` argument is omitted, all blobs for the root will be returned.
// Otherwise, the result will be filtered to only include the specified indices.
// An error will result if an invalid index is specified.
func (s *Store) BlobSidecarsByRoot(ctx context.Context, root [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobSidecarsByRoot")
defer span.End()

var enc []byte
if err := s.db.View(func(tx *bolt.Tx) error {
c := tx.Bucket(blobsBucket).Cursor()
// Bucket size is bounded and bolt cursors are fast. Moreover, a thin caching layer can be added.
for k, v := c.First(); k != nil; k, v = c.Next() {
if bytes.HasSuffix(k, root[:]) {
enc = v
break
}
}
return nil
}); err != nil {
return nil, err
}
if enc == nil {
return nil, ErrNotFound
}
sc := &ethpb.BlobSidecars{}
if err := decode(ctx, enc, sc); err != nil {
return nil, err
}

return filterForIndices(sc, indices...)
}

func filterForIndices(sc *ethpb.BlobSidecars, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
if len(indices) == 0 {
return sc.Sidecars, nil
}
// NB: This loop assumes that the BlobSidecars value stores the complete set of blobs for a block
// in ascending order from eg 0..3, without gaps. This allows us to assume the indices argument
// maps 1:1 with indices in the BlobSidecars storage object.
maxIdx := uint64(len(sc.Sidecars)) - 1
sidecars := make([]*ethpb.BlobSidecar, len(indices))
for i, idx := range indices {
if idx > maxIdx {
return nil, errors.Wrapf(ErrNotFound, "BlobSidecars missing index: index %d", idx)
}
sidecars[i] = sc.Sidecars[idx]
}
return sidecars, nil
}

// BlobSidecarsBySlot retrieves BlobSidecars for the given slot.
// If the `indices` argument is omitted, all blobs for the slot will be returned.
// Otherwise, the result will be filtered to only include the specified indices.
// An error will result if an invalid index is specified.
func (s *Store) BlobSidecarsBySlot(ctx context.Context, slot types.Slot, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobSidecarsBySlot")
defer span.End()

var enc []byte
if err := s.db.View(func(tx *bolt.Tx) error {
c := tx.Bucket(blobsBucket).Cursor()
// Bucket size is bounded and bolt cursors are fast. Moreover, a thin caching layer can be added.
for k, v := c.First(); k != nil; k, v = c.Next() {
slotInKey := bytesutil.BytesToSlotBigEndian(k[8:16])
if slotInKey == slot {
enc = v
break
}
}
return nil
}); err != nil {
return nil, err
}
if enc == nil {
return nil, ErrNotFound
}
sc := &ethpb.BlobSidecars{}
if err := decode(ctx, enc, sc); err != nil {
return nil, err
}

return filterForIndices(sc, indices...)
}

// DeleteBlobSidecar deletes all blob sidecars stored for the given beacon block root.
func (s *Store) DeleteBlobSidecar(ctx context.Context, beaconBlockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.DeleteBlobSidecar")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blobsBucket)
c := bkt.Cursor()
for k, _ := c.First(); k != nil; k, _ = c.Next() {
if bytes.HasSuffix(k, beaconBlockRoot[:]) {
if err := bkt.Delete(k); err != nil {
return err
}
}
}
return nil
})
}

// We define a blob sidecar key as: bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
// where slot_to_rotating_buffer(slot) = slot % MAX_SLOTS_TO_PERSIST_BLOBS.
func blobSidecarKey(blob *ethpb.BlobSidecar) []byte {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
maxEpochsToPersistBlobs := params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest
maxSlotsToPersistBlobs := types.Slot(maxEpochsToPersistBlobs.Mul(uint64(slotsPerEpoch)))
slotInRotatingBuffer := blob.Slot.ModSlot(maxSlotsToPersistBlobs)
key := bytesutil.SlotToBytesBigEndian(slotInRotatingBuffer)
key = append(key, bytesutil.SlotToBytesBigEndian(blob.Slot)...)
key = append(key, blob.BlockRoot...)
return key
}
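A standalone sketch of the rotating-buffer key layout defined above, with illustrative constants (32 slots per epoch, a hypothetical 4096-epoch retention window):

package main

import (
	"encoding/binary"
	"fmt"
)

const slotsPerEpoch = 32
const minEpochsForBlobsRequest = 4096 // illustrative retention window

// blobKey mirrors the layout above:
// bytes(slot % maxSlots) ++ bytes(slot) ++ blockRoot, big-endian throughout.
func blobKey(slot uint64, blockRoot [32]byte) []byte {
	maxSlots := uint64(minEpochsForBlobsRequest * slotsPerEpoch)
	key := make([]byte, 0, 8+8+32)
	key = binary.BigEndian.AppendUint64(key, slot%maxSlots) // rotating-buffer prefix
	key = binary.BigEndian.AppendUint64(key, slot)          // real slot, read back at k[8:16]
	key = append(key, blockRoot[:]...)
	return key
}

func main() {
	var root [32]byte
	// Two slots exactly one retention window apart share the same 8-byte
	// prefix, so the newer write evicts the older entry via a prefix seek.
	fmt.Printf("%x\n", blobKey(5, root)[:8])
	fmt.Printf("%x\n", blobKey(5+minEpochsForBlobsRequest*slotsPerEpoch, root)[:8])
}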
beacon-chain/db/kv/blob_test.go (new file, 239 lines)
@@ -0,0 +1,239 @@
package kv

import (
"context"
"crypto/rand"
"encoding/binary"
"fmt"
"testing"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assertions"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)

func equalBlobSlices(expect []*ethpb.BlobSidecar, got []*ethpb.BlobSidecar) error {
if len(expect) != len(got) {
return fmt.Errorf("mismatched lengths, expect=%d, got=%d", len(expect), len(got))
}
for i := 0; i < len(expect); i++ {
es := expect[i]
gs := got[i]
var e string
assertions.DeepEqual(assertions.SprintfAssertionLoggerFn(&e), es, gs)
if e != "" {
return errors.New(e)
}
}
return nil
}

func TestStore_BlobSidecars(t *testing.T) {
ctx := context.Background()

t.Run("empty", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, 0)
require.ErrorContains(t, "nil or empty blob sidecars", db.SaveBlobSidecar(ctx, scs))
})
t.Run("empty by root", func(t *testing.T) {
db := setupDB(t)
got, err := db.BlobSidecarsByRoot(ctx, [32]byte{})
require.ErrorIs(t, ErrNotFound, err)
require.Equal(t, 0, len(got))
})
t.Run("empty by slot", func(t *testing.T) {
db := setupDB(t)
got, err := db.BlobSidecarsBySlot(ctx, 1)
require.ErrorIs(t, ErrNotFound, err)
require.Equal(t, 0, len(got))
})
t.Run("save and retrieve by root (one)", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, 1)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, 1, len(scs))
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save and retrieve by root (max)", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs))
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save and retrieve valid subset by root", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs))

// we'll request indices 0 and 3, so make a slice with those indices for comparison
expect := make([]*ethpb.BlobSidecar, 2)
expect[0] = scs[0]
expect[1] = scs[3]

got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot), 0, 3)
require.NoError(t, err)
require.NoError(t, equalBlobSlices(expect, got))
require.Equal(t, uint64(0), got[0].Index)
require.Equal(t, uint64(3), got[1].Index)
})
t.Run("error for invalid index when retrieving by root", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs))

got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot), uint64(len(scs)))
require.ErrorIs(t, err, ErrNotFound)
require.Equal(t, 0, len(got))
})
t.Run("save and retrieve by slot (one)", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, 1)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, 1, len(scs))
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot)
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save and retrieve by slot (max)", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs))
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot)
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save and retrieve valid subset by slot", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs))

// we'll request indices 0 and 3, so make a slice with those indices for comparison
expect := make([]*ethpb.BlobSidecar, 2)
expect[0] = scs[0]
expect[1] = scs[3]

got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot, 0, 3)
require.NoError(t, err)
require.NoError(t, equalBlobSlices(expect, got))

require.Equal(t, uint64(0), got[0].Index)
require.Equal(t, uint64(3), got[1].Index)
})
t.Run("error for invalid index when retrieving by slot", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs))

got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot, uint64(len(scs)))
require.ErrorIs(t, err, ErrNotFound)
require.Equal(t, 0, len(got))
})
t.Run("delete works", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs))
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
require.NoError(t, db.DeleteBlobSidecar(ctx, bytesutil.ToBytes32(scs[0].BlockRoot)))
got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.ErrorIs(t, ErrNotFound, err)
require.Equal(t, 0, len(got))
})
t.Run("saving a blob with older slot", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs))
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
require.ErrorContains(t, "but already have older blob with slot", db.SaveBlobSidecar(ctx, scs))
})
t.Run("saving a new blob for rotation", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs))
oldBlockRoot := scs[0].BlockRoot
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(oldBlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))

newScs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
newRetentionSlot := primitives.Slot(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest.Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
newScs[0].Slot = scs[0].Slot + newRetentionSlot
require.NoError(t, db.SaveBlobSidecar(ctx, newScs))

_, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(oldBlockRoot))
require.ErrorIs(t, ErrNotFound, err)

got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(newScs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(newScs, got))
})
}

func generateBlobSidecars(t *testing.T, n uint64) []*ethpb.BlobSidecar {
blobSidecars := make([]*ethpb.BlobSidecar, n)
for i := uint64(0); i < n; i++ {
blobSidecars[i] = generateBlobSidecar(t, i)
}
return blobSidecars
}

func generateBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSidecar {
blockRoot := make([]byte, 32)
_, err := rand.Read(blockRoot)
require.NoError(t, err)
slot := make([]byte, 8)
_, err = rand.Read(slot)
require.NoError(t, err)
blockParentRoot := make([]byte, 32)
_, err = rand.Read(blockParentRoot)
require.NoError(t, err)
proposerIndex := make([]byte, 8)
_, err = rand.Read(proposerIndex)
require.NoError(t, err)
blobData := make([]byte, 131072)
_, err = rand.Read(blobData)
require.NoError(t, err)
blob := &enginev1.Blob{
Data: blobData,
}
kzgCommitment := make([]byte, 48)
_, err = rand.Read(kzgCommitment)
require.NoError(t, err)
kzgProof := make([]byte, 48)
_, err = rand.Read(kzgProof)
require.NoError(t, err)

return &ethpb.BlobSidecar{
BlockRoot: blockRoot,
Index: index,
Slot: primitives.Slot(binary.LittleEndian.Uint64(slot)),
BlockParentRoot: blockParentRoot,
ProposerIndex: primitives.ValidatorIndex(binary.LittleEndian.Uint64(proposerIndex)),
Blob: blob,
KzgCommitment: kzgCommitment,
KzgProof: kzgProof,
}
}
@@ -818,6 +818,16 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.ReadOnlySignedBea
if err := rawBlock.UnmarshalSSZ(enc[len(capellaBlindKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal blinded Capella block")
}
+case hasDenebKey(enc):
+rawBlock = &ethpb.SignedBeaconBlockDeneb{}
+if err := rawBlock.UnmarshalSSZ(enc[len(denebKey):]); err != nil {
+return nil, err
+}
+case hasDenebBlindKey(enc):
+rawBlock = &ethpb.SignedBlindedBeaconBlockDeneb{}
+if err := rawBlock.UnmarshalSSZ(enc[len(denebBlindKey):]); err != nil {
+return nil, errors.Wrap(err, "could not unmarshal blinded Deneb block")
+}
default:
// Marshal block bytes to phase 0 beacon block.
rawBlock = &ethpb.SignedBeaconBlock{}
@@ -854,6 +864,8 @@ func marshalBlockFull(
return nil, err
}
switch blk.Version() {
+case version.Deneb:
+return snappy.Encode(nil, append(denebKey, encodedBlock...)), nil
case version.Capella:
return snappy.Encode(nil, append(capellaKey, encodedBlock...)), nil
case version.Bellatrix:
@@ -888,6 +900,8 @@ func marshalBlockBlinded(
return nil, errors.Wrap(err, "could not marshal blinded block")
}
switch blk.Version() {
+case version.Deneb:
+return snappy.Encode(nil, append(denebBlindKey, encodedBlock...)), nil
case version.Capella:
return snappy.Encode(nil, append(capellaBlindKey, encodedBlock...)), nil
case version.Bellatrix:
@@ -37,3 +37,17 @@ func hasCapellaBlindKey(enc []byte) bool {
}
return bytes.Equal(enc[:len(capellaBlindKey)], capellaBlindKey)
}
+
+func hasDenebKey(enc []byte) bool {
+if len(denebKey) >= len(enc) {
+return false
+}
+return bytes.Equal(enc[:len(denebKey)], denebKey)
+}
+
+func hasDenebBlindKey(enc []byte) bool {
+if len(denebBlindKey) >= len(enc) {
+return false
+}
+return bytes.Equal(enc[:len(denebBlindKey)], denebBlindKey)
+}
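These fork keys pair with the marshal/unmarshal functions above: SSZ bytes are prefixed with the fork key and snappy-compressed on write, and the prefix routes decoding on read. A minimal sketch of the round trip (the payload bytes are illustrative):

package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

var denebKey = []byte("deneb")

// encodeWithKey prefixes raw SSZ bytes with the fork key, then compresses.
func encodeWithKey(key, ssz []byte) []byte {
	return snappy.Encode(nil, append(key, ssz...))
}

// decodeAndRoute decompresses, then uses the prefix to pick the decoder.
func decodeAndRoute(enc []byte) ([]byte, error) {
	raw, err := snappy.Decode(nil, enc)
	if err != nil {
		return nil, err
	}
	if len(denebKey) < len(raw) && bytes.Equal(raw[:len(denebKey)], denebKey) {
		return raw[len(denebKey):], nil // would be unmarshalled as a Deneb block
	}
	return nil, fmt.Errorf("unknown fork key")
}

func main() {
	ssz := []byte{0x01, 0x02, 0x03} // stand-in for SSZ block bytes
	out, err := decodeAndRoute(encodeWithKey(denebKey, ssz))
	fmt.Println(out, err) // [1 2 3] <nil>
}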
@@ -129,6 +129,8 @@ var Buckets = [][]byte{

feeRecipientBucket,
registrationBucket,
+
+blobsBucket,
}

// NewKVStore initializes a new boltDB key-value store at the directory
@@ -46,6 +46,7 @@ var (
finalizedCheckpointKey = []byte("finalized-checkpoint")
powchainDataKey = []byte("powchain-data")
lastValidatedCheckpointKey = []byte("last-validated-checkpoint")
+blobsBucket = []byte("blobs")

// Below keys are used to identify objects are to be fork compatible.
// Objects that are only compatible with specific forks should be prefixed with such keys.
@@ -55,6 +56,9 @@ var (
capellaKey = []byte("capella")
capellaBlindKey = []byte("blind-capella")
saveBlindedBeaconBlocksKey = []byte("save-blinded-beacon-blocks")
+denebKey = []byte("deneb")
+denebBlindKey = []byte("blind-deneb")
+
// block root included in the beacon state used by weak subjectivity initial sync
originCheckpointBlockRootKey = []byte("origin-checkpoint-block-root")
// block root tracking the progress of backfill, or pointing at genesis if backfill has not been initiated
@@ -473,6 +473,19 @@ func (s *Store) unmarshalState(_ context.Context, enc []byte, validatorEntries [
}

switch {
+case hasDenebKey(enc):
+protoState := &ethpb.BeaconStateDeneb{}
+if err := protoState.UnmarshalSSZ(enc[len(denebKey):]); err != nil {
+return nil, errors.Wrap(err, "failed to unmarshal encoding for Deneb")
+}
+ok, err := s.isStateValidatorMigrationOver()
+if err != nil {
+return nil, err
+}
+if ok {
+protoState.Validators = validatorEntries
+}
+return statenative.InitializeFromProtoUnsafeDeneb(protoState)
case hasCapellaKey(enc):
// Marshal state bytes to capella beacon state.
protoState := &ethpb.BeaconStateCapella{}
@@ -580,6 +593,19 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
return nil, err
}
return snappy.Encode(nil, append(capellaKey, rawObj...)), nil
+case *ethpb.BeaconStateDeneb:
+rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateDeneb)
+if !ok {
+return nil, errors.New("non valid inner state")
+}
+if rState == nil {
+return nil, errors.New("nil state")
+}
+rawObj, err := rState.MarshalSSZ()
+if err != nil {
+return nil, err
+}
+return snappy.Encode(nil, append(denebKey, rawObj...)), nil
default:
return nil, errors.New("invalid inner state")
}
@@ -35,6 +35,7 @@ const (
NewPayloadMethod = "engine_newPayloadV1"
// NewPayloadMethodV2 v2 request string for JSON-RPC.
NewPayloadMethodV2 = "engine_newPayloadV2"
+NewPayloadMethodV3 = "engine_newPayloadV3"
// ForkchoiceUpdatedMethod v1 request string for JSON-RPC.
ForkchoiceUpdatedMethod = "engine_forkchoiceUpdatedV1"
// ForkchoiceUpdatedMethodV2 v2 request string for JSON-RPC.
@@ -43,6 +44,9 @@ const (
GetPayloadMethod = "engine_getPayloadV1"
// GetPayloadMethodV2 v2 request string for JSON-RPC.
GetPayloadMethodV2 = "engine_getPayloadV2"
+GetPayloadMethodV3 = "engine_getPayloadV3"
+// GetBlobsBundleMethod v1 request string for JSON-RPC.
+GetBlobsBundleMethod = "engine_getBlobsBundleV1"
// ExchangeTransitionConfigurationMethod v1 request string for JSON-RPC.
ExchangeTransitionConfigurationMethod = "engine_exchangeTransitionConfigurationV1"
// ExecutionBlockByHashMethod request string for JSON-RPC.
@@ -89,6 +93,7 @@ type EngineCaller interface {
) error
ExecutionBlockByHash(ctx context.Context, hash common.Hash, withTxs bool) (*pb.ExecutionBlock, error)
GetTerminalBlockHash(ctx context.Context, transitionTime uint64) ([]byte, bool, error)
+GetBlobsBundle(ctx context.Context, payloadId [8]byte) (*pb.BlobsBundle, error)
}

var EmptyBlockHash = errors.New("Block hash is empty 0x0000...")
@@ -126,6 +131,15 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa
if err != nil {
return nil, handleRPCError(err)
}
+case *pb.ExecutionPayloadDeneb:
+payloadPb, ok := payload.Proto().(*pb.ExecutionPayloadDeneb)
+if !ok {
+return nil, errors.New("execution data must be a Deneb execution payload")
+}
+err := s.rpcClient.CallContext(ctx, result, NewPayloadMethodV3, payloadPb)
+if err != nil {
+return nil, handleRPCError(err)
+}
default:
return nil, errors.New("unknown execution data type")
}
@@ -173,7 +187,7 @@ func (s *Service) ForkchoiceUpdated(
if err != nil {
return nil, nil, handleRPCError(err)
}
-case version.Capella:
+case version.Capella, version.Deneb:
a, err := attrs.PbV2()
if err != nil {
return nil, nil, err
@@ -215,6 +229,15 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primit
ctx, cancel := context.WithDeadline(ctx, d)
defer cancel()

+if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch {
+result := &pb.ExecutionPayloadDenebWithValue{}
+err := s.rpcClient.CallContext(ctx, result, GetPayloadMethodV3, pb.PayloadIDBytes(payloadId))
+if err != nil {
+return nil, handleRPCError(err)
+}
+return blocks.WrappedExecutionPayloadDeneb(result.Payload, big.NewInt(0).SetBytes(bytesutil.ReverseByteOrder(result.Value)))
+}
+
if slots.ToEpoch(slot) >= params.BeaconConfig().CapellaForkEpoch {
result := &pb.ExecutionPayloadCapellaWithValue{}
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethodV2, pb.PayloadIDBytes(payloadId))
@@ -422,6 +445,19 @@ func (s *Service) ExecutionBlocksByHashes(ctx context.Context, hashes []common.H
return execBlks, nil
}

+// GetBlobsBundle calls the engine_getBlobsBundleV1 method via JSON-RPC.
+func (s *Service) GetBlobsBundle(ctx context.Context, payloadId [8]byte) (*pb.BlobsBundle, error) {
+ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobsBundle")
+defer span.End()
+
+d := time.Now().Add(defaultEngineTimeout)
+ctx, cancel := context.WithDeadline(ctx, d)
+defer cancel()
+result := &pb.BlobsBundle{}
+err := s.rpcClient.CallContext(ctx, result, GetBlobsBundleMethod, pb.PayloadIDBytes(payloadId))
+return result, handleRPCError(err)
+}
+
// HeaderByHash returns the relevant header details for the provided block hash.
func (s *Service) HeaderByHash(ctx context.Context, hash common.Hash) (*types.HeaderInfo, error) {
var hdr *types.HeaderInfo
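Together with GetPayload above, the proposer-side flow is: a prior forkchoiceUpdated call yields a payload ID, which then feeds both the payload and the blobs-bundle requests. A sketch of that sequence against local stand-in types (the real signatures live in Prysm's execution service and may differ in detail):

package main

import "context"

// Minimal stand-ins mirroring the engine methods used at proposal time.
type ExecutionData interface{}
type BlobsBundle struct{}

type engineCaller interface {
	GetPayload(ctx context.Context, payloadID [8]byte, slot uint64) (ExecutionData, error)
	GetBlobsBundle(ctx context.Context, payloadID [8]byte) (*BlobsBundle, error)
}

// buildDenebBlockInputs sketches the flow: one payload ID feeds both calls.
func buildDenebBlockInputs(ctx context.Context, c engineCaller, payloadID [8]byte, slot uint64) error {
	payload, err := c.GetPayload(ctx, payloadID, slot) // engine_getPayloadV3 at Deneb epochs
	if err != nil {
		return err
	}
	bundle, err := c.GetBlobsBundle(ctx, payloadID) // engine_getBlobsBundleV1
	if err != nil {
		return err
	}
	_ = payload // embedded in the beacon block body
	_ = bundle  // commitments into the block, blobs into sidecars
	return nil
}

func main() {}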
@@ -405,8 +405,8 @@ func TestProcessETH2GenesisLog_CorrectNumOfDeposits(t *testing.T) {
web3Service.rpcClient = &mockExecution.RPCClient{Backend: testAcc.Backend}
web3Service.httpLogger = testAcc.Backend
web3Service.latestEth1Data.LastRequestedBlock = 0
-web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
-web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time()
+web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().Number.Uint64()
+web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time
bConfig := params.MinimalSpecConfig().Copy()
bConfig.MinGenesisTime = 0
bConfig.SecondsPerETH1Block = 10
@@ -444,8 +444,8 @@ func TestProcessETH2GenesisLog_CorrectNumOfDeposits(t *testing.T) {
for i := uint64(0); i < params.BeaconConfig().Eth1FollowDistance; i++ {
testAcc.Backend.Commit()
}
-web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
-web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time()
+web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().Number.Uint64()
+web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time

// Set up our subscriber now to listen for the chain started event.
stateChannel := make(chan *feed.Event, 1)
@@ -502,8 +502,8 @@ func TestProcessETH2GenesisLog_LargePeriodOfNoLogs(t *testing.T) {
web3Service.rpcClient = &mockExecution.RPCClient{Backend: testAcc.Backend}
web3Service.httpLogger = testAcc.Backend
web3Service.latestEth1Data.LastRequestedBlock = 0
-web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
-web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time()
+web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().Number.Uint64()
+web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time
bConfig := params.MinimalSpecConfig().Copy()
bConfig.SecondsPerETH1Block = 10
params.OverrideBeaconConfig(bConfig)
@@ -540,14 +540,14 @@ func TestProcessETH2GenesisLog_LargePeriodOfNoLogs(t *testing.T) {
for i := uint64(0); i < 1500; i++ {
testAcc.Backend.Commit()
}
-wantedGenesisTime := testAcc.Backend.Blockchain().CurrentBlock().Time()
+wantedGenesisTime := testAcc.Backend.Blockchain().CurrentBlock().Time

// Forward the chain to account for the follow distance
for i := uint64(0); i < params.BeaconConfig().Eth1FollowDistance; i++ {
testAcc.Backend.Commit()
}
-web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
-web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time()
+web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().Number.Uint64()
+web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time

// Set the genesis time 500 blocks ahead of the last
// deposit log.
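These mechanical call-site updates appear to track go-ethereum's change of BlockChain.CurrentBlock() from returning a full *types.Block to returning a *types.Header, so height and timestamp are now read straight off the header:

// Old accessor shape:  blockchain.CurrentBlock().NumberU64()
// New accessor shape:  blockchain.CurrentBlock().Number.Uint64()
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	// `head` plays the role of the value CurrentBlock() now returns.
	head := &types.Header{Number: big.NewInt(42), Time: 1700000000}
	fmt.Println(head.Number.Uint64()) // height, formerly NumberU64() on the block
	fmt.Println(head.Time)            // timestamp field, formerly the Time() method
}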
@@ -3,6 +3,7 @@ package execution
import (
"context"
"fmt"
+"net/http"
"net/url"
"strings"
"time"
@@ -14,7 +15,6 @@ import (
contracts "github.com/prysmaticlabs/prysm/v4/contracts/deposit"
"github.com/prysmaticlabs/prysm/v4/io/logs"
"github.com/prysmaticlabs/prysm/v4/network"
-"github.com/prysmaticlabs/prysm/v4/network/authorization"
)

func (s *Service) setupExecutionClientConnections(ctx context.Context, currEndpoint network.Endpoint) error {
@@ -34,7 +34,7 @@ func (s *Service) setupExecutionClientConnections(ctx context.Context, currEndpo
}
s.depositContractCaller = depositContractCaller

-// Ensure we have the correct chain and deposit IDs.
+//Ensure we have the correct chain and deposit IDs.
if err := ensureCorrectExecutionChain(ctx, fetcher); err != nil {
client.Close()
errStr := err.Error()
@@ -113,9 +113,21 @@ func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.End
if err != nil {
return nil, err
}
+headers := http.Header{}
+for _, h := range s.cfg.headers {
+if h == "" {
+continue
+}
+keyValue := strings.Split(h, "=")
+if len(keyValue) < 2 {
+log.Warnf("Incorrect HTTP header flag format. Skipping %v", keyValue[0])
+continue
+}
+headers.Set(keyValue[0], strings.Join(keyValue[1:], "="))
+}
switch u.Scheme {
case "http", "https":
-client, err = gethRPC.DialOptions(ctx, endpoint.Url, gethRPC.WithHTTPClient(endpoint.HttpClient()))
+client, err = gethRPC.DialOptions(ctx, endpoint.Url, gethRPC.WithHTTPClient(endpoint.HttpClient()), gethRPC.WithHeaders(headers))
if err != nil {
return nil, err
}
@@ -127,13 +139,6 @@ func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.End
default:
return nil, fmt.Errorf("no known transport for URL scheme %q", u.Scheme)
}
-if endpoint.Auth.Method != authorization.None {
-header, err := endpoint.Auth.ToHeaderValue()
-if err != nil {
-return nil, err
-}
-client.SetHeader("Authorization", header)
-}
for _, h := range s.cfg.headers {
if h != "" {
keyValue := strings.Split(h, "=")
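The strings.Join(keyValue[1:], "=") in the new parsing loop matters when a header value itself contains '=' (base64 padding in a Basic auth credential, say): splitting and rejoining preserves everything after the first separator. A small illustration (the flag value is hypothetical):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical header flag entry whose value contains '='.
	h := "Authorization=Basic dXNlcjpwYXNz=="
	keyValue := strings.Split(h, "=")
	// A naive keyValue[1] would drop the trailing padding:
	fmt.Println(keyValue[1]) // "Basic dXNlcjpwYXNz"
	// Rejoining keeps the full value intact:
	fmt.Println(strings.Join(keyValue[1:], "=")) // "Basic dXNlcjpwYXNz=="
}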
@@ -210,14 +210,14 @@ func TestFollowBlock_OK(t *testing.T) {
|
||||
|
||||
web3Service = setDefaultMocks(web3Service)
|
||||
web3Service.rpcClient = &mockExecution.RPCClient{Backend: testAcc.Backend}
|
||||
baseHeight := testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
|
||||
baseHeight := testAcc.Backend.Blockchain().CurrentBlock().Number.Uint64()
|
||||
// process follow_distance blocks
|
||||
for i := 0; i < int(params.BeaconConfig().Eth1FollowDistance); i++ {
|
||||
testAcc.Backend.Commit()
|
||||
}
|
||||
// set current height
|
||||
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
|
||||
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time()
|
||||
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().Number.Uint64()
|
||||
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time
|
||||
|
||||
h, err := web3Service.followedBlockHeight(context.Background())
|
||||
require.NoError(t, err)
|
||||
@@ -229,8 +229,8 @@ func TestFollowBlock_OK(t *testing.T) {
|
||||
testAcc.Backend.Commit()
|
||||
}
|
||||
// set current height
|
||||
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().NumberU64()
|
||||
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time()
|
||||
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().Number.Uint64()
|
||||
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time
|
||||
|
||||
h, err = web3Service.followedBlockHeight(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
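The NumberU64()/Time() to Number.Uint64()/Time changes in this test track a go-ethereum accessor change; a minimal sketch, assuming go-ethereum v1.11+ where BlockChain.CurrentBlock returns *types.Header rather than *types.Block:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
)

// heightAndTime reads the header fields directly: Number is a *big.Int and
// Time a uint64 field, replacing the old *types.Block NumberU64()/Time() methods.
func heightAndTime(header *types.Header) (uint64, uint64) {
	return header.Number.Uint64(), header.Time
}

func main() {
	h := &types.Header{Number: big.NewInt(42), Time: 1700000000}
	fmt.Println(heightAndTime(h)) // 42 1700000000
}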
@@ -38,6 +38,7 @@ type EngineClient struct {
TerminalBlockHash []byte
TerminalBlockHashExists bool
OverrideValidHash [32]byte
BlobsBundle *pb.BlobsBundle
BlockValue *big.Int
}

@@ -172,3 +173,8 @@ func (e *EngineClient) GetTerminalBlockHash(ctx context.Context, transitionTime
blk = parentBlk
}
}

// GetBlobsBundle --
func (e *EngineClient) GetBlobsBundle(ctx context.Context, payloadId [8]byte) (*pb.BlobsBundle, error) {
return e.BlobsBundle, nil
}

@@ -63,6 +63,7 @@ go_library(
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
],

@@ -4,6 +4,7 @@ import (
"fmt"

"github.com/ethereum/go-ethereum/common"
fastssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v4/cmd"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -189,3 +190,7 @@ func configureExecutionSetting(cliCtx *cli.Context) error {
" Default fee recipient will be used as a fall back", checksumAddress.Hex())
return params.SetActive(c)
}

func configureFastSSZHashingAlgorithm() {
fastssz.EnableVectorizedHTR = true
}

@@ -148,6 +148,7 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
if err := configureExecutionSetting(cliCtx); err != nil {
return nil, err
}
configureFastSSZHashingAlgorithm()

// Initializes any forks here.
params.BeaconConfig().InitializeForkSchedule()
@@ -798,10 +799,10 @@ func (b *BeaconNode) registerRPCService() error {
PeerManager: p2pService,
MetadataProvider: p2pService,
ChainInfoFetcher: chainService,
HeadUpdater: chainService,
HeadFetcher: chainService,
CanonicalFetcher: chainService,
ForkFetcher: chainService,
ForkchoiceFetcher: chainService,
FinalizationFetcher: chainService,
BlockReceiver: chainService,
AttestationReceiver: chainService,

@@ -24,6 +24,7 @@ go_library(
"options.go",
"pubsub.go",
"pubsub_filter.go",
"pubsub_tracer.go",
"rpc_topic_mappings.go",
"sender.go",
"service.go",
@@ -84,6 +85,7 @@ go_library(
"@com_github_libp2p_go_libp2p//core/host:go_default_library",
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_libp2p_go_libp2p//core/peerstore:go_default_library",
"@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/muxer/mplex:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/security/noise:go_default_library",
@@ -141,6 +141,35 @@ func (s *Service) broadcastAttestation(ctx context.Context, subnet uint64, att *
}
}

// BroadcastBlob broadcasts a blob to the p2p network; the message is assumed to be
// broadcast to the current fork and to the input subnet.
func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.SignedBlobSidecar) error {
ctx, span := trace.StartSpan(ctx, "p2p.BroadcastBlob")
defer span.End()
forkDigest, err := s.currentForkDigest()
if err != nil {
err := errors.Wrap(err, "could not retrieve fork digest")
tracing.AnnotateError(span, err)
return err
}

// Non-blocking broadcast, with attempts to discover a subnet peer if none available.
go s.broadcastBlob(ctx, subnet, blob, forkDigest)

return nil
}

func (s *Service) broadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.SignedBlobSidecar, forkDigest [4]byte) {
ctx, span := trace.StartSpan(ctx, "p2p.broadcastBlob")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.

if err := s.broadcastObject(ctx, blobSidecar, blobSubnetToTopic(subnet, forkDigest)); err != nil {
log.WithError(err).Error("Failed to broadcast blob sidecar")
tracing.AnnotateError(span, err)
}
}

func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [4]byte) {
ctx, span := trace.StartSpan(ctx, "p2p.broadcastSyncCommittee")
defer span.End()
@@ -232,3 +261,7 @@ func attestationToTopic(subnet uint64, forkDigest [4]byte) string {
func syncCommitteeToTopic(subnet uint64, forkDigest [4]byte) string {
return fmt.Sprintf(SyncCommitteeSubnetTopicFormat, forkDigest, subnet)
}

func blobSubnetToTopic(subnet uint64, forkDigest [4]byte) string {
return fmt.Sprintf(BlobSubnetTopicFormat, forkDigest, subnet)
}
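For reference, assuming the existing GossipProtocolAndDigest prefix expands to "/eth2/%x/" as it does for the other gossip topics, blobSubnetToTopic produces strings like "/eth2/4a26c58b/blob_sidecar_3". A minimal sketch (the expanded format string is an assumption, not quoted from the source):

package main

import "fmt"

// blobSubnetTopicFormat is the assumed expansion of
// GossipProtocolAndDigest + GossipBlobSidecarMessage + "_%d".
const blobSubnetTopicFormat = "/eth2/%x/blob_sidecar_%d"

func main() {
	forkDigest := [4]byte{0x4a, 0x26, 0xc5, 0x8b}
	// %x over a byte array prints concatenated hex, matching the digest encoding.
	fmt.Printf(blobSubnetTopicFormat+"\n", forkDigest, 3) // /eth2/4a26c58b/blob_sidecar_3
}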

@@ -17,7 +17,8 @@ func (s *Service) forkWatcher() {
currEpoch := slots.ToEpoch(currSlot)
if currEpoch == params.BeaconConfig().AltairForkEpoch ||
currEpoch == params.BeaconConfig().BellatrixForkEpoch ||
- currEpoch == params.BeaconConfig().CapellaForkEpoch {
+ currEpoch == params.BeaconConfig().CapellaForkEpoch ||
+ currEpoch == params.BeaconConfig().DenebForkEpoch {
// If we are in the fork epoch, we update our enr with
// the updated fork digest. This repeats over the
// epoch, which might be slightly wasteful

@@ -119,6 +119,9 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
return defaultProposerSlashingTopicParams(), nil
case strings.Contains(topic, GossipAttesterSlashingMessage):
return defaultAttesterSlashingTopicParams(), nil
case strings.Contains(topic, GossipBlobSidecarMessage):
// TODO(Deneb): Using the default block scoring. But this should be updated.
return defaultBlockTopicParams(), nil
case strings.Contains(topic, GossipBlsToExecutionChangeMessage):
return defaultBlsToExecutionChangeTopicParams(), nil
default:

@@ -21,12 +21,16 @@ var gossipTopicMappings = map[string]proto.Message{
SyncContributionAndProofSubnetTopicFormat: &ethpb.SignedContributionAndProof{},
SyncCommitteeSubnetTopicFormat: &ethpb.SyncCommitteeMessage{},
BlsToExecutionChangeSubnetTopicFormat: &ethpb.SignedBLSToExecutionChange{},
BlobSubnetTopicFormat: &ethpb.SignedBlobSidecar{},
}

// GossipTopicMappings is a function to return the assigned data type
// versioned by epoch.
func GossipTopicMappings(topic string, epoch primitives.Epoch) proto.Message {
if topic == BlockSubnetTopicFormat {
if epoch >= params.BeaconConfig().DenebForkEpoch {
return &ethpb.SignedBeaconBlockDeneb{}
}
if epoch >= params.BeaconConfig().CapellaForkEpoch {
return &ethpb.SignedBeaconBlockCapella{}
}
@@ -64,4 +68,6 @@ func init() {
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockBellatrix{})] = BlockSubnetTopicFormat
// Specially handle Capella objects
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockCapella{})] = BlockSubnetTopicFormat
// Specially handle Deneb objects
GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlockDeneb{})] = BlockSubnetTopicFormat
}

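Note the ordering in GossipTopicMappings: the epoch checks must run from newest fork to oldest, because any epoch past Deneb also satisfies the Capella comparison. A hedged standalone sketch of that selection order (the fork epoch numbers below are illustrative placeholders, not the real configuration):

package main

import "fmt"

const (
	capellaForkEpoch = 194048 // mainnet Capella epoch, shown for illustration
	denebForkEpoch   = 300000 // placeholder; unset at the time of this diff
)

// blockTypeForEpoch mirrors the mapping logic: check the newest fork first.
func blockTypeForEpoch(epoch uint64) string {
	switch {
	case epoch >= denebForkEpoch:
		return "SignedBeaconBlockDeneb"
	case epoch >= capellaForkEpoch:
		return "SignedBeaconBlockCapella"
	default:
		return "pre-Capella block type"
	}
}

func main() {
	fmt.Println(blockTypeForEpoch(300001)) // SignedBeaconBlockDeneb
	fmt.Println(blockTypeForEpoch(200000)) // SignedBeaconBlockCapella
}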
@@ -35,6 +35,7 @@ type Broadcaster interface {
Broadcast(context.Context, proto.Message) error
BroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation) error
BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage) error
BroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.SignedBlobSidecar) error
}

// SetStreamHandler configures p2p to handle streams of a certain topic ID.

@@ -4,6 +4,7 @@ import (
"strings"

"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/peerstore"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
@@ -63,6 +64,80 @@ var (
Name: "p2p_sync_committee_subnet_attempted_broadcasts",
Help: "The number of sync committee messages that were attempted to be broadcast.",
})

// Gossip Tracer Metrics
pubsubTopicsActive = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "p2p_pubsub_topic_active",
Help: "The topics that the peer is participating in gossipsub.",
},
[]string{"topic"})
pubsubTopicsGraft = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "p2p_pubsub_graft_total",
Help: "The number of graft messages sent for a particular topic",
},
[]string{"topic"})
pubsubTopicsPrune = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "p2p_pubsub_prune_total",
Help: "The number of prune messages sent for a particular topic",
},
[]string{"topic"})
pubsubMessageDeliver = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "p2p_pubsub_deliver_total",
Help: "The number of messages received for delivery of a particular topic",
},
[]string{"topic"})
pubsubMessageUndeliverable = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "p2p_pubsub_undeliverable_total",
Help: "The number of messages received which weren't able to be delivered of a particular topic",
},
[]string{"topic"})
pubsubMessageValidate = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "p2p_pubsub_validate_total",
Help: "The number of messages received for validation of a particular topic",
},
[]string{"topic"})
pubsubMessageDuplicate = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "p2p_pubsub_duplicate_total",
Help: "The number of duplicate messages sent for a particular topic",
},
[]string{"topic"})
pubsubMessageReject = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "p2p_pubsub_reject_total",
Help: "The number of messages rejected of a particular topic",
},
[]string{"topic"})
pubsubPeerThrottle = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "p2p_pubsub_throttle_total",
Help: "The number of times a peer has been throttled for a particular topic",
},
[]string{"topic"})
pubsubRPCRecv = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "p2p_pubsub_rpc_recv_total",
Help: "The number of messages received via rpc for a particular topic",
},
[]string{"control_message"})
pubsubRPCSubRecv = promauto.NewCounter(prometheus.CounterOpts{
Name: "p2p_pubsub_rpc_recv_sub_total",
Help: "The number of subscription messages received via rpc",
})
pubsubRPCDrop = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "p2p_pubsub_rpc_drop_total",
Help: "The number of messages dropped via rpc for a particular topic",
},
[]string{"control_message"})
pubsubRPCSubDrop = promauto.NewCounter(prometheus.CounterOpts{
Name: "p2p_pubsub_rpc_drop_sub_total",
Help: "The number of subscription messages dropped via rpc",
})
pubsubRPCSent = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "p2p_pubsub_rpc_sent_total",
Help: "The number of messages sent via rpc for a particular topic",
},
[]string{"control_message"})
pubsubRPCSubSent = promauto.NewCounter(prometheus.CounterOpts{
Name: "p2p_pubsub_rpc_sent_sub_total",
Help: "The number of subscription messages sent via rpc",
})
)

func (s *Service) updateMetrics() {
@@ -84,20 +159,7 @@ func (s *Service) updateMetrics() {
continue
}

- // Get the agent data.
- rawAgent, err := store.Get(pid, "AgentVersion")
- agent, ok := rawAgent.(string)
- if err != nil || !ok {
- agent = "unknown"
- }
- foundName := "unknown"
- for _, knownAgent := range knownAgentVersions {
- // If the agent string matches one of our known agents, we set
- // the value to our own, sanitized string.
- if strings.Contains(strings.ToLower(agent), knownAgent) {
- foundName = knownAgent
- }
- }
+ foundName := agentFromPid(pid, store)
numConnectedPeersByClient[foundName] += 1

// Get peer scoring data.
@@ -123,3 +185,21 @@ func average(xs []float64) float64 {
}
return total / float64(len(xs))
}

func agentFromPid(pid peer.ID, store peerstore.Peerstore) string {
// Get the agent data.
rawAgent, err := store.Get(pid, "AgentVersion")
agent, ok := rawAgent.(string)
if err != nil || !ok {
return "unknown"
}
foundName := "unknown"
for _, knownAgent := range knownAgentVersions {
// If the agent string matches one of our known agents, we set
// the value to our own, sanitized string.
if strings.Contains(strings.ToLower(agent), knownAgent) {
foundName = knownAgent
}
}
return foundName
}

@@ -145,6 +145,7 @@ func (s *Service) pubsubOptions() []pubsub.Option {
pubsub.WithPeerScore(peerScoringParams()),
pubsub.WithPeerScoreInspect(s.peerInspector, time.Minute),
pubsub.WithGossipSubParams(pubsubGossipParam()),
pubsub.WithRawTracer(gossipTracer{host: s.host}),
}
return psOpts
}

@@ -57,12 +57,17 @@ func (s *Service) CanSubscribe(topic string) bool {
log.WithError(err).Error("Could not determine Capella fork digest")
return false
}

denebForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not determine Deneb fork digest")
return false
}
switch parts[2] {
case fmt.Sprintf("%x", phase0ForkDigest):
case fmt.Sprintf("%x", altairForkDigest):
case fmt.Sprintf("%x", bellatrixForkDigest):
case fmt.Sprintf("%x", capellaForkDigest):
case fmt.Sprintf("%x", denebForkDigest):
default:
return false
}

@@ -92,7 +92,7 @@ func TestService_CanSubscribe(t *testing.T) {
formatting := []interface{}{digest}

// Special case for attestation subnets which have a second formatting placeholder.
- if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat {
+ if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat || topic == BlobSubnetTopicFormat {
formatting = append(formatting, 0 /* some subnet ID */)
}

beacon-chain/p2p/pubsub_tracer.go (new file, 103 lines)
@@ -0,0 +1,103 @@
package p2p

import (
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/prometheus/client_golang/prometheus"
)

var _ = pubsub.RawTracer(gossipTracer{})

// This tracer is used to implement metrics collection for messages received
// and broadcast through gossipsub.
type gossipTracer struct {
host host.Host
}

// AddPeer .
func (g gossipTracer) AddPeer(p peer.ID, proto protocol.ID) {
// no-op
}

// RemovePeer .
func (g gossipTracer) RemovePeer(p peer.ID) {
// no-op
}

// Join .
func (g gossipTracer) Join(topic string) {
pubsubTopicsActive.WithLabelValues(topic).Set(1)
}

// Leave .
func (g gossipTracer) Leave(topic string) {
pubsubTopicsActive.WithLabelValues(topic).Set(0)
}

// Graft .
func (g gossipTracer) Graft(p peer.ID, topic string) {
pubsubTopicsGraft.WithLabelValues(topic).Inc()
}

// Prune .
func (g gossipTracer) Prune(p peer.ID, topic string) {
pubsubTopicsPrune.WithLabelValues(topic).Inc()
}

// ValidateMessage .
func (g gossipTracer) ValidateMessage(msg *pubsub.Message) {
pubsubMessageValidate.WithLabelValues(*msg.Topic).Inc()
}

// DeliverMessage .
func (g gossipTracer) DeliverMessage(msg *pubsub.Message) {
pubsubMessageDeliver.WithLabelValues(*msg.Topic).Inc()
}

// RejectMessage .
func (g gossipTracer) RejectMessage(msg *pubsub.Message, reason string) {
pubsubMessageReject.WithLabelValues(*msg.Topic).Inc()
}

// DuplicateMessage .
func (g gossipTracer) DuplicateMessage(msg *pubsub.Message) {
pubsubMessageDuplicate.WithLabelValues(*msg.Topic).Inc()
}

// UndeliverableMessage .
func (g gossipTracer) UndeliverableMessage(msg *pubsub.Message) {
pubsubMessageUndeliverable.WithLabelValues(*msg.Topic).Inc()
}

// ThrottlePeer .
func (g gossipTracer) ThrottlePeer(p peer.ID) {
agent := agentFromPid(p, g.host.Peerstore())
pubsubPeerThrottle.WithLabelValues(agent).Inc()
}

// RecvRPC .
func (g gossipTracer) RecvRPC(rpc *pubsub.RPC) {
setMetricFromRPC(pubsubRPCSubRecv, pubsubRPCRecv, rpc)
}

// SendRPC .
func (g gossipTracer) SendRPC(rpc *pubsub.RPC, p peer.ID) {
setMetricFromRPC(pubsubRPCSubSent, pubsubRPCSent, rpc)
}

// DropRPC .
func (g gossipTracer) DropRPC(rpc *pubsub.RPC, p peer.ID) {
setMetricFromRPC(pubsubRPCSubDrop, pubsubRPCDrop, rpc)
}

func setMetricFromRPC(ctr prometheus.Counter, gauge *prometheus.CounterVec, rpc *pubsub.RPC) {
ctr.Add(float64(len(rpc.Subscriptions)))
if rpc.Control != nil {
gauge.WithLabelValues("graft").Add(float64(len(rpc.Control.Graft)))
gauge.WithLabelValues("prune").Add(float64(len(rpc.Control.Prune)))
gauge.WithLabelValues("ihave").Add(float64(len(rpc.Control.Ihave)))
gauge.WithLabelValues("iwant").Add(float64(len(rpc.Control.Iwant)))
}
}
@@ -37,6 +37,9 @@ const PingMessageName = "/ping"
// MetadataMessageName specifies the name for the metadata message topic.
const MetadataMessageName = "/metadata"

const BlobSidecarsByRootName = "/blob_sidecars_by_root"
const BlobSidecarsByRangeName = "/blob_sidecars_by_range"

const (
// V1 RPC Topics
// RPCStatusTopicV1 defines the v1 topic for the status rpc method.
@@ -52,6 +55,15 @@ const (
// RPCMetaDataTopicV1 defines the v1 topic for the metadata rpc method.
RPCMetaDataTopicV1 = protocolPrefix + MetadataMessageName + SchemaVersionV1

// RPCBlobSidecarsByRootTopicV1 is a topic for requesting blob sidecars by their block root. New in deneb.
// /eth2/beacon_chain/req/blob_sidecars_by_root/1/
RPCBlobSidecarsByRootTopicV1 = protocolPrefix + BlobSidecarsByRootName + SchemaVersionV1

// RPCBlobSidecarsByRangeTopicV1 is a topic for requesting blob sidecars
// in the slot range [start_slot, start_slot + count), leading up to the current head block as selected by fork choice.
// Protocol ID: /eth2/beacon_chain/req/blob_sidecars_by_range/1/ - New in deneb.
RPCBlobSidecarsByRangeTopicV1 = protocolPrefix + BlobSidecarsByRangeName + SchemaVersionV1

// V2 RPC Topics
// RPCBlocksByRangeTopicV2 defines the v2 topic for the blocks by range rpc method.
RPCBlocksByRangeTopicV2 = protocolPrefix + BeaconBlocksByRangeMessageName + SchemaVersionV2
@@ -83,6 +95,10 @@ var RPCTopicMappings = map[string]interface{}{
// RPC Metadata Message
RPCMetaDataTopicV1: new(interface{}),
RPCMetaDataTopicV2: new(interface{}),
// BlobSidecarsByRange v1 Message
RPCBlobSidecarsByRangeTopicV1: new(pb.BlobSidecarsByRangeRequest),
// BlobSidecarsByRoot v1 Message
RPCBlobSidecarsByRootTopicV1: new(p2ptypes.BlobSidecarsByRootReq),
}

// Maps all registered protocol prefixes.
@@ -99,6 +115,8 @@ var messageMapping = map[string]bool{
BeaconBlocksByRootsMessageName: true,
PingMessageName: true,
MetadataMessageName: true,
BlobSidecarsByRangeName: true,
BlobSidecarsByRootName: true,
}

// Maps all the RPC messages which are to be updated in altair.
@@ -113,6 +131,15 @@ var versionMapping = map[string]bool{
SchemaVersionV2: true,
}

var PreAltairV1SchemaMapping = map[string]bool{
StatusMessageName: true,
GoodbyeMessageName: true,
BeaconBlocksByRangeMessageName: true,
BeaconBlocksByRootsMessageName: true,
PingMessageName: true,
MetadataMessageName: true,
}

// VerifyTopicMapping verifies that the topic and its accompanying
// message type is correct.
func VerifyTopicMapping(topic string, msg interface{}) error {

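Putting the new constants together, the full protocol ID follows the prefix scheme spelled out in the comments above; a short sketch (the "/ssz_snappy" encoding suffix is appended elsewhere in the stack and is an assumption here):

package main

import "fmt"

func main() {
	const (
		protocolPrefix         = "/eth2/beacon_chain/req" // per the comments above
		blobSidecarsByRootName = "/blob_sidecars_by_root"
		schemaVersionV1        = "/1"
	)
	topic := protocolPrefix + blobSidecarsByRootName + schemaVersionV1
	fmt.Println(topic)                 // /eth2/beacon_chain/req/blob_sidecars_by_root/1
	fmt.Println(topic + "/ssz_snappy") // with the assumed encoding suffix
}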
@@ -138,6 +138,11 @@ func (_ *FakeP2P) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Att
return nil
}

// BroadcastBlob -- fake.
func (p *FakeP2P) BroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.SignedBlobSidecar) error {
return nil
}

// BroadcastSyncCommitteeMessage -- fake.
func (_ *FakeP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
return nil

@@ -33,3 +33,9 @@ func (m *MockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ uin
m.BroadcastCalled = true
return nil
}

// BroadcastBlob records a broadcast occurred.
func (m *MockBroadcaster) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.SignedBlobSidecar) error {
m.BroadcastCalled = true
return nil
}

@@ -170,6 +170,11 @@ func (p *TestP2P) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Att
return nil
}

func (p *TestP2P) BroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.SignedBlobSidecar) error {
p.BroadcastCalled = true
return nil
}

// BroadcastSyncCommitteeMessage broadcasts a sync committee message.
func (p *TestP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
p.BroadcastCalled = true

@@ -28,6 +28,8 @@ const (
GossipContributionAndProofMessage = "sync_committee_contribution_and_proof"
// GossipBlsToExecutionChangeMessage is the name for the bls to execution change message type.
GossipBlsToExecutionChangeMessage = "bls_to_execution_change"
// GossipBlobSidecarMessage is the name for the blob sidecar message type.
GossipBlobSidecarMessage = "blob_sidecar"

// Topic Formats
//
@@ -49,4 +51,6 @@ const (
SyncContributionAndProofSubnetTopicFormat = GossipProtocolAndDigest + GossipContributionAndProofMessage
// BlsToExecutionChangeSubnetTopicFormat is the topic format for the bls to execution change subnet.
BlsToExecutionChangeSubnetTopicFormat = GossipProtocolAndDigest + GossipBlsToExecutionChangeMessage
// BlobSubnetTopicFormat is the topic format for the blob subnet.
BlobSubnetTopicFormat = GossipProtocolAndDigest + GossipBlobSidecarMessage + "_%d"
)

@@ -41,7 +41,9 @@ go_test(
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
],
)

@@ -53,6 +53,11 @@ func InitializeDataMaps() {
&ethpb.SignedBeaconBlockCapella{Block: &ethpb.BeaconBlockCapella{Body: &ethpb.BeaconBlockBodyCapella{}}},
)
},
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() (interfaces.ReadOnlySignedBeaconBlock, error) {
return blocks.NewSignedBeaconBlock(
&ethpb.SignedBeaconBlockDeneb{Block: &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{}}},
)
},
}

// Reset our metadata map.
@@ -69,5 +74,8 @@ func InitializeDataMaps() {
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() metadata.Metadata {
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{})
},
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() metadata.Metadata {
return wrapper.WrappedMetadataV1(&ethpb.MetaDataV1{})
},
}
}

@@ -12,4 +12,5 @@ var (
ErrRateLimited = errors.New("rate limited")
ErrIODeadline = errors.New("i/o deadline exceeded")
ErrInvalidRequest = errors.New("invalid range, step or count")
ErrBlobLTMinRequest = errors.New("blob slot < minimum_request_epoch")
)

@@ -7,6 +7,7 @@ import (
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v4/config/params"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

const rootLength = 32
@@ -120,3 +121,66 @@ func (m *ErrorMessage) UnmarshalSSZ(buf []byte) error {
*m = errMsg
return nil
}

// BlobSidecarsByRootReq is used to specify a list of blob targets (root+index) in a BlobSidecarsByRoot RPC request.
type BlobSidecarsByRootReq []*eth.BlobIdentifier

// BlobIdentifier is a fixed size value, so we can compute its fixed size at start time (see init below)
var blobIdSize int

// SizeSSZ returns the size of the serialized representation.
func (b *BlobSidecarsByRootReq) SizeSSZ() int {
return len(*b) * blobIdSize
}

// MarshalSSZTo marshals the blob sidecars by root request into the provided byte slice.
func (b *BlobSidecarsByRootReq) MarshalSSZTo(dst []byte) ([]byte, error) {
// A List without an enclosing container is marshaled exactly like a vector, no length offset required.
marshalledObj, err := b.MarshalSSZ()
if err != nil {
return nil, err
}
return append(dst, marshalledObj...), nil
}

// MarshalSSZ marshals the blob sidecars by root request into its serialized form.
func (b *BlobSidecarsByRootReq) MarshalSSZ() ([]byte, error) {
buf := make([]byte, len(*b)*blobIdSize)
for i, id := range *b {
by, err := id.MarshalSSZ()
if err != nil {
return nil, err
}
copy(buf[i*blobIdSize:(i+1)*blobIdSize], by)
}
return buf, nil
}

// UnmarshalSSZ unmarshals the provided bytes buffer into the
// blob sidecars by root request object.
func (b *BlobSidecarsByRootReq) UnmarshalSSZ(buf []byte) error {
bufLen := len(buf)
maxLength := int(params.BeaconNetworkConfig().MaxRequestBlobsSidecars) * blobIdSize
if bufLen > maxLength {
return errors.Errorf("expected buffer with length of up to %d but received length %d", maxLength, bufLen)
}
if bufLen%blobIdSize != 0 {
return errors.Wrapf(ssz.ErrIncorrectByteSize, "size=%d", bufLen)
}
count := bufLen / blobIdSize
*b = make([]*eth.BlobIdentifier, count)
for i := 0; i < count; i++ {
id := &eth.BlobIdentifier{}
err := id.UnmarshalSSZ(buf[i*blobIdSize : (i+1)*blobIdSize])
if err != nil {
return err
}
(*b)[i] = id
}
return nil
}

func init() {
sizer := &eth.BlobIdentifier{}
blobIdSize = sizer.SizeSSZ()
}

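The whole type works because BlobIdentifier is fixed-size: 32 bytes of block root plus an 8-byte little-endian index, 40 bytes per entry (derived from the spec fields; the code above computes the exact size at init). A self-contained sketch of the same round-trip logic:

package main

import (
	"encoding/binary"
	"fmt"
)

// blobID stands in for eth.BlobIdentifier: a 32-byte root and a uint64 index,
// so a list of n identifiers serializes to exactly n*40 bytes with no offsets.
type blobID struct {
	root  [32]byte
	index uint64
}

func marshal(ids []blobID) []byte {
	buf := make([]byte, len(ids)*40)
	for i, id := range ids {
		copy(buf[i*40:], id.root[:])
		binary.LittleEndian.PutUint64(buf[i*40+32:], id.index)
	}
	return buf
}

func unmarshal(buf []byte) ([]blobID, error) {
	// Any length not divisible by the fixed entry size is rejected,
	// mirroring the ssz.ErrIncorrectByteSize check above.
	if len(buf)%40 != 0 {
		return nil, fmt.Errorf("incorrect byte size %d", len(buf))
	}
	ids := make([]blobID, len(buf)/40)
	for i := range ids {
		copy(ids[i].root[:], buf[i*40:])
		ids[i].index = binary.LittleEndian.Uint64(buf[i*40+32:])
	}
	return ids, nil
}

func main() {
	in := []blobID{{index: 1}, {index: 2}}
	out, err := unmarshal(marshal(in))
	fmt.Println(len(out), err) // 2 <nil>
}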
@@ -4,12 +4,82 @@ import (
"encoding/hex"
"testing"

ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)

func generateBlobIdentifiers(n int) []*eth.BlobIdentifier {
r := make([]*eth.BlobIdentifier, n)
for i := 0; i < n; i++ {
r[i] = &eth.BlobIdentifier{
BlockRoot: bytesutil.PadTo([]byte{byte(i)}, 32),
Index: 0,
}
}
return r
}

func TestBlobSidecarsByRootReq_MarshalSSZ(t *testing.T) {
cases := []struct {
name string
ids []*eth.BlobIdentifier
marshalErr error
unmarshalErr error
unmarshalMod func([]byte) []byte
}{
{
name: "empty list",
},
{
name: "single item list",
ids: generateBlobIdentifiers(1),
},
{
name: "10 item list",
ids: generateBlobIdentifiers(10),
},
{
name: "wonky unmarshal size",
ids: generateBlobIdentifiers(10),
unmarshalMod: func(in []byte) []byte {
in = append(in, byte(0))
return in
},
unmarshalErr: ssz.ErrIncorrectByteSize,
},
}

for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
r := BlobSidecarsByRootReq(c.ids)
by, err := r.MarshalSSZ()
if c.marshalErr != nil {
require.ErrorIs(t, err, c.marshalErr)
return
}
require.NoError(t, err)
if c.unmarshalMod != nil {
by = c.unmarshalMod(by)
}
got := &BlobSidecarsByRootReq{}
err = got.UnmarshalSSZ(by)
if c.unmarshalErr != nil {
require.ErrorIs(t, err, c.unmarshalErr)
return
}
require.NoError(t, err)
for i, gid := range *got {
require.DeepEqual(t, c.ids[i], gid)
}
})
}
}

func TestBeaconBlockByRootsReq_Limit(t *testing.T) {
fixedRoots := make([][32]byte, 0)
for i := uint64(0); i < params.BeaconNetworkConfig().MaxRequestBlocks+100; i++ {

@@ -503,6 +503,13 @@ type capellaBlockResponseJson struct {
Finalized bool `json:"finalized"`
}

type denebBlockResponseJson struct {
Version string `json:"version"`
Data *SignedBeaconBlockDenebContainerJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}

type bellatrixBlindedBlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *SignedBlindedBeaconBlockBellatrixContainerJson `json:"data"`
@@ -517,6 +524,12 @@ type capellaBlindedBlockResponseJson struct {
Finalized bool `json:"finalized"`
}

type denebBlindedBlockResponseJson struct {
Version string `json:"version"`
Data *SignedBlindedBeaconBlockDenebContainerJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
}

func serializeV2Block(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
respContainer, ok := response.(*BlockV2ResponseJson)
if !ok {
@@ -565,6 +578,16 @@ func serializeV2Block(response interface{}) (apimiddleware.RunDefault, []byte, a
ExecutionOptimistic: respContainer.ExecutionOptimistic,
Finalized: respContainer.Finalized,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_Deneb.String())):
actualRespContainer = &denebBlockResponseJson{
Version: respContainer.Version,
Data: &SignedBeaconBlockDenebContainerJson{
Message: respContainer.Data.DenebBlock,
Signature: respContainer.Data.Signature,
},
ExecutionOptimistic: respContainer.ExecutionOptimistic,
Finalized: respContainer.Finalized,
}
default:
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
}
@@ -624,6 +647,15 @@ func serializeBlindedBlock(response interface{}) (apimiddleware.RunDefault, []by
ExecutionOptimistic: respContainer.ExecutionOptimistic,
Finalized: respContainer.Finalized,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_Deneb.String())):
actualRespContainer = &denebBlindedBlockResponseJson{
Version: respContainer.Version,
Data: &SignedBlindedBeaconBlockDenebContainerJson{
Message: respContainer.Data.DenebBlock,
Signature: respContainer.Data.Signature,
},
ExecutionOptimistic: respContainer.ExecutionOptimistic,
}
default:
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
}
@@ -655,6 +687,11 @@ type capellaStateResponseJson struct {
Data *BeaconStateCapellaJson `json:"data"`
}

type denebStateResponseJson struct {
Version string `json:"version"`
Data *BeaconStateDenebJson `json:"data"`
}

func serializeV2State(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
respContainer, ok := response.(*BeaconStateV2ResponseJson)
if !ok {
@@ -683,6 +720,11 @@ func serializeV2State(response interface{}) (apimiddleware.RunDefault, []byte, a
Version: respContainer.Version,
Data: respContainer.Data.CapellaState,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_Deneb.String())):
actualRespContainer = &denebStateResponseJson{
Version: respContainer.Version,
Data: respContainer.Data.DenebState,
}
default:
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported state version '%s'", respContainer.Version))
}

@@ -395,6 +395,7 @@ type SignedBeaconBlockContainerV2Json struct {
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
BellatrixBlock *BeaconBlockBellatrixJson `json:"bellatrix_block"`
CapellaBlock *BeaconBlockCapellaJson `json:"capella_block"`
DenebBlock *BeaconBlockDenebJson `json:"deneb_block"`
Signature string `json:"signature" hex:"true"`
}

@@ -403,6 +404,7 @@ type SignedBlindedBeaconBlockContainerJson struct {
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
BellatrixBlock *BlindedBeaconBlockBellatrixJson `json:"bellatrix_block"`
CapellaBlock *BlindedBeaconBlockCapellaJson `json:"capella_block"`
DenebBlock *BlindedBeaconBlockDenebJson `json:"deneb_block"`
Signature string `json:"signature" hex:"true"`
}

@@ -411,6 +413,7 @@ type BeaconBlockContainerV2Json struct {
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
BellatrixBlock *BeaconBlockBellatrixJson `json:"bellatrix_block"`
CapellaBlock *BeaconBlockCapellaJson `json:"capella_block"`
DenebBlock *BeaconBlockDenebJson `json:"deneb_block"`
}

type BlindedBeaconBlockContainerJson struct {
@@ -418,6 +421,7 @@ type BlindedBeaconBlockContainerJson struct {
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
BellatrixBlock *BlindedBeaconBlockBellatrixJson `json:"bellatrix_block"`
CapellaBlock *BlindedBeaconBlockCapellaJson `json:"capella_block"`
DenebBlock *BlindedBeaconBlockDenebJson `json:"deneb_block"`
}

type SignedBeaconBlockAltairContainerJson struct {
@@ -435,6 +439,11 @@ type SignedBeaconBlockCapellaContainerJson struct {
Signature string `json:"signature" hex:"true"`
}

type SignedBeaconBlockDenebContainerJson struct {
Message *BeaconBlockDenebJson `json:"message"`
Signature string `json:"signature" hex:"true"`
}

type SignedBlindedBeaconBlockBellatrixContainerJson struct {
Message *BlindedBeaconBlockBellatrixJson `json:"message"`
Signature string `json:"signature" hex:"true"`
@@ -445,6 +454,11 @@ type SignedBlindedBeaconBlockCapellaContainerJson struct {
Signature string `json:"signature" hex:"true"`
}

type SignedBlindedBeaconBlockDenebContainerJson struct {
Message *BlindedBeaconBlockDenebJson `json:"message"`
Signature string `json:"signature" hex:"true"`
}

type BeaconBlockAltairJson struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
@@ -469,6 +483,14 @@ type BeaconBlockCapellaJson struct {
Body *BeaconBlockBodyCapellaJson `json:"body"`
}

type BeaconBlockDenebJson struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot string `json:"parent_root" hex:"true"`
StateRoot string `json:"state_root" hex:"true"`
Body *BeaconBlockBodyDenebJson `json:"body"`
}

type BlindedBeaconBlockBellatrixJson struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
@@ -485,6 +507,14 @@ type BlindedBeaconBlockCapellaJson struct {
Body *BlindedBeaconBlockBodyCapellaJson `json:"body"`
}

type BlindedBeaconBlockDenebJson struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot string `json:"parent_root" hex:"true"`
StateRoot string `json:"state_root" hex:"true"`
Body *BlindedBeaconBlockBodyDenebJson `json:"body"`
}

type BeaconBlockBodyAltairJson struct {
RandaoReveal string `json:"randao_reveal" hex:"true"`
Eth1Data *Eth1DataJson `json:"eth1_data"`
@@ -524,6 +554,21 @@ type BeaconBlockBodyCapellaJson struct {
BLSToExecutionChanges []*SignedBLSToExecutionChangeJson `json:"bls_to_execution_changes"`
}

type BeaconBlockBodyDenebJson struct {
RandaoReveal string `json:"randao_reveal" hex:"true"`
Eth1Data *Eth1DataJson `json:"eth1_data"`
Graffiti string `json:"graffiti" hex:"true"`
ProposerSlashings []*ProposerSlashingJson `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashingJson `json:"attester_slashings"`
Attestations []*AttestationJson `json:"attestations"`
Deposits []*DepositJson `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExitJson `json:"voluntary_exits"`
SyncAggregate *SyncAggregateJson `json:"sync_aggregate"`
ExecutionPayload *ExecutionPayloadDenebJson `json:"execution_payload"`
BLSToExecutionChanges []*SignedBLSToExecutionChangeJson `json:"bls_to_execution_changes"`
BlobKzgCommitments []string `json:"blob_kzg_commitments" hex:"true"`
}

type BlindedBeaconBlockBodyBellatrixJson struct {
RandaoReveal string `json:"randao_reveal" hex:"true"`
Eth1Data *Eth1DataJson `json:"eth1_data"`
@@ -551,6 +596,21 @@ type BlindedBeaconBlockBodyCapellaJson struct {
BLSToExecutionChanges []*SignedBLSToExecutionChangeJson `json:"bls_to_execution_changes"`
}

type BlindedBeaconBlockBodyDenebJson struct {
RandaoReveal string `json:"randao_reveal" hex:"true"`
Eth1Data *Eth1DataJson `json:"eth1_data"`
Graffiti string `json:"graffiti" hex:"true"`
ProposerSlashings []*ProposerSlashingJson `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashingJson `json:"attester_slashings"`
Attestations []*AttestationJson `json:"attestations"`
Deposits []*DepositJson `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExitJson `json:"voluntary_exits"`
SyncAggregate *SyncAggregateJson `json:"sync_aggregate"`
ExecutionPayloadHeader *ExecutionPayloadHeaderDenebJson `json:"execution_payload_header"`
BLSToExecutionChanges []*SignedBLSToExecutionChangeJson `json:"bls_to_execution_changes"`
BlobKzgCommitments []string `json:"blob_kzg_commitments" hex:"true"`
}

type ExecutionPayloadJson struct {
ParentHash string `json:"parent_hash" hex:"true"`
FeeRecipient string `json:"fee_recipient" hex:"true"`
@@ -586,6 +646,25 @@ type ExecutionPayloadCapellaJson struct {
Withdrawals []*WithdrawalJson `json:"withdrawals"`
}

type ExecutionPayloadDenebJson struct {
ParentHash string `json:"parent_hash" hex:"true"`
FeeRecipient string `json:"fee_recipient" hex:"true"`
StateRoot string `json:"state_root" hex:"true"`
ReceiptsRoot string `json:"receipts_root" hex:"true"`
LogsBloom string `json:"logs_bloom" hex:"true"`
PrevRandao string `json:"prev_randao" hex:"true"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
TimeStamp string `json:"timestamp"`
ExtraData string `json:"extra_data" hex:"true"`
BaseFeePerGas string `json:"base_fee_per_gas" uint256:"true"`
ExcessDataGas string `json:"excess_data_gas" uint256:"true"`
BlockHash string `json:"block_hash" hex:"true"`
Transactions []string `json:"transactions" hex:"true"`
Withdrawals []*WithdrawalJson `json:"withdrawals"`
}

type ExecutionPayloadHeaderJson struct {
ParentHash string `json:"parent_hash" hex:"true"`
FeeRecipient string `json:"fee_recipient" hex:"true"`
@@ -621,6 +700,25 @@ type ExecutionPayloadHeaderCapellaJson struct {
WithdrawalsRoot string `json:"withdrawals_root" hex:"true"`
}

type ExecutionPayloadHeaderDenebJson struct {
ParentHash string `json:"parent_hash" hex:"true"`
FeeRecipient string `json:"fee_recipient" hex:"true"`
StateRoot string `json:"state_root" hex:"true"`
ReceiptsRoot string `json:"receipts_root" hex:"true"`
LogsBloom string `json:"logs_bloom" hex:"true"`
PrevRandao string `json:"prev_randao" hex:"true"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
TimeStamp string `json:"timestamp"`
ExtraData string `json:"extra_data" hex:"true"`
BaseFeePerGas string `json:"base_fee_per_gas" uint256:"true"`
ExcessDataGas string `json:"excess_data_gas" uint256:"true"`
BlockHash string `json:"block_hash" hex:"true"`
TransactionsRoot string `json:"transactions_root" hex:"true"`
WithdrawalsRoot string `json:"withdrawals_root" hex:"true"`
}

type SyncAggregateJson struct {
SyncCommitteeBits string `json:"sync_committee_bits" hex:"true"`
SyncCommitteeSignature string `json:"sync_committee_signature" hex:"true"`
@@ -877,11 +975,42 @@ type BeaconStateCapellaJson struct {
HistoricalSummaries []*HistoricalSummaryJson `json:"historical_summaries"`
}

type BeaconStateDenebJson struct {
GenesisTime string `json:"genesis_time"`
GenesisValidatorsRoot string `json:"genesis_validators_root" hex:"true"`
Slot string `json:"slot"`
Fork *ForkJson `json:"fork"`
LatestBlockHeader *BeaconBlockHeaderJson `json:"latest_block_header"`
BlockRoots []string `json:"block_roots" hex:"true"`
StateRoots []string `json:"state_roots" hex:"true"`
HistoricalRoots []string `json:"historical_roots" hex:"true"`
Eth1Data *Eth1DataJson `json:"eth1_data"`
Eth1DataVotes []*Eth1DataJson `json:"eth1_data_votes"`
Eth1DepositIndex string `json:"eth1_deposit_index"`
Validators []*ValidatorJson `json:"validators"`
Balances []string `json:"balances"`
RandaoMixes []string `json:"randao_mixes" hex:"true"`
Slashings []string `json:"slashings"`
PreviousEpochParticipation EpochParticipation `json:"previous_epoch_participation"`
CurrentEpochParticipation EpochParticipation `json:"current_epoch_participation"`
JustificationBits string `json:"justification_bits" hex:"true"`
PreviousJustifiedCheckpoint *CheckpointJson `json:"previous_justified_checkpoint"`
CurrentJustifiedCheckpoint *CheckpointJson `json:"current_justified_checkpoint"`
FinalizedCheckpoint *CheckpointJson `json:"finalized_checkpoint"`
InactivityScores []string `json:"inactivity_scores"`
CurrentSyncCommittee *SyncCommitteeJson `json:"current_sync_committee"`
NextSyncCommittee *SyncCommitteeJson `json:"next_sync_committee"`
LatestExecutionPayloadHeader *ExecutionPayloadHeaderDenebJson `json:"latest_execution_payload_header"`
NextWithdrawalIndex string `json:"next_withdrawal_index"`
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
}

type BeaconStateContainerV2Json struct {
Phase0State *BeaconStateJson `json:"phase0_state"`
AltairState *BeaconStateAltairJson `json:"altair_state"`
BellatrixState *BeaconStateBellatrixJson `json:"bellatrix_state"`
CapellaState *BeaconStateCapellaJson `json:"capella_state"`
DenebState *BeaconStateDenebJson `json:"deneb_state"`
}

type ForkJson struct {
@@ -1185,7 +1314,7 @@ type EventPayloadAttributeV2Json struct {
ParentBlockNumber string `json:"parent_block_number"`
ParentBlockRoot string `json:"parent_block_root" hex:"true"`
ParentBlockHash string `json:"parent_block_hash" hex:"true"`
- PayloadAttributes *PayloadAttributesV2Json `json:"payload_attributes_v2"`
+ PayloadAttributes *PayloadAttributesV2Json `json:"payload_attributes"`
}

type PayloadAttributesV1Json struct {

@@ -520,8 +520,10 @@ func TestServer_GetBlindedBlock(t *testing.T) {
Block: chainBlk,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
Optimistic: true,
FinalizedRoots: map[[32]byte]bool{},
OptimisticRoots: map[[32]byte]bool{
bytesutil.ToBytes32(headBlock.BlockRoot): true,
},
}
bs := &Server{
BeaconDB: beaconDB,
@@ -734,8 +736,10 @@ func TestServer_GetBlindedBlockSSZ(t *testing.T) {
Block: chainBlk,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
Optimistic: true,
FinalizedRoots: map[[32]byte]bool{},
OptimisticRoots: map[[32]byte]bool{
bytesutil.ToBytes32(headBlock.BlockRoot): true,
},
}
bs := &Server{
BeaconDB: beaconDB,

@@ -382,6 +382,14 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
if !errors.Is(err, blocks.ErrUnsupportedGetter) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
result, err = bs.getBlockDeneb(ctx, blk)
if result != nil {
return result, nil
}
// ErrUnsupportedGetter means that we have another block type
if !errors.Is(err, blocks.ErrUnsupportedGetter) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
}

@@ -436,6 +444,14 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
if !errors.Is(err, blocks.ErrUnsupportedGetter) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
result, err = bs.getSSZBlockDeneb(ctx, blk)
if result != nil {
return result, nil
}
// ErrUnsupportedGetter means that we have another block type
if !errors.Is(err, blocks.ErrUnsupportedGetter) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}

return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
}
@@ -817,6 +833,76 @@ func (bs *Server) getBlockCapella(ctx context.Context, blk interfaces.ReadOnlySi
}, nil
}

func (bs *Server) getBlockDeneb(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlockResponseV2, error) {
denebBlk, err := blk.PbDenebBlock()
if err != nil {
// ErrUnsupportedGetter means that we have another block type
if errors.Is(err, blocks.ErrUnsupportedGetter) {
if blindedDenebBlk, err := blk.PbBlindedDenebBlock(); err == nil {
if blindedDenebBlk == nil {
return nil, errNilBlock
}
signedFullBlock, err := bs.ExecutionPayloadReconstructor.ReconstructFullBlock(ctx, blk)
if err != nil {
return nil, errors.Wrapf(err, "could not reconstruct full execution payload to create signed beacon block")
}
denebBlk, err = signedFullBlock.PbDenebBlock()
if err != nil {
return nil, errors.Wrapf(err, "could not get signed beacon block")
}
v2Blk, err := migration.V1Alpha1BeaconBlockDenebToV2(denebBlk.Block)
if err != nil {
return nil, errors.Wrapf(err, "could not convert beacon block")
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrapf(err, "could not get block root")
}
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, errors.Wrapf(err, "could not check if block is optimistic")
}
sig := blk.Signature()
return &ethpbv2.BlockResponseV2{
Version: ethpbv2.Version_Deneb,
Data: &ethpbv2.SignedBeaconBlockContainer{
Message: &ethpbv2.SignedBeaconBlockContainer_DenebBlock{DenebBlock: v2Blk},
Signature: sig[:],
},
ExecutionOptimistic: isOptimistic,
}, nil
}
return nil, err
}
return nil, err
}

if denebBlk == nil {
return nil, errNilBlock
}
v2Blk, err := migration.V1Alpha1BeaconBlockDenebToV2(denebBlk.Block)
if err != nil {
return nil, errors.Wrapf(err, "could not convert beacon block")
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrapf(err, "could not get block root")
}
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, errors.Wrapf(err, "could not check if block is optimistic")
}
sig := blk.Signature()
return &ethpbv2.BlockResponseV2{
Version: ethpbv2.Version_Deneb,
Data: &ethpbv2.SignedBeaconBlockContainer{
Message: &ethpbv2.SignedBeaconBlockContainer_DenebBlock{DenebBlock: v2Blk},
Signature: sig[:],
},
ExecutionOptimistic: isOptimistic,
}, nil
}

func getSSZBlockPhase0(blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
phase0Blk, err := blk.PbPhase0Block()
if err != nil {
@@ -1012,6 +1098,82 @@ func (bs *Server) getSSZBlockCapella(ctx context.Context, blk interfaces.ReadOnl
return &ethpbv2.SSZContainer{Version: ethpbv2.Version_CAPELLA, ExecutionOptimistic: isOptimistic, Data: sszData}, nil
}

func (bs *Server) getSSZBlockDeneb(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
denebBlk, err := blk.PbDenebBlock()
if err != nil {
// ErrUnsupportedGetter means that we have another block type
if errors.Is(err, blocks.ErrUnsupportedGetter) {
if blindedDenebBlk, err := blk.PbBlindedDenebBlock(); err == nil {
if blindedDenebBlk == nil {
return nil, errNilBlock
}
signedFullBlock, err := bs.ExecutionPayloadReconstructor.ReconstructFullBlock(ctx, blk)
if err != nil {
return nil, errors.Wrapf(err, "could not reconstruct full execution payload to create signed beacon block")
}
denebBlk, err = signedFullBlock.PbDenebBlock()
if err != nil {
return nil, errors.Wrapf(err, "could not get signed beacon block")
}
v2Blk, err := migration.V1Alpha1BeaconBlockDenebToV2(denebBlk.Block)
if err != nil {
return nil, errors.Wrapf(err, "could not convert signed beacon block")
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrapf(err, "could not get block root")
}
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, errors.Wrapf(err, "could not check if block is optimistic")
}
sig := blk.Signature()
data := &ethpbv2.SignedBeaconBlockDeneb{
Message: v2Blk,
Signature: sig[:],
}
sszData, err := data.MarshalSSZ()
if err != nil {
return nil, errors.Wrapf(err, "could not marshal block into SSZ")
}
return &ethpbv2.SSZContainer{
Version: ethpbv2.Version_Deneb,
ExecutionOptimistic: isOptimistic,
Data: sszData,
}, nil
}
return nil, err
}
return nil, err
}

if denebBlk == nil {
return nil, errNilBlock
}
v2Blk, err := migration.V1Alpha1BeaconBlockDenebToV2(denebBlk.Block)
if err != nil {
return nil, errors.Wrapf(err, "could not convert signed beacon block")
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrapf(err, "could not get block root")
}
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, errors.Wrapf(err, "could not check if block is optimistic")
}
sig := blk.Signature()
data := &ethpbv2.SignedBeaconBlockDeneb{
Message: v2Blk,
Signature: sig[:],
}
sszData, err := data.MarshalSSZ()
if err != nil {
return nil, errors.Wrapf(err, "could not marshal block into SSZ")
}
return &ethpbv2.SSZContainer{Version: ethpbv2.Version_Deneb, ExecutionOptimistic: isOptimistic, Data: sszData}, nil
}

func (bs *Server) submitPhase0Block(ctx context.Context, phase0Blk *ethpbv1.BeaconBlock, sig []byte) error {
v1alpha1Blk, err := migration.V1ToV1Alpha1SignedBlock(&ethpbv1.SignedBeaconBlock{Block: phase0Blk, Signature: sig})
if err != nil {
@@ -471,6 +471,9 @@ func TestServer_GetBlockHeader(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
Optimistic: true,
FinalizedRoots: map[[32]byte]bool{},
OptimisticRoots: map[[32]byte]bool{
bytesutil.ToBytes32(headBlock.BlockRoot): true,
},
}
bs := &Server{
BeaconDB: beaconDB,
@@ -634,6 +637,9 @@ func TestServer_ListBlockHeaders(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
Optimistic: true,
FinalizedRoots: map[[32]byte]bool{},
OptimisticRoots: map[[32]byte]bool{
bytesutil.ToBytes32(blkContainers[30].BlockRoot): true,
},
}
bs := &Server{
BeaconDB: beaconDB,
@@ -1666,6 +1672,9 @@ func TestServer_GetBlockV2(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
Optimistic: true,
FinalizedRoots: map[[32]byte]bool{},
OptimisticRoots: map[[32]byte]bool{
bytesutil.ToBytes32(headBlock.BlockRoot): true,
},
}
bs := &Server{
BeaconDB: beaconDB,
@@ -1927,6 +1936,9 @@ func TestServer_GetBlockSSZV2(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
Optimistic: true,
FinalizedRoots: map[[32]byte]bool{},
OptimisticRoots: map[[32]byte]bool{
bytesutil.ToBytes32(headBlock.BlockRoot): true,
},
}
bs := &Server{
BeaconDB: beaconDB,
@@ -2098,6 +2110,9 @@ func TestServer_GetBlockRoot(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
Optimistic: true,
FinalizedRoots: map[[32]byte]bool{},
OptimisticRoots: map[[32]byte]bool{
bytesutil.ToBytes32(headBlock.BlockRoot): true,
},
}
bs := &Server{
BeaconDB: beaconDB,
@@ -2494,6 +2509,9 @@ func TestServer_ListBlockAttestations(t *testing.T) {
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
Optimistic: true,
FinalizedRoots: map[[32]byte]bool{},
OptimisticRoots: map[[32]byte]bool{
bytesutil.ToBytes32(headBlock.BlockRoot): true,
},
}
bs := &Server{
BeaconDB: beaconDB,

||||
@@ -104,6 +104,8 @@ func TestGetSpec(t *testing.T) {
config.MaxWithdrawalsPerPayload = 74
config.MaxBlsToExecutionChanges = 75
config.MaxValidatorsPerWithdrawalsSweep = 76
config.DenebForkEpoch = 77
config.DenebForkVersion = []byte("DenebForkVersion")

var dbp [4]byte
copy(dbp[:], []byte{'0', '0', '0', '1'})
@@ -129,6 +131,9 @@ func TestGetSpec(t *testing.T) {
var dam [4]byte
copy(dam[:], []byte{'1', '0', '0', '0'})
config.DomainApplicationMask = dam
var dbs [4]byte
copy(dbs[:], []byte{'0', '0', '0', '8'})
config.DomainBlobSidecar = dbs

params.OverrideBeaconConfig(config)

@@ -136,7 +141,7 @@ func TestGetSpec(t *testing.T) {
resp, err := server.GetSpec(context.Background(), &emptypb.Empty{})
require.NoError(t, err)

assert.Equal(t, 105, len(resp.Data))
assert.Equal(t, 108, len(resp.Data))
for k, v := range resp.Data {
switch k {
case "CONFIG_NAME":
@@ -362,8 +367,14 @@ func TestGetSpec(t *testing.T) {
case "REORG_WEIGHT_THRESHOLD":
assert.Equal(t, "20", v)
case "SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY":
case "DENEB_FORK_EPOCH":
assert.Equal(t, "77", v)
case "DENEB_FORK_VERSION":
assert.Equal(t, "0x"+hex.EncodeToString([]byte("DenebForkVersion")), v)
case "DOMAIN_BLOB_SIDECAR":
assert.Equal(t, "0x30303038", v)
default:
t.Errorf("Incorrect key: %s", k)
t.Errorf("Unknown key: %s", k)
}
}
}

@@ -71,7 +71,7 @@ func (bs *Server) GetStateRoot(ctx context.Context, req *ethpb.StateRequest) (*e
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get state: %v", err)
}
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, bs.OptimisticModeFetcher, bs.StateFetcher, bs.ChainInfoFetcher, bs.BeaconDB)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -100,7 +100,7 @@ func (bs *Server) GetStateFork(ctx context.Context, req *ethpb.StateRequest) (*e
return nil, helpers.PrepareStateFetchGRPCError(err)
}
fork := st.Fork()
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, bs.OptimisticModeFetcher, bs.StateFetcher, bs.ChainInfoFetcher, bs.BeaconDB)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -131,7 +131,7 @@ func (bs *Server) GetFinalityCheckpoints(ctx context.Context, req *ethpb.StateRe
if err != nil {
return nil, helpers.PrepareStateFetchGRPCError(err)
}
isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, bs.OptimisticModeFetcher, bs.StateFetcher, bs.ChainInfoFetcher, bs.BeaconDB)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -186,7 +186,7 @@ func (bs *Server) GetRandao(ctx context.Context, req *eth2.RandaoRequest) (*eth2
return nil, status.Errorf(codes.Internal, "Could not get randao mix at index %d", idx)
}

isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, bs.OptimisticModeFetcher, bs.StateFetcher, bs.ChainInfoFetcher, bs.BeaconDB)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}

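Every handler above now forwards the raw req.StateId instead of a pre-resolved state, so the helper can short-circuit cheap identifiers before fetching anything. A small sketch of that dispatch shape (simplified stand-in types; not the actual Prysm signature):

// Shape of the refactor: resolve the identifier inside the helper so that
// "genesis" and "head" never require a state fetch. Illustration only.
package main

import (
	"fmt"
	"strings"
)

func isOptimisticByID(stateID []byte, headOptimistic bool) (bool, error) {
	switch strings.ToLower(string(stateID)) {
	case "genesis":
		return false, nil // genesis is fully validated by definition
	case "head":
		return headOptimistic, nil
	default:
		// "finalized", "justified", roots, and slots need chain info or DB
		// lookups in the real helper; modeled here as the head's status.
		return headOptimistic, nil
	}
}

func main() {
	o, _ := isOptimisticByID([]byte("genesis"), true)
	fmt.Println(o) // false
}
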
@@ -94,7 +94,7 @@ func TestGetStateRoot(t *testing.T) {
}

resp, err := server.GetStateRoot(context.Background(), &eth.StateRequest{
StateId: make([]byte, 0),
StateId: []byte("head"),
})
require.NoError(t, err)
assert.NotNil(t, resp)
@@ -121,7 +121,7 @@ func TestGetStateRoot(t *testing.T) {
BeaconDB: db,
}
resp, err := server.GetStateRoot(context.Background(), &eth.StateRequest{
StateId: make([]byte, 0),
StateId: []byte("head"),
})
require.NoError(t, err)
assert.NotNil(t, resp)
@@ -155,7 +155,7 @@ func TestGetStateRoot(t *testing.T) {
BeaconDB: db,
}
resp, err := server.GetStateRoot(context.Background(), &eth.StateRequest{
StateId: make([]byte, 0),
StateId: []byte("head"),
})
require.NoError(t, err)
assert.NotNil(t, resp)
@@ -189,7 +189,7 @@ func TestGetStateFork(t *testing.T) {
}

resp, err := server.GetStateFork(ctx, &eth.StateRequest{
StateId: make([]byte, 0),
StateId: []byte("head"),
})
require.NoError(t, err)
assert.NotNil(t, resp)
@@ -218,7 +218,7 @@ func TestGetStateFork(t *testing.T) {
BeaconDB: db,
}
resp, err := server.GetStateFork(context.Background(), &eth.StateRequest{
StateId: make([]byte, 0),
StateId: []byte("head"),
})
require.NoError(t, err)
assert.NotNil(t, resp)
@@ -251,7 +251,7 @@ func TestGetStateFork(t *testing.T) {
BeaconDB: db,
}
resp, err := server.GetStateFork(context.Background(), &eth.StateRequest{
StateId: make([]byte, 0),
StateId: []byte("head"),
})
require.NoError(t, err)
assert.NotNil(t, resp)
@@ -292,7 +292,7 @@ func TestGetFinalityCheckpoints(t *testing.T) {
}

resp, err := server.GetFinalityCheckpoints(ctx, &eth.StateRequest{
StateId: make([]byte, 0),
StateId: []byte("head"),
})
require.NoError(t, err)
assert.NotNil(t, resp)
@@ -323,7 +323,7 @@ func TestGetFinalityCheckpoints(t *testing.T) {
BeaconDB: db,
}
resp, err := server.GetFinalityCheckpoints(context.Background(), &eth.StateRequest{
StateId: make([]byte, 0),
StateId: []byte("head"),
})
require.NoError(t, err)
assert.NotNil(t, resp)
@@ -356,7 +356,7 @@ func TestGetFinalityCheckpoints(t *testing.T) {
BeaconDB: db,
}
resp, err := server.GetFinalityCheckpoints(context.Background(), &eth.StateRequest{
StateId: make([]byte, 0),
StateId: []byte("head"),
})
require.NoError(t, err)
assert.NotNil(t, resp)
@@ -398,17 +398,17 @@ func TestGetRandao(t *testing.T) {
}

t.Run("no epoch requested", func(t *testing.T) {
resp, err := server.GetRandao(ctx, &eth2.RandaoRequest{StateId: make([]byte, 0)})
resp, err := server.GetRandao(ctx, &eth2.RandaoRequest{StateId: []byte("head")})
require.NoError(t, err)
assert.DeepEqual(t, mixCurrent, resp.Data.Randao)
})
t.Run("current epoch requested", func(t *testing.T) {
resp, err := server.GetRandao(ctx, &eth2.RandaoRequest{StateId: make([]byte, 0), Epoch: &epochCurrent})
resp, err := server.GetRandao(ctx, &eth2.RandaoRequest{StateId: []byte("head"), Epoch: &epochCurrent})
require.NoError(t, err)
assert.DeepEqual(t, mixCurrent, resp.Data.Randao)
})
t.Run("old epoch requested", func(t *testing.T) {
resp, err := server.GetRandao(ctx, &eth2.RandaoRequest{StateId: make([]byte, 0), Epoch: &epochOld})
resp, err := server.GetRandao(ctx, &eth2.RandaoRequest{StateId: []byte("head"), Epoch: &epochOld})
require.NoError(t, err)
assert.DeepEqual(t, mixOld, resp.Data.Randao)
})
@@ -450,7 +450,7 @@ func TestGetRandao(t *testing.T) {
BeaconDB: db,
}
resp, err := server.GetRandao(context.Background(), &eth2.RandaoRequest{
StateId: make([]byte, 0),
StateId: []byte("head"),
})
require.NoError(t, err)
assert.NotNil(t, resp)
@@ -482,7 +482,7 @@ func TestGetRandao(t *testing.T) {
BeaconDB: db,
}
resp, err := server.GetRandao(context.Background(), &eth2.RandaoRequest{
StateId: make([]byte, 0),
StateId: []byte("head"),
})
require.NoError(t, err)
assert.NotNil(t, resp)

@@ -91,7 +91,7 @@ func (bs *Server) ListSyncCommittees(ctx context.Context, req *ethpbv2.StateSync
return nil, status.Errorf(codes.Internal, "Could not extract sync subcommittees: %v", err)
}

isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, bs.OptimisticModeFetcher, bs.StateFetcher, bs.ChainInfoFetcher, bs.BeaconDB)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}

@@ -161,7 +161,8 @@ func TestListSyncCommittees(t *testing.T) {
require.NoError(t, err)
db := dbTest.SetupDB(t)

chainService := &mock.ChainService{}
stSlot := st.Slot()
chainService := &mock.ChainService{Slot: &stSlot}
s := &Server{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
@@ -173,6 +174,7 @@ func TestListSyncCommittees(t *testing.T) {
OptimisticModeFetcher: chainService,
FinalizationFetcher: chainService,
BeaconDB: db,
ChainInfoFetcher: chainService,
}
req := &ethpbv2.StateSyncCommitteesRequest{StateId: stRoot[:]}
resp, err := s.ListSyncCommittees(ctx, req)
@@ -205,7 +207,8 @@ func TestListSyncCommittees(t *testing.T) {
util.SaveBlock(t, ctx, db, blk)
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))

chainService := &mock.ChainService{Optimistic: true}
stSlot := st.Slot()
chainService := &mock.ChainService{Optimistic: true, Slot: &stSlot}
s := &Server{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
@@ -217,6 +220,7 @@ func TestListSyncCommittees(t *testing.T) {
OptimisticModeFetcher: chainService,
FinalizationFetcher: chainService,
BeaconDB: db,
ChainInfoFetcher: chainService,
}
resp, err := s.ListSyncCommittees(ctx, req)
require.NoError(t, err)
@@ -234,10 +238,12 @@ func TestListSyncCommittees(t *testing.T) {

headerRoot, err := st.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
stSlot := st.Slot()
chainService := &mock.ChainService{
FinalizedRoots: map[[32]byte]bool{
headerRoot: true,
},
Slot: &stSlot,
}
s := &Server{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
@@ -250,6 +256,7 @@ func TestListSyncCommittees(t *testing.T) {
OptimisticModeFetcher: chainService,
FinalizationFetcher: chainService,
BeaconDB: db,
ChainInfoFetcher: chainService,
}
resp, err := s.ListSyncCommittees(ctx, req)
require.NoError(t, err)
@@ -310,7 +317,7 @@ func TestListSyncCommitteesFuture(t *testing.T) {
FinalizationFetcher: chainService,
BeaconDB: db,
}
req := &ethpbv2.StateSyncCommitteesRequest{}
req := &ethpbv2.StateSyncCommitteesRequest{StateId: []byte("head")}
epoch := 2 * params.BeaconConfig().EpochsPerSyncCommitteePeriod
req.Epoch = &epoch
_, err := s.ListSyncCommittees(ctx, req)

@@ -57,7 +57,7 @@ func (bs *Server) GetValidator(ctx context.Context, req *ethpb.StateValidatorReq
return nil, status.Error(codes.NotFound, "Could not find validator")
}

isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, bs.OptimisticModeFetcher, bs.StateFetcher, bs.ChainInfoFetcher, bs.BeaconDB)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -86,7 +86,7 @@ func (bs *Server) ListValidators(ctx context.Context, req *ethpb.StateValidators
return nil, handleValContainerErr(err)
}

isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, bs.OptimisticModeFetcher, bs.StateFetcher, bs.ChainInfoFetcher, bs.BeaconDB)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -155,7 +155,7 @@ func (bs *Server) ListValidatorBalances(ctx context.Context, req *ethpb.Validato
}
}

isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, bs.OptimisticModeFetcher, bs.StateFetcher, bs.ChainInfoFetcher, bs.BeaconDB)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -220,7 +220,7 @@ func (bs *Server) ListCommittees(ctx context.Context, req *ethpb.StateCommittees
}
}

isOptimistic, err := helpers.IsOptimistic(ctx, st, bs.OptimisticModeFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, bs.OptimisticModeFetcher, bs.StateFetcher, bs.ChainInfoFetcher, bs.BeaconDB)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}

@@ -41,7 +41,7 @@ func (ds *Server) GetBeaconStateV2(ctx context.Context, req *ethpbv2.BeaconState
if err != nil {
return nil, helpers.PrepareStateFetchGRPCError(err)
}
isOptimistic, err := helpers.IsOptimistic(ctx, beaconSt, ds.OptimisticModeFetcher)
isOptimistic, err := helpers.IsOptimistic(ctx, req.StateId, ds.OptimisticModeFetcher, ds.StateFetcher, ds.ChainInfoFetcher, ds.BeaconDB)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}
@@ -104,6 +104,19 @@ func (ds *Server) GetBeaconStateV2(ctx context.Context, req *ethpbv2.BeaconState
ExecutionOptimistic: isOptimistic,
Finalized: isFinalized,
}, nil
case version.Deneb:
protoState, err := migration.BeaconStateDenebToProto(beaconSt)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not convert state to proto: %v", err)
}
return &ethpbv2.BeaconStateResponseV2{
Version: ethpbv2.Version_Deneb,
Data: &ethpbv2.BeaconStateContainer{
State: &ethpbv2.BeaconStateContainer_DenebState{DenebState: protoState},
},
ExecutionOptimistic: isOptimistic,
}, nil

default:
return nil, status.Error(codes.Internal, "Unsupported state version")
}
@@ -133,6 +146,8 @@ func (ds *Server) GetBeaconStateSSZV2(ctx context.Context, req *ethpbv2.BeaconSt
ver = ethpbv2.Version_BELLATRIX
case version.Capella:
ver = ethpbv2.Version_CAPELLA
case version.Deneb:
ver = ethpbv2.Version_Deneb
default:
return nil, status.Error(codes.Internal, "Unsupported state version")
}
@@ -166,7 +181,5 @@ func (ds *Server) ListForkChoiceHeadsV2(ctx context.Context, _ *emptypb.Empty) (

// GetForkChoice returns a dump fork choice store.
func (ds *Server) GetForkChoice(ctx context.Context, _ *emptypb.Empty) (*ethpbv1.ForkChoiceDump, error) {
ds.ForkFetcher.ForkChoicer().RLock()
defer ds.ForkFetcher.ForkChoicer().RUnlock()
return ds.ForkFetcher.ForkChoicer().ForkChoiceDump(ctx)
return ds.ForkchoiceFetcher.ForkChoiceDump(ctx)
}

@@ -37,7 +37,7 @@ func TestGetBeaconStateV2(t *testing.T) {
BeaconDB: db,
}
resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.BeaconStateRequestV2{
StateId: make([]byte, 0),
StateId: []byte("head"),
})
require.NoError(t, err)
assert.NotNil(t, resp)
@@ -55,7 +55,7 @@ func TestGetBeaconStateV2(t *testing.T) {
BeaconDB: db,
}
resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.BeaconStateRequestV2{
StateId: make([]byte, 0),
StateId: []byte("head"),
})
require.NoError(t, err)
assert.NotNil(t, resp)
@@ -73,7 +73,7 @@ func TestGetBeaconStateV2(t *testing.T) {
BeaconDB: db,
}
resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.BeaconStateRequestV2{
StateId: make([]byte, 0),
StateId: []byte("head"),
})
require.NoError(t, err)
assert.NotNil(t, resp)
@@ -91,7 +91,7 @@ func TestGetBeaconStateV2(t *testing.T) {
BeaconDB: db,
}
resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.BeaconStateRequestV2{
StateId: make([]byte, 0),
StateId: []byte("head"),
})
require.NoError(t, err)
assert.NotNil(t, resp)
@@ -117,7 +117,7 @@ func TestGetBeaconStateV2(t *testing.T) {
BeaconDB: db,
}
resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.BeaconStateRequestV2{
StateId: make([]byte, 0),
StateId: []byte("head"),
})
require.NoError(t, err)
assert.NotNil(t, resp)
@@ -150,7 +150,7 @@ func TestGetBeaconStateV2(t *testing.T) {
BeaconDB: db,
}
resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.BeaconStateRequestV2{
StateId: make([]byte, 0),
StateId: []byte("head"),
})
require.NoError(t, err)
assert.NotNil(t, resp)
@@ -293,7 +293,13 @@ func TestListForkChoiceHeadsV2(t *testing.T) {
}

t.Run("optimistic head", func(t *testing.T) {
chainService := &blockchainmock.ChainService{Optimistic: true}
chainService := &blockchainmock.ChainService{
Optimistic: true,
OptimisticRoots: make(map[[32]byte]bool),
}
for _, sr := range expectedSlotsAndRoots {
chainService.OptimisticRoots[sr.Root] = true
}
server := &Server{
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
@@ -320,7 +326,7 @@ func TestServer_GetForkChoice(t *testing.T) {
fRoot := [32]byte{'a'}
fc := &forkchoicetypes.Checkpoint{Epoch: 2, Root: fRoot}
require.NoError(t, store.UpdateFinalizedCheckpoint(fc))
bs := &Server{ForkFetcher: &blockchainmock.ChainService{ForkChoiceStore: store}}
bs := &Server{ForkchoiceFetcher: &blockchainmock.ChainService{ForkChoiceStore: store}}
res, err := bs.GetForkChoice(context.Background(), &empty.Empty{})
require.NoError(t, err)
require.Equal(t, primitives.Epoch(2), res.FinalizedCheckpoint.Epoch, "Did not get wanted finalized epoch")

@@ -17,5 +17,7 @@ type Server struct {
StateFetcher statefetcher.Fetcher
OptimisticModeFetcher blockchain.OptimisticModeFetcher
ForkFetcher blockchain.ForkFetcher
ForkchoiceFetcher blockchain.ForkchoiceFetcher
FinalizationFetcher blockchain.FinalizationFetcher
ChainInfoFetcher blockchain.ChainInfoFetcher
}

@@ -331,7 +331,7 @@ func (s *Server) streamPayloadAttributes(stream ethpbservice.Events_StreamEvents
ParentBlockNumber: headPayload.BlockNumber(),
ParentBlockRoot: headRoot,
ParentBlockHash: headPayload.BlockHash(),
PayloadAttributesV2: &enginev1.PayloadAttributesV2{
PayloadAttributes: &enginev1.PayloadAttributesV2{
Timestamp: uint64(t.Unix()),
PrevRandao: prevRando,
SuggestedFeeRecipient: headPayload.FeeRecipient(),

@@ -494,7 +494,7 @@ func TestStreamEvents_StateEvents(t *testing.T) {
ParentBlockNumber: 1,
ParentBlockRoot: make([]byte, 32),
ParentBlockHash: make([]byte, 32),
PayloadAttributesV2: &enginev1.PayloadAttributesV2{
PayloadAttributes: &enginev1.PayloadAttributesV2{
Timestamp: 24,
PrevRandao: prevRando,
SuggestedFeeRecipient: make([]byte, 20),

@@ -12,13 +12,16 @@ go_library(
deps = [
"//api/grpc:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/rpc/statefetcher:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/eth/v1:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
@@ -35,12 +38,21 @@ go_test(
deps = [
"//api/grpc:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/rpc/testutil:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/sync/initial-sync/testing:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/migration:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",

@@ -1,16 +1,19 @@
package helpers

import (
"bytes"
"context"
"strconv"
"strings"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api/grpc"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/statefetcher"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
@@ -53,26 +56,126 @@ func ValidateSync(
return status.Error(codes.Unavailable, "Syncing to latest head, not ready to respond")
}

// IsOptimistic checks whether the latest block header of the passed in beacon state is the header of an optimistic block.
func IsOptimistic(ctx context.Context, st state.BeaconState, optimisticModeFetcher blockchain.OptimisticModeFetcher) (bool, error) {
header := st.LatestBlockHeader()
// This happens when the block at the state's slot is not missing.
if bytes.Equal(header.StateRoot, params.BeaconConfig().ZeroHash[:]) {
root, err := st.HashTreeRoot(ctx)
if err != nil {
return false, errors.Wrap(err, "could not get state root")
// IsOptimistic checks whether the beacon state's block is optimistic.
func IsOptimistic(
ctx context.Context,
stateId []byte,
optimisticModeFetcher blockchain.OptimisticModeFetcher,
stateFetcher statefetcher.Fetcher,
chainInfo blockchain.ChainInfoFetcher,
database db.ReadOnlyDatabase,
) (bool, error) {
stateIdString := strings.ToLower(string(stateId))
switch stateIdString {
case "head":
return optimisticModeFetcher.IsOptimistic(ctx)
case "genesis":
return false, nil
case "finalized":
fcp := chainInfo.FinalizedCheckpt()
if fcp == nil {
return true, errors.New("received nil finalized checkpoint")
}
return optimisticModeFetcher.IsOptimisticForRoot(ctx, bytesutil.ToBytes32(fcp.Root))
case "justified":
jcp := chainInfo.CurrentJustifiedCheckpt()
if jcp == nil {
return true, errors.New("received nil justified checkpoint")
}
return optimisticModeFetcher.IsOptimisticForRoot(ctx, bytesutil.ToBytes32(jcp.Root))
default:
if len(stateId) == 32 {
return isStateRootOptimistic(ctx, stateId, optimisticModeFetcher, stateFetcher, chainInfo, database)
} else {
optimistic, err := optimisticModeFetcher.IsOptimistic(ctx)
if err != nil {
return true, errors.Wrap(err, "could not check optimistic status")
}
if !optimistic {
return false, nil
}
slotNumber, parseErr := strconv.ParseUint(stateIdString, 10, 64)
if parseErr != nil {
// ID format does not match any valid options.
e := statefetcher.NewStateIdParseError(parseErr)
return true, &e
}
fcp := chainInfo.FinalizedCheckpt()
if fcp == nil {
return true, errors.New("received nil finalized checkpoint")
}
finalizedSlot, err := slots.EpochStart(fcp.Epoch)
if err != nil {
return true, errors.Wrap(err, "could not get head state's finalized slot")
}
lastValidatedCheckpoint, err := database.LastValidatedCheckpoint(ctx)
if err != nil {
return true, errors.Wrap(err, "could not get last validated checkpoint")
}
validatedSlot, err := slots.EpochStart(lastValidatedCheckpoint.Epoch)
if err != nil {
return true, errors.Wrap(err, "could not get last validated slot")
}
if primitives.Slot(slotNumber) <= validatedSlot {
return false, nil
}
// if the finalized checkpoint is higher than the last
// validated checkpoint, we are syncing and have synced
// a finalization being optimistic
if validatedSlot < finalizedSlot {
return true, nil
}
if primitives.Slot(slotNumber) == chainInfo.HeadSlot() {
// We know the head is optimistic because we checked it above.
return true, nil
}
headRoot, err := chainInfo.HeadRoot(ctx)
if err != nil {
return true, errors.Wrap(err, "could not get head root")
}
r, err := chainInfo.Ancestor(ctx, headRoot, primitives.Slot(slotNumber))
if err != nil {
return true, errors.Wrap(err, "could not get ancestor root")
}
return optimisticModeFetcher.IsOptimisticForRoot(ctx, bytesutil.ToBytes32(r))
}
header.StateRoot = root[:]
}
headRoot, err := header.HashTreeRoot()
}

func isStateRootOptimistic(
ctx context.Context,
stateId []byte,
optimisticModeFetcher blockchain.OptimisticModeFetcher,
stateFetcher statefetcher.Fetcher,
chainInfo blockchain.ChainInfoFetcher,
database db.ReadOnlyDatabase,
) (bool, error) {
st, err := stateFetcher.State(ctx, stateId)
if err != nil {
return false, errors.Wrap(err, "could not get header root")
return true, errors.Wrap(err, "could not fetch state")
}
isOptimistic, err := optimisticModeFetcher.IsOptimisticForRoot(ctx, headRoot)
if st.Slot() == chainInfo.HeadSlot() {
return optimisticModeFetcher.IsOptimistic(ctx)
}
has, roots, err := database.BlockRootsBySlot(ctx, st.Slot())
if err != nil {
return false, errors.Wrap(err, "could not check if block is optimistic")
return true, errors.Wrapf(err, "could not get block roots for slot %d", st.Slot())
}
return isOptimistic, nil
if !has {
return true, errors.New("no block roots returned from the database")
}
for _, r := range roots {
b, err := database.Block(ctx, r)
if err != nil {
return true, errors.Wrapf(err, "could not obtain block")
}
if bytesutil.ToBytes32(stateId) != b.Block().StateRoot() {
continue
}
return optimisticModeFetcher.IsOptimisticForRoot(ctx, r)
}
// No block matching requested state root, return true.
return true, nil
}

// SyncDetailsJson contains information about node sync status.

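The slot branch of the new IsOptimistic above applies its checks in a strict order: head status first, then the last validated checkpoint, then finalization progress, then head slot, and only lastly an ancestor lookup. A condensed model of that decision ladder using plain integers instead of Prysm types (illustration only):

// Condensed model of the slot-path ordering in IsOptimistic above.
package main

import "fmt"

func slotIsOptimistic(slot, validatedSlot, finalizedSlot, headSlot uint64, headOptimistic bool, ancestorOptimistic func(uint64) bool) bool {
	if !headOptimistic {
		return false // nothing in the chain is optimistic
	}
	if slot <= validatedSlot {
		return false // at or below the last fully validated checkpoint
	}
	if validatedSlot < finalizedSlot {
		return true // still syncing an optimistically imported finalization
	}
	if slot == headSlot {
		return true // head was already known to be optimistic above
	}
	return ancestorOptimistic(slot) // fall back to the ancestor root's status
}

func main() {
	anc := func(uint64) bool { return true }
	fmt.Println(slotIsOptimistic(10, 32, 32, 64, true, anc)) // false
	fmt.Println(slotIsOptimistic(40, 32, 32, 64, true, anc)) // true
}
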
@@ -2,14 +2,26 @@ package helpers

import (
"context"
"strconv"
"strings"
"testing"

"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
grpcutil "github.com/prysmaticlabs/prysm/v4/api/grpc"
chainmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
dbtest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
syncmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
@@ -62,44 +74,257 @@ func TestValidateSync(t *testing.T) {

func TestIsOptimistic(t *testing.T) {
ctx := context.Background()
st, err := util.NewBeaconState()
require.NoError(t, err)

t.Run("optimistic", func(t *testing.T) {
mockOptSyncFetcher := &chainmock.ChainService{Optimistic: true}
o, err := IsOptimistic(ctx, st, mockOptSyncFetcher)
t.Run("head optimistic", func(t *testing.T) {
cs := &chainmock.ChainService{Optimistic: true}
o, err := IsOptimistic(ctx, []byte("head"), cs, nil, nil, nil)
require.NoError(t, err)
assert.Equal(t, true, o)
})
t.Run("not optimistic", func(t *testing.T) {
mockOptSyncFetcher := &chainmock.ChainService{Optimistic: false}
o, err := IsOptimistic(ctx, st, mockOptSyncFetcher)
t.Run("head not optimistic", func(t *testing.T) {
cs := &chainmock.ChainService{Optimistic: false}
o, err := IsOptimistic(ctx, []byte("head"), cs, nil, nil, nil)
require.NoError(t, err)
assert.Equal(t, false, o)
})
t.Run("zero state root", func(t *testing.T) {
zeroRootSt, err := util.NewBeaconState()
t.Run("genesis", func(t *testing.T) {
o, err := IsOptimistic(ctx, []byte("genesis"), nil, nil, nil, nil)
require.NoError(t, err)
h := zeroRootSt.LatestBlockHeader()
h.StateRoot = make([]byte, 32)
require.NoError(t, zeroRootSt.SetLatestBlockHeader(h))
mockOptSyncFetcher := &chainmock.ChainService{}
_, err = IsOptimistic(ctx, st, mockOptSyncFetcher)
require.NoError(t, err)
assert.DeepEqual(
t,
[32]byte{0xfc, 0x0, 0xe9, 0x6d, 0xb, 0x8b, 0x2, 0x2f, 0x61, 0xeb, 0x92, 0x10, 0xfd, 0x80, 0x84, 0x2b, 0x26, 0x61, 0xdc, 0x94, 0x5f, 0x7a, 0xf0, 0x0, 0xbc, 0x38, 0x6, 0x38, 0x71, 0x95, 0x43, 0x1},
mockOptSyncFetcher.OptimisticCheckRootReceived,
)
assert.Equal(t, false, o)
})
t.Run("non-zero state root", func(t *testing.T) {
mockOptSyncFetcher := &chainmock.ChainService{}
_, err = IsOptimistic(ctx, st, mockOptSyncFetcher)
require.NoError(t, err)
assert.DeepEqual(
t,
[32]byte{0xfc, 0x0, 0xe9, 0x6d, 0xb, 0x8b, 0x2, 0x2f, 0x61, 0xeb, 0x92, 0x10, 0xfd, 0x80, 0x84, 0x2b, 0x26, 0x61, 0xdc, 0x94, 0x5f, 0x7a, 0xf0, 0x0, 0xbc, 0x38, 0x6, 0x38, 0x71, 0x95, 0x43, 0x1},
mockOptSyncFetcher.OptimisticCheckRootReceived,
)
t.Run("finalized", func(t *testing.T) {
t.Run("finalized checkpoint is optimistic", func(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
cs := &chainmock.ChainService{Optimistic: true, FinalizedCheckPoint: &eth.Checkpoint{}, OptimisticRoots: map[[32]byte]bool{[32]byte{}: true}}
mf := &testutil.MockFetcher{BeaconState: st}
o, err := IsOptimistic(ctx, []byte("finalized"), cs, mf, cs, nil)
require.NoError(t, err)
assert.Equal(t, true, o)
})
t.Run("finalized checkpoint is not optimistic", func(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
cs := &chainmock.ChainService{Optimistic: true, FinalizedCheckPoint: &eth.Checkpoint{}}
mf := &testutil.MockFetcher{BeaconState: st}
o, err := IsOptimistic(ctx, []byte("finalized"), cs, mf, cs, nil)
require.NoError(t, err)
assert.Equal(t, false, o)
})
})
t.Run("justified", func(t *testing.T) {
t.Run("justified checkpoint is optimistic", func(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
cs := &chainmock.ChainService{Optimistic: true, CurrentJustifiedCheckPoint: &eth.Checkpoint{}, OptimisticRoots: map[[32]byte]bool{[32]byte{}: true}}
mf := &testutil.MockFetcher{BeaconState: st}
o, err := IsOptimistic(ctx, []byte("justified"), cs, mf, cs, nil)
require.NoError(t, err)
assert.Equal(t, true, o)
})
t.Run("justified checkpoint is not optimistic", func(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
cs := &chainmock.ChainService{Optimistic: true, CurrentJustifiedCheckPoint: &eth.Checkpoint{}}
mf := &testutil.MockFetcher{BeaconState: st}
o, err := IsOptimistic(ctx, []byte("justified"), cs, mf, cs, nil)
require.NoError(t, err)
assert.Equal(t, false, o)
})
})
t.Run("root", func(t *testing.T) {
t.Run("is head and head is optimistic", func(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
cs := &chainmock.ChainService{Optimistic: true}
mf := &testutil.MockFetcher{BeaconState: st}
o, err := IsOptimistic(ctx, bytesutil.PadTo([]byte("root"), 32), cs, mf, cs, nil)
require.NoError(t, err)
assert.Equal(t, true, o)
})
t.Run("is head and head is not optimistic", func(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
cs := &chainmock.ChainService{Optimistic: false}
mf := &testutil.MockFetcher{BeaconState: st}
o, err := IsOptimistic(ctx, bytesutil.PadTo([]byte("root"), 32), cs, mf, cs, nil)
require.NoError(t, err)
assert.Equal(t, false, o)
})
t.Run("root is optimistic", func(t *testing.T) {
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
b.SetStateRoot(bytesutil.PadTo([]byte("root"), 32))
db := dbtest.SetupDB(t)
require.NoError(t, db.SaveBlock(ctx, b))
fetcherSt, err := util.NewBeaconState()
require.NoError(t, err)
chainSt, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, chainSt.SetSlot(fieldparams.SlotsPerEpoch))
bRoot, err := b.Block().HashTreeRoot()
require.NoError(t, err)
cs := &chainmock.ChainService{State: chainSt, OptimisticRoots: map[[32]byte]bool{bRoot: true}}
mf := &testutil.MockFetcher{BeaconState: fetcherSt}
o, err := IsOptimistic(ctx, bytesutil.PadTo([]byte("root"), 32), cs, mf, cs, db)
require.NoError(t, err)
assert.Equal(t, true, o)
})
t.Run("root is not optimistic", func(t *testing.T) {
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
b.SetStateRoot(bytesutil.PadTo([]byte("root"), 32))
db := dbtest.SetupDB(t)
require.NoError(t, db.SaveBlock(ctx, b))
fetcherSt, err := util.NewBeaconState()
require.NoError(t, err)
chainSt, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, chainSt.SetSlot(fieldparams.SlotsPerEpoch))
cs := &chainmock.ChainService{State: chainSt}
mf := &testutil.MockFetcher{BeaconState: fetcherSt}
o, err := IsOptimistic(ctx, bytesutil.PadTo([]byte("root"), 32), cs, mf, cs, db)
require.NoError(t, err)
assert.Equal(t, false, o)
})
t.Run("no canonical blocks", func(t *testing.T) {
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
db := dbtest.SetupDB(t)
require.NoError(t, db.SaveBlock(ctx, b))
fetcherSt, err := util.NewBeaconState()
require.NoError(t, err)
chainSt, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, chainSt.SetSlot(fieldparams.SlotsPerEpoch))
cs := &chainmock.ChainService{Optimistic: false, State: chainSt, CanonicalRoots: map[[32]byte]bool{}}
mf := &testutil.MockFetcher{BeaconState: fetcherSt}
o, err := IsOptimistic(ctx, bytesutil.PadTo([]byte("root"), 32), nil, mf, cs, db)
require.NoError(t, err)
assert.Equal(t, true, o)
})
})
t.Run("slot", func(t *testing.T) {
t.Run("head is not optimistic", func(t *testing.T) {
cs := &chainmock.ChainService{Optimistic: false}
o, err := IsOptimistic(ctx, []byte("0"), cs, nil, nil, nil)
require.NoError(t, err)
assert.Equal(t, false, o)
})
t.Run("is before validated slot when head is optimistic", func(t *testing.T) {
db := dbtest.SetupDB(t)
require.NoError(t, db.SaveStateSummary(ctx, &eth.StateSummary{Slot: fieldparams.SlotsPerEpoch, Root: []byte("root")}))
require.NoError(t, db.SaveLastValidatedCheckpoint(ctx, &eth.Checkpoint{Epoch: 1, Root: []byte("root")}))
cs := &chainmock.ChainService{Optimistic: true, FinalizedCheckPoint: &eth.Checkpoint{Epoch: 1}}
o, err := IsOptimistic(ctx, []byte("0"), cs, nil, cs, db)
require.NoError(t, err)
assert.Equal(t, false, o)
})
t.Run("is equal to validated slot when head is optimistic", func(t *testing.T) {
db := dbtest.SetupDB(t)
require.NoError(t, db.SaveStateSummary(ctx, &eth.StateSummary{Slot: fieldparams.SlotsPerEpoch, Root: []byte("root")}))
require.NoError(t, db.SaveLastValidatedCheckpoint(ctx, &eth.Checkpoint{Epoch: 1, Root: []byte("root")}))
cs := &chainmock.ChainService{Optimistic: true, FinalizedCheckPoint: &eth.Checkpoint{Epoch: 1}}
o, err := IsOptimistic(ctx, []byte("32"), cs, nil, cs, db)
require.NoError(t, err)
assert.Equal(t, false, o)
})
t.Run("is after validated slot and validated slot is before finalized slot", func(t *testing.T) {
db := dbtest.SetupDB(t)
require.NoError(t, db.SaveStateSummary(ctx, &eth.StateSummary{Slot: fieldparams.SlotsPerEpoch, Root: []byte("root")}))
require.NoError(t, db.SaveLastValidatedCheckpoint(ctx, &eth.Checkpoint{Epoch: 1, Root: []byte("root")}))
cs := &chainmock.ChainService{Optimistic: true, FinalizedCheckPoint: &eth.Checkpoint{Epoch: 2}}
o, err := IsOptimistic(ctx, []byte("33"), cs, nil, cs, db)
require.NoError(t, err)
assert.Equal(t, true, o)
})
t.Run("is head", func(t *testing.T) {
db := dbtest.SetupDB(t)
require.NoError(t, db.SaveStateSummary(ctx, &eth.StateSummary{Slot: fieldparams.SlotsPerEpoch, Root: []byte("root")}))
require.NoError(t, db.SaveLastValidatedCheckpoint(ctx, &eth.Checkpoint{Epoch: 1, Root: []byte("root")}))
fetcherSt, err := util.NewBeaconState()
require.NoError(t, err)
chainSt, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, chainSt.SetSlot(fieldparams.SlotsPerEpoch*2))
cs := &chainmock.ChainService{Optimistic: true, State: chainSt, FinalizedCheckPoint: &eth.Checkpoint{Epoch: 0}}
mf := &testutil.MockFetcher{BeaconState: fetcherSt}
o, err := IsOptimistic(ctx, []byte(strconv.Itoa(fieldparams.SlotsPerEpoch*2)), cs, mf, cs, db)
require.NoError(t, err)
assert.Equal(t, true, o)
})
t.Run("ancestor is optimistic", func(t *testing.T) {
db := dbtest.SetupDB(t)
require.NoError(t, db.SaveStateSummary(ctx, &eth.StateSummary{Slot: fieldparams.SlotsPerEpoch, Root: []byte("root")}))
require.NoError(t, db.SaveLastValidatedCheckpoint(ctx, &eth.Checkpoint{Epoch: 1, Root: []byte("root")}))
r := bytesutil.ToBytes32([]byte("root"))
fcs := doublylinkedtree.New()
finalizedCheckpt := &eth.Checkpoint{Epoch: 0}
st, root, err := prepareForkchoiceState(fieldparams.SlotsPerEpoch*2, r, [32]byte{}, [32]byte{}, finalizedCheckpt, finalizedCheckpt)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
headRoot := [32]byte{'r'}
st, root, err = prepareForkchoiceState(fieldparams.SlotsPerEpoch*2+1, headRoot, r, [32]byte{}, finalizedCheckpt, finalizedCheckpt)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
cs := &chainmock.ChainService{Root: headRoot[:], Optimistic: true, ForkChoiceStore: fcs, OptimisticRoots: map[[32]byte]bool{r: true}, FinalizedCheckPoint: finalizedCheckpt}
mf := &testutil.MockFetcher{BeaconState: st}
o, err := IsOptimistic(ctx, []byte(strconv.Itoa(fieldparams.SlotsPerEpoch*2)), cs, mf, cs, db)
require.NoError(t, err)
assert.Equal(t, true, o)
})
t.Run("ancestor is not optimistic", func(t *testing.T) {
db := dbtest.SetupDB(t)
require.NoError(t, db.SaveStateSummary(ctx, &eth.StateSummary{Slot: fieldparams.SlotsPerEpoch, Root: []byte("root")}))
require.NoError(t, db.SaveLastValidatedCheckpoint(ctx, &eth.Checkpoint{Epoch: 1, Root: []byte("root")}))
r := bytesutil.ToBytes32([]byte("root"))
fcs := doublylinkedtree.New()
finalizedCheckpt := &eth.Checkpoint{Epoch: 0}
st, root, err := prepareForkchoiceState(fieldparams.SlotsPerEpoch*2, r, [32]byte{}, [32]byte{}, finalizedCheckpt, finalizedCheckpt)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
headRoot := [32]byte{'r'}
st, root, err = prepareForkchoiceState(fieldparams.SlotsPerEpoch*2+1, headRoot, r, [32]byte{}, finalizedCheckpt, finalizedCheckpt)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
cs := &chainmock.ChainService{Root: headRoot[:], Optimistic: true, ForkChoiceStore: fcs, OptimisticRoots: map[[32]byte]bool{r: false}, FinalizedCheckPoint: finalizedCheckpt}
mf := &testutil.MockFetcher{BeaconState: st}
o, err := IsOptimistic(ctx, []byte(strconv.Itoa(fieldparams.SlotsPerEpoch*2)), cs, mf, cs, db)
require.NoError(t, err)
assert.Equal(t, false, o)
})
})
}

// prepareForkchoiceState prepares a beacon state with the given data to mock
// insert into forkchoice
func prepareForkchoiceState(
slot primitives.Slot,
blockRoot [32]byte,
parentRoot [32]byte,
payloadHash [32]byte,
justified *eth.Checkpoint,
finalized *eth.Checkpoint,
) (state.BeaconState, [32]byte, error) {
blockHeader := &eth.BeaconBlockHeader{
ParentRoot: parentRoot[:],
}

executionHeader := &enginev1.ExecutionPayloadHeader{
BlockHash: payloadHash[:],
}

base := &eth.BeaconStateBellatrix{
Slot: slot,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
BlockRoots: make([][]byte, 1),
CurrentJustifiedCheckpoint: justified,
FinalizedCheckpoint: finalized,
LatestExecutionPayloadHeader: executionHeader,
LatestBlockHeader: blockHeader,
}

base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
st, err := state_native.InitializeFromProtoBellatrix(base)
return st, blockRoot, err
}

@@ -13,8 +13,10 @@ go_library(
"//beacon-chain/builder:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/rpc/eth/helpers:go_default_library",

@@ -3,7 +3,9 @@ package validator
import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
v1alpha1validator "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/validator"
@@ -15,7 +17,6 @@ import (
// providing RPC endpoints intended for validator clients.
type Server struct {
HeadFetcher blockchain.HeadFetcher
HeadUpdater blockchain.HeadUpdater
TimeFetcher blockchain.TimeFetcher
SyncChecker sync.Checker
AttestationsPool attestations.Pool
@@ -24,6 +25,9 @@ type Server struct {
StateFetcher statefetcher.Fetcher
OptimisticModeFetcher blockchain.OptimisticModeFetcher
SyncCommitteePool synccommittee.Pool
BLSChangesPool blstoexec.PoolManager
V1Alpha1Server *v1alpha1validator.Server
ProposerSlotIndexCache *cache.ProposerPayloadIDsCache
ChainInfoFetcher blockchain.ChainInfoFetcher
BeaconDB db.ReadOnlyDatabase
}

@@ -278,7 +278,14 @@ func (vs *Server) GetSyncCommitteeDuties(ctx context.Context, req *ethpbv2.SyncC
return nil, status.Errorf(codes.Internal, "Could not get duties: %v", err)
}

isOptimistic, err := rpchelpers.IsOptimistic(ctx, st, vs.OptimisticModeFetcher)
isOptimistic, err := rpchelpers.IsOptimistic(
ctx,
[]byte(strconv.FormatUint(uint64(slot), 10)),
vs.OptimisticModeFetcher,
vs.StateFetcher,
vs.ChainInfoFetcher,
vs.BeaconDB,
)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if slot's block is optimistic: %v", err)
}

@@ -466,7 +466,6 @@ func TestGetSyncCommitteeDuties(t *testing.T) {

}
require.NoError(t, st.SetNextSyncCommittee(nextCommittee))
db := dbutil.SetupDB(t)

mockChainService := &mockChain.ChainService{Genesis: genesisTime}
vs := &Server{
@@ -636,6 +635,10 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
})

t.Run("execution optimistic", func(t *testing.T) {
db := dbutil.SetupDB(t)
require.NoError(t, db.SaveStateSummary(ctx, &ethpbalpha.StateSummary{Slot: 0, Root: []byte("root")}))
require.NoError(t, db.SaveLastValidatedCheckpoint(ctx, &ethpbalpha.Checkpoint{Epoch: 0, Root: []byte("root")}))

parentRoot := [32]byte{'a'}
blk := util.NewBeaconBlock()
blk.Block.ParentRoot = parentRoot[:]
@@ -644,16 +647,34 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
util.SaveBlock(t, ctx, db, blk)
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))

mockChainService := &mockChain.ChainService{Genesis: genesisTime, Optimistic: true}
slot, err := slots.EpochStart(1)
require.NoError(t, err)

state, err := util.NewBeaconStateBellatrix()
require.NoError(t, err)
require.NoError(t, state.SetSlot(slot))

mockChainService := &mockChain.ChainService{
Genesis: genesisTime,
Optimistic: true,
Slot: &slot,
FinalizedCheckPoint: &ethpbalpha.Checkpoint{
Root: root[:],
Epoch: 1,
},
State: state,
}
vs := &Server{
StateFetcher: &testutil.MockFetcher{BeaconState: st},
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
ChainInfoFetcher: mockChainService,
BeaconDB: db,
}
req := &ethpbv2.SyncCommitteeDutiesRequest{
Epoch: 0,
Epoch: 1,
Index: []primitives.ValidatorIndex{1},
}
resp, err := vs.GetSyncCommitteeDuties(ctx, req)
@@ -702,8 +723,8 @@ func TestProduceBlockV2(t *testing.T) {
HeadFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ForkFetcher: mockChainService,
ForkchoiceFetcher: mockChainService,
ChainStartFetcher: mockExecutionChain,
Eth1InfoFetcher: mockExecutionChain,
Eth1BlockFetcher: mockExecutionChain,
@@ -814,8 +835,8 @@ func TestProduceBlockV2(t *testing.T) {
HeadFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ForkFetcher: mockChainService,
ForkchoiceFetcher: mockChainService,
ChainStartFetcher: mockExecutionChain,
Eth1InfoFetcher: mockExecutionChain,
Eth1BlockFetcher: mockExecutionChain,
@@ -1014,8 +1035,8 @@ func TestProduceBlockV2(t *testing.T) {
OptimisticModeFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ForkFetcher: mockChainService,
ForkchoiceFetcher: mockChainService,
ChainStartFetcher: mockExecutionChain,
Eth1InfoFetcher: mockExecutionChain,
Eth1BlockFetcher: mockExecutionChain,
@@ -1257,8 +1278,8 @@ func TestProduceBlockV2(t *testing.T) {
OptimisticModeFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ForkFetcher: mockChainService,
ForkchoiceFetcher: mockChainService,
ChainStartFetcher: mockExecutionChain,
Eth1InfoFetcher: mockExecutionChain,
Eth1BlockFetcher: mockExecutionChain,
@@ -1395,8 +1416,9 @@ func TestProduceBlockV2SSZ(t *testing.T) {
HeadFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
TimeFetcher: mockChainService,
ForkFetcher: mockChainService,
ForkchoiceFetcher: mockChainService,
ChainStartFetcher: &mockExecution.Chain{},
Eth1InfoFetcher: &mockExecution.Chain{},
Eth1BlockFetcher: &mockExecution.Chain{},
@@ -1558,8 +1580,9 @@ func TestProduceBlockV2SSZ(t *testing.T) {
HeadFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
TimeFetcher: mockChainService,
ForkFetcher: mockChainService,
ForkchoiceFetcher: mockChainService,
ChainStartFetcher: &mockExecution.Chain{},
Eth1InfoFetcher: &mockExecution.Chain{},
Eth1BlockFetcher: &mockExecution.Chain{},
@@ -1764,8 +1787,8 @@ func TestProduceBlockV2SSZ(t *testing.T) {
OptimisticModeFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ForkFetcher: mockChainService,
ForkchoiceFetcher: mockChainService,
ChainStartFetcher: &mockExecution.Chain{},
Eth1InfoFetcher: &mockExecution.Chain{},
Eth1BlockFetcher: &mockExecution.Chain{},
@@ -2008,8 +2031,8 @@ func TestProduceBlockV2SSZ(t *testing.T) {
OptimisticModeFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ForkFetcher: mockChainService,
ForkchoiceFetcher: mockChainService,
ChainStartFetcher: &mockExecution.Chain{},
Eth1InfoFetcher: &mockExecution.Chain{},
Eth1BlockFetcher: &mockExecution.Chain{},
@@ -2239,8 +2262,9 @@ func TestProduceBlindedBlock(t *testing.T) {
HeadFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ForkFetcher: mockChainService,
TimeFetcher: mockChainService,
ForkchoiceFetcher: mockChainService,
ChainStartFetcher: &mockExecution.Chain{},
Eth1InfoFetcher: &mockExecution.Chain{},
Eth1BlockFetcher: &mockExecution.Chain{},
@@ -2345,8 +2369,9 @@ func TestProduceBlindedBlock(t *testing.T) {
HeadFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ForkFetcher: mockChainService,
TimeFetcher: mockChainService,
ForkchoiceFetcher: mockChainService,
ChainStartFetcher: &mockExecution.Chain{},
Eth1InfoFetcher: &mockExecution.Chain{},
Eth1BlockFetcher: &mockExecution.Chain{},
@@ -2536,12 +2561,12 @@ func TestProduceBlindedBlock(t *testing.T) {
v1Alpha1Server := &v1alpha1validator.Server{
BeaconDB: db,
ForkFetcher: mockChainService,
ForkchoiceFetcher: mockChainService,
TimeFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ChainStartFetcher: &mockExecution.Chain{GenesisState: beaconState},
Eth1InfoFetcher: &mockExecution.Chain{},
Eth1BlockFetcher: &mockExecution.Chain{},
@@ -2759,12 +2784,12 @@ func TestProduceBlindedBlock(t *testing.T) {
ExecutionEngineCaller: &mockExecution.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &enginev1.ExecutionPayloadCapella{BlockNumber: 1, Withdrawals: wds}, BlockValue: big.NewInt(0)},
BeaconDB: db,
ForkFetcher: mockChainService,
ForkchoiceFetcher: mockChainService,
TimeFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ChainStartFetcher: &mockExecution.Chain{},
Eth1InfoFetcher: &mockExecution.Chain{},
Eth1BlockFetcher: &mockExecution.Chain{},
@@ -2909,8 +2934,9 @@ func TestProduceBlindedBlockSSZ(t *testing.T) {
HeadFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
TimeFetcher: mockChainService,
ForkFetcher: mockChainService,
ForkchoiceFetcher: mockChainService,
ChainStartFetcher: &mockExecution.Chain{},
Eth1InfoFetcher: &mockExecution.Chain{},
Eth1BlockFetcher: &mockExecution.Chain{},
@@ -3072,8 +3098,9 @@ func TestProduceBlindedBlockSSZ(t *testing.T) {
HeadFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
HeadUpdater: mockChainService,
TimeFetcher: mockChainService,
ForkFetcher: mockChainService,
ForkchoiceFetcher: mockChainService,
ChainStartFetcher: &mockExecution.Chain{},
Eth1InfoFetcher: &mockExecution.Chain{},
Eth1BlockFetcher: &mockExecution.Chain{},
@@ -3278,8 +3305,8 @@ func TestProduceBlindedBlockSSZ(t *testing.T) {
OptimisticModeFetcher: mockChainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ForkFetcher: mockChainService,
ForkchoiceFetcher: mockChainService,
ChainStartFetcher: &mockExecution.Chain{},
Eth1InfoFetcher: &mockExecution.Chain{},
Eth1BlockFetcher: &mockExecution.Chain{},
@@ -3516,8 +3543,8 @@ func TestProduceBlindedBlockSSZ(t *testing.T) {
OptimisticModeFetcher: &mockChain.ChainService{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: mockChainService,
HeadUpdater: mockChainService,
ForkFetcher: mockChainService,
ForkchoiceFetcher: mockChainService,
ChainStartFetcher: &mockExecution.Chain{},
Eth1InfoFetcher: &mockExecution.Chain{},
Eth1BlockFetcher: &mockExecution.Chain{},

@@ -122,7 +122,7 @@ func convertToBlockContainer(blk interfaces.ReadOnlySignedBeaconBlock, root [32]
}
ctr.Block = &ethpb.BeaconBlockContainer_BellatrixBlock{BellatrixBlock: rBlk}
}
case version.Capella:
case version.Capella, version.Deneb:
if blk.IsBlinded() {
rBlk, err := blk.PbBlindedCapellaBlock()
if err != nil {

@@ -46,6 +46,5 @@ type Server struct {
StateGen stategen.StateManager
SyncChecker sync.Checker
ReplayerBuilder stategen.ReplayerBuilder
HeadUpdater blockchain.HeadUpdater
OptimisticModeFetcher blockchain.OptimisticModeFetcher
}
@@ -45,6 +45,7 @@ go_library(
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/core/transition/interop:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/kv:go_default_library",
@@ -85,6 +86,7 @@ go_library(
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
@@ -100,6 +100,17 @@ func sendVerifiedBlocks(stream ethpb.BeaconNodeValidator_StreamBlocksAltairServe
return nil
}
b.Block = &ethpb.StreamBlocksResponse_CapellaBlock{CapellaBlock: phBlk}
case version.Deneb:
pb, err := data.SignedBlock.Proto()
if err != nil {
return errors.Wrap(err, "could not get protobuf block")
}
phBlk, ok := pb.(*ethpb.SignedBeaconBlockDeneb)
if !ok {
log.Warn("Mismatch between version and block type, was expecting SignedBeaconBlockDeneb")
return nil
}
b.Block = &ethpb.StreamBlocksResponse_DenebBlock{DenebBlock: phBlk}
}

if err := stream.Send(b); err != nil {
@@ -149,6 +160,8 @@ func (vs *Server) sendBlocks(stream ethpb.BeaconNodeValidator_StreamBlocksAltair
b.Block = &ethpb.StreamBlocksResponse_BellatrixBlock{BellatrixBlock: p}
case *ethpb.SignedBeaconBlockCapella:
b.Block = &ethpb.StreamBlocksResponse_CapellaBlock{CapellaBlock: p}
case *ethpb.SignedBeaconBlockDeneb:
b.Block = &ethpb.StreamBlocksResponse_DenebBlock{DenebBlock: p}
default:
log.Errorf("Unknown block type %T", p)
}
@@ -9,6 +9,7 @@ import (

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
emptypb "github.com/golang/protobuf/ptypes/empty"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
@@ -17,6 +18,7 @@ import (
blockfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/block"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition/interop"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -24,6 +26,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@@ -49,14 +52,12 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
}

// process attestations and update head in forkchoice
vs.ForkFetcher.ForkChoicer().Lock()
vs.HeadUpdater.UpdateHead(ctx, vs.ForkFetcher.CurrentSlot())
headRoot := vs.ForkFetcher.ForkChoicer().CachedHeadRoot()
parentRoot := vs.ForkFetcher.ForkChoicer().GetProposerHead()
vs.ForkchoiceFetcher.UpdateHead(ctx, vs.TimeFetcher.CurrentSlot())
headRoot := vs.ForkchoiceFetcher.CachedHeadRoot()
parentRoot := vs.ForkchoiceFetcher.GetProposerHead()
if parentRoot != headRoot {
blockchain.LateBlockAttemptedReorgCount.Inc()
}
vs.ForkFetcher.ForkChoicer().Unlock()

// An optimistic validator MUST NOT produce a block (i.e., sign across the DOMAIN_BEACON_PROPOSER domain).
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().BellatrixForkEpoch {
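The hunk above swaps direct ForkChoicer() access, with explicit Lock/Unlock in the caller, for calls on a ForkchoiceFetcher that handles locking internally. As a rough sketch, the surface this refactor assumes would look like the following; the method set is assembled from the calls visible in this diff, not copied from the Prysm source:

// Sketch of the assumed ForkchoiceFetcher interface (hypothetical declaration;
// methods inferred from call sites in this diff).
type ForkchoiceFetcher interface {
	// UpdateHead processes pending attestations and recomputes head,
	// taking the fork-choice lock internally.
	UpdateHead(ctx context.Context, slot primitives.Slot)
	// CachedHeadRoot returns the head computed by the last UpdateHead call.
	CachedHeadRoot() [32]byte
	// GetProposerHead returns the parent root to propose on; it may differ
	// from CachedHeadRoot when attempting to re-org a late block.
	GetProposerHead() [32]byte
	SetForkChoiceGenesisTime(uint64)
	HighestReceivedBlockSlot() primitives.Slot
	ReceivedBlocksLastEpoch() (uint64, error)
	InsertNode(ctx context.Context, st state.BeaconState, root [32]byte) error
}

The practical effect is that callers such as GetBeaconBlock and circuitBreakBuilder no longer have to reason about lock ordering themselves.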
@@ -122,7 +123,8 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
vs.setSyncAggregate(ctx, sBlk)

// Set execution data. New in Bellatrix.
if err := vs.setExecutionData(ctx, sBlk, head); err != nil {
blobs, err := vs.setExecutionData(ctx, sBlk, head)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not set execution data: %v", err)
}

@@ -131,6 +133,7 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (

sr, err := vs.computeStateRoot(ctx, sBlk)
if err != nil {
interop.WriteBlockToDisk(sBlk, true /*failed*/)
return nil, status.Errorf(codes.Internal, "Could not compute state root: %v", err)
}
sBlk.SetStateRoot(sr)
@@ -139,6 +142,46 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not convert block to proto: %v", err)
}
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().DenebForkEpoch {
blk, ok := pb.(*ethpb.BeaconBlockDeneb)
if !ok {
return nil, status.Errorf(codes.Internal, "Could not cast block to BeaconBlockDeneb")
}
br, err := blk.HashTreeRoot()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get block root: %v", err)
}

// TODO: Better error handling. If something is wrong with the blob, we don't want to fail block production. Also should check if the kzg commitment matches.
validatorBlobs := make([]*ethpb.BlobSidecar, len(blk.Body.BlobKzgCommitments))
var gethBlobs types.Blobs
for _, b := range blobs {
var gethBlob types.Blob
copy(gethBlob[:], b.Data)
gethBlobs = append(gethBlobs, gethBlob)
}
commitments, _, proofs, err := gethBlobs.ComputeCommitmentsAndProofs()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not compute commitments and proofs: %v", err)
}
for i, b := range blobs {
validatorBlobs[i] = &ethpb.BlobSidecar{
BlockRoot: br[:],
Index: uint64(i),
Slot: blk.Slot,
BlockParentRoot: blk.ParentRoot,
ProposerIndex: blk.ProposerIndex,
Blob: b,
KzgCommitment: commitments[i][:],
KzgProof: proofs[i][:],
}
}
blkAndBlobs := &ethpb.BeaconBlockDenebAndBlobs{
Block: blk,
Blobs: validatorBlobs,
}
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Deneb{Deneb: blkAndBlobs}}, nil
}
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().CapellaForkEpoch {
if sBlk.IsBlinded() {
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_BlindedCapella{BlindedCapella: pb.(*ethpb.BlindedBeaconBlockCapella)}}, nil
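The TODO in the hunk above notes that the commitments recomputed from the returned blobs should be checked against the ones already set on the block body. A minimal sketch of that check, slotting in right after the ComputeCommitmentsAndProofs call (illustrative, not the committed fix; assumes the standard library bytes package is imported):

// Hypothetical cross-check suggested by the TODO: the recomputed commitments
// must match what setExecutionData placed in blk.Body.BlobKzgCommitments.
if len(commitments) != len(blk.Body.BlobKzgCommitments) {
	return nil, status.Errorf(codes.Internal, "have %d blob commitments, want %d", len(commitments), len(blk.Body.BlobKzgCommitments))
}
for i := range commitments {
	if !bytes.Equal(commitments[i][:], blk.Body.BlobKzgCommitments[i]) {
		return nil, status.Errorf(codes.Internal, "kzg commitment mismatch at index %d", i)
	}
}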
@@ -162,11 +205,7 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.ProposeBeaconBlock")
defer span.End()
blk, err := blocks.NewSignedBeaconBlock(req.Block)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "Could not decode block: %v", err)
}
return vs.proposeGenericBeaconBlock(ctx, blk)
return vs.proposeGenericBeaconBlock(ctx, req)
}

// PrepareBeaconProposer caches and updates the fee recipient for the given proposer.
@@ -249,9 +288,15 @@ func (vs *Server) GetFeeRecipientByPubKey(ctx context.Context, request *ethpb.Fe
}, nil
}

func (vs *Server) proposeGenericBeaconBlock(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpb.ProposeResponse, error) {
func (vs *Server) proposeGenericBeaconBlock(ctx context.Context, req *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.proposeGenericBeaconBlock")
defer span.End()

blk, err := blocks.NewSignedBeaconBlock(req.Block)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "Could not decode block: %v", err)
}

root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, fmt.Errorf("could not tree hash block: %v", err)
@@ -269,16 +314,6 @@ func (vs *Server) proposeGenericBeaconBlock(ctx context.Context, blk interfaces.
}
}

// Do not block proposal critical path with debug logging or block feed updates.
defer func() {
log.WithField("blockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debugf(
"Block proposal received via RPC")
vs.BlockNotifier.BlockFeed().Send(&feed.Event{
Type: blockfeed.ReceivedBlock,
Data: &blockfeed.ReceivedBlockData{SignedBlock: blk},
})
}()

// Broadcast the new block to the network.
blkPb, err := blk.Proto()
if err != nil {
@@ -295,6 +330,39 @@ func (vs *Server) proposeGenericBeaconBlock(ctx context.Context, blk interfaces.
return nil, fmt.Errorf("could not process beacon block: %v", err)
}

if blk.Version() >= version.Deneb {
b, ok := req.GetBlock().(*ethpb.GenericSignedBeaconBlock_Deneb)
if !ok {
return nil, status.Error(codes.Internal, "Could not cast block to Deneb")
}
scs := make([]*ethpb.BlobSidecar, len(b.Deneb.Blobs))
for i, sidecar := range b.Deneb.Blobs {
scs[i] = sidecar.Message
// Deliberately skip broadcasting index 1 and let peers handle its recovery.
if sidecar.Message.Index == 1 {
continue
}
if err := vs.P2P.BroadcastBlob(ctx, sidecar.Message.Index, sidecar); err != nil {
return nil, errors.Wrap(err, "could not broadcast blob sidecar")
}
}
if len(scs) > 0 {
if err := vs.BeaconDB.SaveBlobSidecar(ctx, scs); err != nil {
return nil, errors.Wrap(err, "could not save blob sidecar")
}
}
}

// Do not block proposal critical path with debug logging or block feed updates.
defer func() {
log.WithField("blockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debugf(
"Block proposal received via RPC")
vs.BlockNotifier.BlockFeed().Send(&feed.Event{
Type: blockfeed.ReceivedBlock,
Data: &blockfeed.ReceivedBlockData{SignedBlock: blk},
})
}()

return &ethpb.ProposeResponse{
BlockRoot: root[:],
}, nil
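The skipped index in the hunk above appears deliberate: every sidecar is persisted locally, but the sidecar at index 1 is withheld from gossip, presumably to exercise the network's blob-recovery path. Isolated as a self-contained toy function (illustrative only; the name is hypothetical):

// gossipIndices returns the sidecar indices that would actually be broadcast
// under the policy in the hunk above: everything except index 1.
func gossipIndices(sidecarCount uint64) []uint64 {
	out := make([]uint64, 0, sidecarCount)
	for idx := uint64(0); idx < sidecarCount; idx++ {
		if idx == 1 { // intentionally withheld so peers must recover it
			continue
		}
		out = append(out, idx)
	}
	return out
}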
@@ -80,7 +80,7 @@ func (vs *Server) packAttestations(ctx context.Context, latestState state.Beacon
// filter separates attestation list into two groups: valid and invalid attestations.
// The first group passes all the required checks for an attestation to be considered for proposing.
// Attestations from the second group should be deleted.
func (a proposerAtts) filter(ctx context.Context, st state.BeaconState) (proposerAtts, proposerAtts) {
func (a proposerAtts) filter(ctx context.Context, st state.BeaconState) (proposerAtts, proposerAtts, error) {
validAtts := make([]*ethpb.Attestation, 0, len(a))
invalidAtts := make([]*ethpb.Attestation, 0, len(a))
var attestationProcessor func(context.Context, state.BeaconState, *ethpb.Attestation) (state.BeaconState, error)
@@ -98,7 +98,7 @@ func (a proposerAtts) filter(ctx context.Context, st state.BeaconState) (propose
}
} else {
// Exit early if there is an unknown state type.
return validAtts, invalidAtts
return validAtts, invalidAtts, errors.Errorf("unknown state type: %v", st.Version())
}

for _, att := range a {
@@ -108,7 +108,7 @@ func (a proposerAtts) filter(ctx context.Context, st state.BeaconState) (propose
}
invalidAtts = append(invalidAtts, att)
}
return validAtts, invalidAtts
return validAtts, invalidAtts, nil
}

// sortByProfitability orders attestations by highest slot and by highest aggregation bit count.
@@ -247,7 +247,10 @@ func (vs *Server) validateAndDeleteAttsInPool(ctx context.Context, st state.Beac
ctx, span := trace.StartSpan(ctx, "ProposerServer.validateAndDeleteAttsInPool")
defer span.End()

validAtts, invalidAtts := proposerAtts(atts).filter(ctx, st)
validAtts, invalidAtts, err := proposerAtts(atts).filter(ctx, st)
if err != nil {
return nil, err
}
if err := vs.deleteAttsInPool(ctx, invalidAtts); err != nil {
return nil, err
}
@@ -37,11 +37,11 @@ var builderGetPayloadMissCount = promauto.NewCounter(prometheus.CounterOpts{
const blockBuilderTimeout = 1 * time.Second

// Sets the execution data for the block. Execution data can come from the local EL client or a remote builder, depending on validator registration and circuit breaker conditions.
func (vs *Server) setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, headState state.BeaconState) error {
func (vs *Server) setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, headState state.BeaconState) ([]*enginev1.Blob, error) {
idx := blk.Block().ProposerIndex()
slot := blk.Block().Slot()
if slots.ToEpoch(slot) < params.BeaconConfig().BellatrixForkEpoch {
return nil
return nil, nil
}

canUseBuilder, err := vs.canUseBuilder(ctx, slot, idx)
@@ -55,14 +55,14 @@ func (vs *Server) setExecutionData(ctx context.Context, blk interfaces.SignedBea
} else {
switch {
case blk.Version() >= version.Capella:
localPayload, err := vs.getExecutionPayload(ctx, slot, idx, blk.Block().ParentRoot(), headState)
localPayload, _, err := vs.getExecutionPayload(ctx, slot, idx, blk.Block().ParentRoot(), headState)
if err != nil {
return errors.Wrap(err, "failed to get execution payload")
return nil, errors.Wrap(err, "failed to get execution payload")
}
// Compare payload values between local and builder. Default to the local value if it is higher.
localValue, err := localPayload.Value()
if err != nil {
return errors.Wrap(err, "failed to get local payload value")
return nil, errors.Wrap(err, "failed to get local payload value")
}
builderValue, err := builderPayload.Value()
if err != nil {
@@ -71,7 +71,7 @@ func (vs *Server) setExecutionData(ctx context.Context, blk interfaces.SignedBea

withdrawalsMatched, err := matchingWithdrawalsRoot(localPayload, builderPayload)
if err != nil {
return errors.Wrap(err, "failed to match withdrawals root")
return nil, errors.Wrap(err, "failed to match withdrawals root")
}
// If we can't get the builder value, just use local block.
if builderValue.Cmp(localValue) > 0 && withdrawalsMatched { // Builder value is higher and withdrawals match.
@@ -79,31 +79,38 @@ func (vs *Server) setExecutionData(ctx context.Context, blk interfaces.SignedBea
if err := blk.SetExecution(builderPayload); err != nil {
log.WithError(err).Warn("Proposer: failed to set builder payload")
} else {
return nil
return nil, nil
}
}
log.WithFields(logrus.Fields{
"localValue": localValue,
"builderValue": builderValue,
}).Warn("Proposer: using local execution payload because higher value")
return blk.SetExecution(localPayload)
return nil, blk.SetExecution(localPayload)
default: // Bellatrix case.
blk.SetBlinded(true)
if err := blk.SetExecution(builderPayload); err != nil {
log.WithError(err).Warn("Proposer: failed to set builder payload")
} else {
return nil
return nil, nil
}
}
}

}

executionData, err := vs.getExecutionPayload(ctx, slot, idx, blk.Block().ParentRoot(), headState)
executionData, blobsBundle, err := vs.getExecutionPayload(ctx, slot, idx, blk.Block().ParentRoot(), headState)
if err != nil {
return errors.Wrap(err, "failed to get execution payload")
return nil, errors.Wrap(err, "failed to get execution payload")
}
return blk.SetExecution(executionData)
if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch && len(blobsBundle.KzgCommitments) > 0 {
// TODO: check block hash matches blob bundle hash
if err := blk.SetBlobKzgCommitments(blobsBundle.KzgCommitments); err != nil {
return nil, errors.Wrap(err, "could not set blob kzg commitments")
}
return blobsBundle.Blobs, blk.SetExecution(executionData)
}

return nil, blk.SetExecution(executionData)
}

// This function retrieves the payload header given the slot number and the validator index.
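The hunk also assumes getExecutionPayload now returns the engine's blobs bundle alongside the payload. A hedged sketch of that assumed signature follows; the bundle field names (KzgCommitments, Blobs) are the ones consumed above, but the declaration itself is not shown in this diff and the body here is a stub:

// Hypothetical sketch, inferred from the call sites above; not the Prysm source.
func (vs *Server) getExecutionPayload(
	ctx context.Context,
	slot primitives.Slot,
	idx primitives.ValidatorIndex,
	parentRoot [32]byte,
	headState state.BeaconState,
) (interfaces.ExecutionData, *enginev1.BlobsBundle, error) {
	// The engine_getPayload round-trip (and blob bundle retrieval) would live
	// here; the bundle carries the KzgCommitments and Blobs used by callers.
	return nil, nil, errors.New("illustrative stub")
}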
@@ -187,17 +194,17 @@ func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot primitiv
"builderPubKey": fmt.Sprintf("%#x", bid.Pubkey()),
"blockHash": fmt.Sprintf("%#x", header.BlockHash()),
}).Info("Received header with bid")

return header, nil
}

// This function retrieves the full payload block using the input blind block. This input must be versioned as
// a Bellatrix blinded block. The output block will contain the full payload. The original header block
// will be returned if the block builder is not configured.
func (vs *Server) unblindBuilderBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ReadOnlySignedBeaconBlock, error) {
func (vs *Server) unblindBuilderBlock(ctx context.Context, b interfaces.SignedBeaconBlock) (interfaces.SignedBeaconBlock, error) {
if err := consensusblocks.BeaconBlockIsNil(b); err != nil {
return nil, err
}

// No-op if the input block is not a blinded Bellatrix block.
if b.Version() != version.Bellatrix || !b.IsBlinded() {
return b, nil
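The signature change from ReadOnlySignedBeaconBlock to SignedBeaconBlock is what lets unblinding mutate the block in place: swapping a header for the full payload requires the mutable interface. A rough sketch of the relationship between the two interfaces, with the mutator set inferred from calls made elsewhere in this diff (SetExecution, SetBlinded, SetStateRoot, SetBlobKzgCommitments); the real declarations live in consensus-types and differ in detail:

// Sketch only; not the verbatim Prysm interface.
type SignedBeaconBlock interface {
	ReadOnlySignedBeaconBlock // everything a read-only consumer can already do

	SetExecution(payload ExecutionData) error         // used when unblinding
	SetBlinded(blinded bool)                          // Bellatrix builder path
	SetStateRoot(root []byte)                         // GetBeaconBlock
	SetBlobKzgCommitments(commitments [][]byte) error // Deneb
}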
@@ -70,7 +70,8 @@ func TestServer_setExecutionData(t *testing.T) {
t.Run("No builder configured. Use local block", func(t *testing.T) {
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockCapella())
require.NoError(t, err)
require.NoError(t, vs.setExecutionData(context.Background(), blk, capellaTransitionState))
_, err = vs.setExecutionData(context.Background(), blk, capellaTransitionState)
require.NoError(t, err)
e, err := blk.Block().Body().Execution()
require.NoError(t, err)
require.Equal(t, uint64(1), e.BlockNumber()) // Local block
@@ -117,11 +118,12 @@ func TestServer_setExecutionData(t *testing.T) {
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockBellatrix())
require.NoError(t, err)
chain := &blockchainTest.ChainService{ForkChoiceStore: doublylinkedtree.New(), Genesis: time.Now(), Block: wb}
vs.ForkFetcher = chain
vs.ForkFetcher.ForkChoicer().SetGenesisTime(uint64(time.Now().Unix()))
vs.ForkchoiceFetcher = chain
vs.ForkchoiceFetcher.SetForkChoiceGenesisTime(uint64(time.Now().Unix()))
vs.TimeFetcher = chain
vs.HeadFetcher = chain
require.NoError(t, vs.setExecutionData(context.Background(), blk, capellaTransitionState))
_, err = vs.setExecutionData(context.Background(), blk, capellaTransitionState)
require.NoError(t, err)
e, err := blk.Block().Body().Execution()
require.NoError(t, err)
require.Equal(t, uint64(1), e.BlockNumber()) // Local block because incorrect withdrawals
@@ -171,10 +173,11 @@ func TestServer_setExecutionData(t *testing.T) {
require.NoError(t, err)
chain := &blockchainTest.ChainService{ForkChoiceStore: doublylinkedtree.New(), Genesis: time.Now(), Block: wb}
vs.ForkFetcher = chain
vs.ForkFetcher.ForkChoicer().SetGenesisTime(uint64(time.Now().Unix()))
vs.ForkchoiceFetcher.SetForkChoiceGenesisTime(uint64(time.Now().Unix()))
vs.TimeFetcher = chain
vs.HeadFetcher = chain
require.NoError(t, vs.setExecutionData(context.Background(), blk, capellaTransitionState))
_, err = vs.setExecutionData(context.Background(), blk, capellaTransitionState)
require.NoError(t, err)
e, err := blk.Block().Body().Execution()
require.NoError(t, err)
require.Equal(t, uint64(2), e.BlockNumber()) // Builder block
@@ -183,7 +186,8 @@ func TestServer_setExecutionData(t *testing.T) {
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockCapella())
require.NoError(t, err)
vs.ExecutionEngineCaller = &powtesting.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &v1.ExecutionPayloadCapella{BlockNumber: 3}, BlockValue: big.NewInt(3)}
require.NoError(t, vs.setExecutionData(context.Background(), blk, capellaTransitionState))
_, err = vs.setExecutionData(context.Background(), blk, capellaTransitionState)
require.NoError(t, err)
e, err := blk.Block().Body().Execution()
require.NoError(t, err)
require.Equal(t, uint64(3), e.BlockNumber()) // Local block
@@ -195,7 +199,8 @@ func TestServer_setExecutionData(t *testing.T) {
ErrGetHeader: errors.New("fault"),
}
vs.ExecutionEngineCaller = &powtesting.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &v1.ExecutionPayloadCapella{BlockNumber: 4}, BlockValue: big.NewInt(0)}
require.NoError(t, vs.setExecutionData(context.Background(), blk, capellaTransitionState))
_, err = vs.setExecutionData(context.Background(), blk, capellaTransitionState)
require.NoError(t, err)
e, err := blk.Block().Body().Execution()
require.NoError(t, err)
require.Equal(t, uint64(4), e.BlockNumber()) // Local block
@@ -366,10 +371,10 @@ func TestServer_getBuilderBlock(t *testing.T) {

tests := []struct {
name string
blk interfaces.ReadOnlySignedBeaconBlock
blk interfaces.SignedBeaconBlock
mock *builderTest.MockBuilderService
err string
returnedBlk interfaces.ReadOnlySignedBeaconBlock
returnedBlk interfaces.SignedBeaconBlock
}{
{
name: "nil block",
@@ -378,12 +383,12 @@ func TestServer_getBuilderBlock(t *testing.T) {
},
{
name: "old block version",
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk: func() interfaces.SignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
return wb
}(),
returnedBlk: func() interfaces.ReadOnlySignedBeaconBlock {
returnedBlk: func() interfaces.SignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
return wb
@@ -391,7 +396,7 @@ func TestServer_getBuilderBlock(t *testing.T) {
},
{
name: "not configured",
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk: func() interfaces.SignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBlindedBeaconBlockBellatrix())
require.NoError(t, err)
return wb
@@ -399,7 +404,7 @@ func TestServer_getBuilderBlock(t *testing.T) {
mock: &builderTest.MockBuilderService{
HasConfigured: false,
},
returnedBlk: func() interfaces.ReadOnlySignedBeaconBlock {
returnedBlk: func() interfaces.SignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBlindedBeaconBlockBellatrix())
require.NoError(t, err)
return wb
@@ -407,7 +412,7 @@ func TestServer_getBuilderBlock(t *testing.T) {
},
{
name: "submit blind block error",
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk: func() interfaces.SignedBeaconBlock {
b := util.NewBlindedBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
@@ -424,7 +429,7 @@ func TestServer_getBuilderBlock(t *testing.T) {
},
{
name: "head and payload root mismatch",
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk: func() interfaces.SignedBeaconBlock {
b := util.NewBlindedBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
@@ -436,7 +441,7 @@ func TestServer_getBuilderBlock(t *testing.T) {
HasConfigured: true,
Payload: p,
},
returnedBlk: func() interfaces.ReadOnlySignedBeaconBlock {
returnedBlk: func() interfaces.SignedBeaconBlock {
b := util.NewBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
@@ -449,7 +454,7 @@ func TestServer_getBuilderBlock(t *testing.T) {
},
{
name: "can get payload",
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk: func() interfaces.SignedBeaconBlock {
b := util.NewBlindedBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
@@ -475,7 +480,7 @@ func TestServer_getBuilderBlock(t *testing.T) {
HasConfigured: true,
Payload: p,
},
returnedBlk: func() interfaces.ReadOnlySignedBeaconBlock {
returnedBlk: func() interfaces.SignedBeaconBlock {
b := util.NewBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
@@ -41,15 +41,12 @@ func (vs *Server) validatorRegistered(ctx context.Context, id primitives.Validat

// circuitBreakBuilder returns true if the builder is not allowed to be used due to circuit breaker conditions.
func (vs *Server) circuitBreakBuilder(s primitives.Slot) (bool, error) {
if vs.ForkFetcher == nil || vs.ForkFetcher.ForkChoicer() == nil {
if vs.ForkchoiceFetcher == nil {
return true, errors.New("no fork choicer configured")
}

vs.ForkFetcher.ForkChoicer().RLock()
defer vs.ForkFetcher.ForkChoicer().RUnlock()

// Circuit breaker is active if the number of missing consecutive slots is greater than `MaxBuilderConsecutiveMissedSlots`.
highestReceivedSlot := vs.ForkFetcher.ForkChoicer().HighestReceivedBlockSlot()
highestReceivedSlot := vs.ForkchoiceFetcher.HighestReceivedBlockSlot()
maxConsecutiveSkipSlotsAllowed := params.BeaconConfig().MaxBuilderConsecutiveMissedSlots
diff, err := s.SafeSubSlot(highestReceivedSlot)
if err != nil {
@@ -71,7 +68,7 @@ func (vs *Server) circuitBreakBuilder(s primitives.Slot) (bool, error) {
}

// Circuit breaker is active if the number of missing slots per epoch (rolling window) is greater than `MaxBuilderEpochMissedSlots`.
receivedCount, err := vs.ForkFetcher.ForkChoicer().ReceivedBlocksLastEpoch()
receivedCount, err := vs.ForkchoiceFetcher.ReceivedBlocksLastEpoch()
if err != nil {
return true, err
}
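The consecutive-miss rule above is simple arithmetic. With hypothetical numbers: if the highest received block slot is 100, the proposing slot is 104, and MaxBuilderConsecutiveMissedSlots is 3, then diff = 104 - 100 = 4 > 3 and the breaker trips. As a self-contained toy restatement (illustrative, not the Prysm code):

// consecutiveMissBreak mirrors the first check above: trip when more than
// maxMissed consecutive slots have passed without a received block. The real
// code subtracts via SafeSubSlot and treats an underflow as an error.
func consecutiveMissBreak(currentSlot, highestReceivedSlot, maxMissed uint64) bool {
	if currentSlot < highestReceivedSlot {
		return false
	}
	return currentSlot-highestReceivedSlot > maxMissed
}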
@@ -26,8 +26,8 @@ func TestServer_circuitBreakBuilder(t *testing.T) {
_, err := s.circuitBreakBuilder(0)
require.ErrorContains(t, "no fork choicer configured", err)

s.ForkFetcher = &blockchainTest.ChainService{ForkChoiceStore: doublylinkedtree.New()}
s.ForkFetcher.ForkChoicer().SetGenesisTime(uint64(time.Now().Unix()))
s.ForkchoiceFetcher = &blockchainTest.ChainService{ForkChoiceStore: doublylinkedtree.New()}
s.ForkchoiceFetcher.SetForkChoiceGenesisTime(uint64(time.Now().Unix()))
b, err := s.circuitBreakBuilder(params.BeaconConfig().MaxBuilderConsecutiveMissedSlots + 1)
require.NoError(
t,
@@ -41,7 +41,7 @@ func TestServer_circuitBreakBuilder(t *testing.T) {
ctx := context.Background()
st, blkRoot, err := createState(1, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, s.ForkFetcher.ForkChoicer().InsertNode(ctx, st, blkRoot))
require.NoError(t, s.ForkchoiceFetcher.InsertNode(ctx, st, blkRoot))
b, err = s.circuitBreakBuilder(params.BeaconConfig().MaxBuilderConsecutiveMissedSlots)
require.NoError(t, err)
require.Equal(t, false, b)
@@ -52,7 +52,7 @@ func TestServer_circuitBreakBuilder(t *testing.T) {
params.OverrideBeaconConfig(cfg)
st, blkRoot, err = createState(params.BeaconConfig().SlotsPerEpoch, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, s.ForkFetcher.ForkChoicer().InsertNode(ctx, st, blkRoot))
require.NoError(t, s.ForkchoiceFetcher.InsertNode(ctx, st, blkRoot))
b, err = s.circuitBreakBuilder(params.BeaconConfig().SlotsPerEpoch + 1)
require.NoError(t, err)
require.Equal(t, true, b)
@@ -62,7 +62,7 @@ func TestServer_circuitBreakBuilder(t *testing.T) {
for i := primitives.Slot(2); i <= want+2; i++ {
st, blkRoot, err = createState(i, [32]byte{byte(i)}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, s.ForkFetcher.ForkChoicer().InsertNode(ctx, st, blkRoot))
require.NoError(t, s.ForkchoiceFetcher.InsertNode(ctx, st, blkRoot))
}
b, err = s.circuitBreakBuilder(params.BeaconConfig().SlotsPerEpoch + 1)
require.NoError(t, err)
@@ -99,8 +99,8 @@ func TestServer_canUseBuilder(t *testing.T) {
proposerServer := &Server{}
ctx := context.Background()

proposerServer.ForkFetcher = &blockchainTest.ChainService{ForkChoiceStore: doublylinkedtree.New()}
proposerServer.ForkFetcher.ForkChoicer().SetGenesisTime(uint64(time.Now().Unix()))
proposerServer.ForkchoiceFetcher = &blockchainTest.ChainService{ForkChoiceStore: doublylinkedtree.New()}
proposerServer.ForkchoiceFetcher.SetForkChoiceGenesisTime(uint64(time.Now().Unix()))
reg, err := proposerServer.canUseBuilder(ctx, params.BeaconConfig().MaxBuilderConsecutiveMissedSlots+1, 0)
require.NoError(t, err)
require.Equal(t, false, reg)
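Judging by these tests, canUseBuilder composes registration with the circuit breaker: it reports false as soon as the breaker trips, regardless of registration. A hypothetical composition (names and ordering assumed from the tests, not taken from the source):

// canUseBuilder, sketched: the builder is only an option when the circuit
// breaker has not tripped and the proposing validator registered with it.
func canUseBuilder(circuitBroken, registered bool) bool {
	return !circuitBroken && registered
}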
@@ -35,7 +35,7 @@ func (vs *Server) setBlsToExecData(blk interfaces.SignedBeaconBlock, headState s
}
}

func (vs *Server) unblindBuilderBlockCapella(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ReadOnlySignedBeaconBlock, error) {
func (vs *Server) unblindBuilderBlockCapella(ctx context.Context, b interfaces.SignedBeaconBlock) (interfaces.SignedBeaconBlock, error) {
if err := consensusblocks.BeaconBlockIsNil(b); err != nil {
return nil, errors.Wrap(err, "block is nil")
}
Some files were not shown because too many files have changed in this diff.