Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 13:58:09 -05:00)

Compare commits: update-lib...blobs-by-r (334 commits)
@@ -1 +1 @@
5.3.0
6.1.0

@@ -21,7 +21,7 @@ linters:
linters-settings:
gocognit:
# TODO: We should target for < 50
min-complexity: 65
min-complexity: 69

output:
print-issued-lines: true

WORKSPACE (40 changed lines)
@@ -4,15 +4,18 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")

http_archive(
name = "bazel_toolchains",
sha256 = "8e0633dfb59f704594f19ae996a35650747adc621ada5e8b9fb588f808c89cb0",
strip_prefix = "bazel-toolchains-3.7.0",
name = "rules_pkg",
sha256 = "8c20f74bca25d2d442b327ae26768c02cf3c99e93fad0381f32be9aab1967675",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/releases/download/3.7.0/bazel-toolchains-3.7.0.tar.gz",
"https://github.com/bazelbuild/bazel-toolchains/releases/download/3.7.0/bazel-toolchains-3.7.0.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.8.1/rules_pkg-0.8.1.tar.gz",
"https://github.com/bazelbuild/rules_pkg/releases/download/0.8.1/rules_pkg-0.8.1.tar.gz",
],
)

load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")

rules_pkg_dependencies()

http_archive(
name = "com_grail_bazel_toolchain",
sha256 = "b210fc8e58782ef171f428bfc850ed7179bdd805543ebd1aa144b9c93489134f",
@@ -39,10 +42,6 @@ load("@prysm//tools/cross-toolchain:prysm_toolchains.bzl", "configure_prysm_tool

configure_prysm_toolchains()

load("@prysm//tools/cross-toolchain:rbe_toolchains_config.bzl", "rbe_toolchains_config")

rbe_toolchains_config()

load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

http_archive(
@@ -87,10 +86,10 @@ http_archive(
# Expose internals of go_test for custom build transitions.
"//third_party:io_bazel_rules_go_test.patch",
],
sha256 = "ae013bf35bd23234d1dea46b079f1e05ba74ac0321423830119d3e787ec73483",
sha256 = "dd926a88a564a9246713a9c00b35315f54cbd46b31a26d5d8fb264c07045f05d",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.36.0/rules_go-v0.36.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.36.0/rules_go-v0.36.0.zip",
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.38.1/rules_go-v0.38.1.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.38.1/rules_go-v0.38.1.zip",
],
)

@@ -165,7 +164,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()

go_register_toolchains(
go_version = "1.19.4",
go_version = "1.19.7",
nogo = "@//:nogo",
)

@@ -191,6 +190,21 @@ filegroup(
url = "https://github.com/eth-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
)

http_archive(
name = "eip4881_spec_tests",
build_file_content = """
filegroup(
name = "test_data",
srcs = glob([
"**/*.yaml",
]),
visibility = ["//visibility:public"],
)
""",
sha256 = "89cb659498c0d196fc9f957f8b849b2e1a5c041c3b2b3ae5432ac5c26944297e",
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)

consensus_spec_version = "v1.3.0-rc.3"

bls_test_version = "v0.1.1"

@@ -103,8 +103,8 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
}

log.Printf("BeaconState slot=%d, Block slot=%d", s.Slot(), b.Block().Slot())
log.Printf("BeaconState htr=%#xd, Block state_root=%#x", sr, b.Block().StateRoot())
log.Printf("BeaconState latest_block_header htr=%#xd, block htr=%#x", br, realBlockRoot)
log.Printf("BeaconState htr=%#x, Block state_root=%#x", sr, b.Block().StateRoot())
log.Printf("BeaconState latest_block_header htr=%#x, block htr=%#x", br, realBlockRoot)
return &OriginData{
st: s,
b: b,

@@ -398,11 +398,7 @@ func populateValidators(cfg *params.BeaconChainConfig, st state.BeaconState, val
if err := st.SetValidators(validators); err != nil {
return err
}
if err := st.SetBalances(balances); err != nil {
return err
}

return nil
return st.SetBalances(balances)
}

func TestDownloadFinalizedData(t *testing.T) {

@@ -467,7 +467,7 @@ func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, e
version := bytesutil.ToBytes4(vSlice)
ofs = append(ofs, forks.ForkScheduleEntry{
Version: version,
Epoch: primitives.Epoch(uint64(epoch)),
Epoch: primitives.Epoch(epoch),
})
}
sort.Sort(ofs)

@@ -45,6 +45,7 @@ go_library(
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/core/transition/interop:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/db/kv:go_default_library",

@@ -301,7 +301,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,

var attr payloadattribute.Attributer
switch st.Version() {
case version.Capella:
case version.Capella, version.Deneb:
withdrawals, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")

@@ -2,12 +2,18 @@ package blockchain

import (
"context"
"fmt"
"time"

"github.com/pkg/errors"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"github.com/sirupsen/logrus"
)

func (s *Service) isNewProposer(slot primitives.Slot) bool {
@@ -42,67 +48,80 @@ func (s *Service) getStateAndBlock(ctx context.Context, r [32]byte) (state.Beaco
return headState, newHeadBlock, nil
}

// fockchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It decides whether a new call to FCU should be made.
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, newHeadRoot [32]byte, proposingSlot primitives.Slot) error {
isNewHead := s.isNewHead(newHeadRoot)
isNewProposer := s.isNewProposer(proposingSlot)
if !isNewHead && !isNewProposer {
if !isNewHead {
return nil
}

var headState state.BeaconState
var headBlock interfaces.ReadOnlySignedBeaconBlock
var headRoot [32]byte
var err error

shouldUpdate := isNewHead
if isNewHead && isNewProposer && !features.Get().DisableReorgLateBlocks {
if proposingSlot == s.CurrentSlot() {
proposerHead := s.ForkChoicer().GetProposerHead()
if proposerHead != newHeadRoot {
shouldUpdate = false
}
} else if s.ForkChoicer().ShouldOverrideFCU() {
shouldUpdate = false
}
}
if shouldUpdate {
headRoot = newHeadRoot
headState, headBlock, err = s.getStateAndBlock(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
isNewProposer := s.isNewProposer(proposingSlot)
if isNewProposer && !features.Get().DisableReorgLateBlocks {
if s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
return nil
}
} else {
// We are guaranteed that the head block is the parent
// of the incoming block. We do not process the slot
// because it will be processed anyway in notifyForkchoiceUpdate
headState = s.headState(ctx)
headRoot = s.headRoot()
headBlock, err = s.headBlock()
if err != nil {
return errors.Wrap(err, "could not get head block")
}
}
headState, headBlock, err := s.getStateAndBlock(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return nil
}

_, err = s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: headState,
headRoot: headRoot,
headRoot: newHeadRoot,
headBlock: headBlock.Block(),
})
if err != nil {
return errors.Wrap(err, "could not notify forkchoice update")
}

if shouldUpdate {
if err := s.saveHead(ctx, newHeadRoot, headBlock, headState); err != nil {
log.WithError(err).Error("could not save head")
}

// Only need to prune attestations from pool if the head has changed.
if err := s.pruneAttsFromPool(headBlock); err != nil {
return err
}
if err := s.saveHead(ctx, newHeadRoot, headBlock, headState); err != nil {
log.WithError(err).Error("could not save head")
}

// Only need to prune attestations from pool if the head has changed.
if err := s.pruneAttsFromPool(headBlock); err != nil {
log.WithError(err).Error("could not prune attestations from pool")
}
return nil
}

// shouldOverrideFCU checks whether the incoming block is still subject to being
// reorged or not by the next proposer.
func (s *Service) shouldOverrideFCU(newHeadRoot [32]byte, proposingSlot primitives.Slot) bool {
headWeight, err := s.ForkChoicer().Weight(newHeadRoot)
if err != nil {
log.WithError(err).WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
}
currentSlot := s.CurrentSlot()
if proposingSlot == currentSlot {
proposerHead := s.ForkChoicer().GetProposerHead()
if proposerHead != newHeadRoot {
return true
}
log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", newHeadRoot),
"weight": headWeight,
}).Infof("Attempted late block reorg aborted due to attestations at %d seconds",
params.BeaconConfig().SecondsPerSlot)
lateBlockFailedAttemptSecondThreshold.Inc()
} else {
if s.ForkChoicer().ShouldOverrideFCU() {
return true
}
secs, err := slots.SecondsSinceSlotStart(currentSlot,
uint64(s.genesisTime.Unix()), uint64(time.Now().Unix()))
if err != nil {
log.WithError(err).Error("could not compute seconds since slot start")
}
if secs >= doublylinkedtree.ProcessAttestationsThreshold {
log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", newHeadRoot),
"weight": headWeight,
}).Infof("Attempted late block reorg aborted due to attestations at %d seconds",
doublylinkedtree.ProcessAttestationsThreshold)
lateBlockFailedAttemptFirstThreshold.Inc()
}
}
return false
}

@@ -3,6 +3,7 @@ package blockchain
import (
"context"
"testing"
"time"

"github.com/prysmaticlabs/prysm/v3/beacon-chain/cache"
testDB "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
@@ -190,3 +191,46 @@ func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testin
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, r, service.CurrentSlot()+1))

}

func TestShouldOverrideFCU(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
service.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot) * time.Second))
require.NoError(t, err)
headRoot := [32]byte{'b'}
parentRoot := [32]byte{'a'}
ojc := &ethpb.Checkpoint{}
st, root, err := prepareForkchoiceState(ctx, 1, parentRoot, [32]byte{}, [32]byte{}, ojc, ojc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 2, headRoot, parentRoot, [32]byte{}, ojc, ojc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))

require.Equal(t, primitives.Slot(2), service.CurrentSlot())
require.Equal(t, true, service.shouldOverrideFCU(headRoot, 2))
require.LogsDoNotContain(t, hook, "12 seconds")
require.Equal(t, false, service.shouldOverrideFCU(parentRoot, 2))
require.LogsContain(t, hook, "12 seconds")

head, err := fcs.Head(ctx)
require.NoError(t, err)
require.Equal(t, headRoot, head)

fcs.SetGenesisTime(uint64(time.Now().Unix()) - 29)
require.Equal(t, true, service.shouldOverrideFCU(parentRoot, 3))
require.LogsDoNotContain(t, hook, "10 seconds")
fcs.SetGenesisTime(uint64(time.Now().Unix()) - 24)
service.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot+10) * time.Second))
require.Equal(t, false, service.shouldOverrideFCU(parentRoot, 3))
require.LogsContain(t, hook, "10 seconds")
}

@@ -60,7 +60,20 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
log = log.WithField("txCount", len(txs))
txsPerSlotCount.Set(float64(len(txs)))
}

if b.Version() == version.Deneb {
k, err := b.Body().BlobKzgCommitments()
if err != nil {
return err
}
log = log.WithField("blobCount", len(k))
}
}
if b.Version() == version.Deneb {
k, err := b.Body().BlobKzgCommitments()
if err != nil {
return err
}
log = log.WithField("blobCount", len(k))
}
log.Info("Finished applying state transition")
return nil
@@ -96,6 +109,7 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"version": version.String(block.Version()),
}).Info("Synced new block")
}
return nil

@@ -111,6 +111,18 @@ var (
Name: "beacon_reorgs_total",
Help: "Count the number of times beacon chain has a reorg",
})
LateBlockAttemptedReorgCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "beacon_late_block_attempted_reorgs",
Help: "Count the number of times a proposer served by this beacon has attempted a late block reorg",
})
lateBlockFailedAttemptFirstThreshold = promauto.NewCounter(prometheus.CounterOpts{
Name: "beacon_failed_reorg_attempts_first_threshold",
Help: "Count the number of times a proposer served by this beacon attempted a late block reorg but desisted in the first threshold",
})
lateBlockFailedAttemptSecondThreshold = promauto.NewCounter(prometheus.CounterOpts{
Name: "beacon_failed_reorg_attempts_second_threshold",
Help: "Count the number of times a proposer served by this beacon attempted a late block reorg but desisted in the second threshold",
})
saveOrphanedAttCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "saved_orphaned_att_total",
Help: "Count the number of times an orphaned attestation is saved",
@@ -158,10 +170,6 @@ var (
Name: "txs_per_slot_count",
Help: "Count the number of txs per slot",
})
missedPayloadIDFilledCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "missed_payload_id_filled_count",
Help: "",
})
onBlockProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "on_block_processing_milliseconds",
Help: "Total time in milliseconds to complete a call to onBlock()",

@@ -152,9 +152,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
if err := s.handleBlockAttestations(ctx, signed.Block(), postState); err != nil {
return errors.Wrap(err, "could not handle block's attestations")
}
if err := s.handleBlockBLSToExecChanges(signed.Block()); err != nil {
return errors.Wrap(err, "could not handle block's BLSToExecutionChanges")
}

s.InsertSlashingsToForkChoiceStore(ctx, signed.Block().Body().AttesterSlashings())
if isValidPayload {
@@ -214,6 +211,8 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
}
newBlockHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))

// verify conditions for FCU, notifies FCU, and saves the new head.
// This function also prunes attestations, other similar operations happen in prunePostBlockOperationPools.
if err := s.forkchoiceUpdateWithExecution(ctx, headRoot, s.CurrentSlot()+1); err != nil {
return err
}
@@ -447,12 +446,22 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
}
}
}
// Save boundary states that will be useful for forkchoice
for r, st := range boundaries {
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
return err
}
}
// Also saves the last post state which to be used as pre state for the next batch.
lastBR := blockRoots[len(blks)-1]
if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
return err
}
// Insert all nodes but the last one to forkchoice
if err := s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes); err != nil {
return errors.Wrap(err, "could not insert batch to forkchoice")
}
// Insert the last block to forkchoice
lastBR := blockRoots[len(blks)-1]
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastBR); err != nil {
return errors.Wrap(err, "could not insert last block in batch to forkchoice")
}
@@ -462,17 +471,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
return errors.Wrap(err, "could not set optimistic block to valid")
}
}

for r, st := range boundaries {
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
return err
}
}
// Also saves the last post state which to be used as pre state for the next batch.
lastB := blks[len(blks)-1]
if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
return err
}
arg := &notifyForkchoiceUpdateArg{
headState: preState,
headRoot: lastBR,
@@ -597,8 +596,8 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interface
}

// This removes the attestations in block `b` from the attestation mem pool.
func (s *Service) pruneAttsFromPool(b interfaces.ReadOnlySignedBeaconBlock) error {
atts := b.Block().Body().Attestations()
func (s *Service) pruneAttsFromPool(headBlock interfaces.ReadOnlySignedBeaconBlock) error {
atts := headBlock.Block().Body().Attestations()
for _, att := range atts {
if helpers.IsAggregated(att) {
if err := s.cfg.AttPool.DeleteAggregatedAttestation(att); err != nil {
@@ -683,6 +682,8 @@ func (s *Service) fillMissingPayloadIDRoutine(ctx context.Context, stateFeed *ev
}()
}

// fillMissingBlockPayloadId is called 4 seconds into the slot and calls FCU if we are proposing next slot
// and the cache has been missed
func (s *Service) fillMissingBlockPayloadId(ctx context.Context) error {
s.ForkChoicer().RLock()
highestReceivedSlot := s.cfg.ForkChoiceStore.HighestReceivedBlockSlot()
@@ -696,10 +697,10 @@ func (s *Service) fillMissingBlockPayloadId(ctx context.Context) error {
if !has || id != [8]byte{} {
return nil
}
missedPayloadIDFilledCount.Inc()
s.headLock.RLock()
headBlock, err := s.headBlock()
if err != nil {
s.headLock.RUnlock()
return err
}
headState := s.headState(ctx)

@@ -66,6 +66,7 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b interfaces.ReadOnlyBe
// during initial syncing. There's no risk given a state summary object is just a
// a subset of the block object.
if !s.cfg.BeaconDB.HasStateSummary(ctx, parentRoot) && !s.cfg.BeaconDB.HasBlock(ctx, parentRoot) {
log.Errorf("requesting blockroot %#x", bytesutil.Trunc(parentRoot[:]))
return errors.New("could not reconstruct parent state")
}

|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition/interop"
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
|
||||
@@ -48,17 +50,17 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
// Apply state transition on the new block.
|
||||
if err := s.onBlock(ctx, blockCopy, blockRoot); err != nil {
|
||||
err := errors.Wrap(err, "could not process block")
|
||||
tracing.AnnotateError(span, err)
|
||||
interop.WriteBlockToDisk("state_transition", blockCopy, true /*failed*/)
|
||||
return err
|
||||
}
|
||||
|
||||
// Handle post block operations such as attestations and exits.
|
||||
if err := s.handlePostBlockOperations(blockCopy.Block()); err != nil {
|
||||
return err
|
||||
// Handle post block operations such as pruning exits and bls messages if incoming block is the head
|
||||
if err := s.prunePostBlockOperationPools(ctx, blockCopy, blockRoot); err != nil {
|
||||
log.WithError(err).Error("Could not prune canonical objects from pool ")
|
||||
}
|
||||
|
||||
// Have we been finalizing? Should we start saving hot states to db?
|
||||
@@ -157,29 +159,40 @@ func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing *ethpb.A
|
||||
s.InsertSlashingsToForkChoiceStore(ctx, []*ethpb.AttesterSlashing{slashing})
|
||||
}
|
||||
|
||||
func (s *Service) handlePostBlockOperations(b interfaces.ReadOnlyBeaconBlock) error {
|
||||
// prunePostBlockOperationPools only runs on new head otherwise should return a nil.
|
||||
func (s *Service) prunePostBlockOperationPools(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock, root [32]byte) error {
|
||||
headRoot, err := s.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// By comparing the current headroot, that has already gone through forkchoice,
|
||||
// we can assume that if equal the current block root is canonical.
|
||||
if !bytes.Equal(headRoot, root[:]) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Mark block exits as seen so we don't include same ones in future blocks.
|
||||
for _, e := range b.Body().VoluntaryExits() {
|
||||
for _, e := range blk.Block().Body().VoluntaryExits() {
|
||||
s.cfg.ExitPool.MarkIncluded(e)
|
||||
}
|
||||
|
||||
// Mark block BLS changes as seen so we don't include same ones in future blocks.
|
||||
if err := s.handleBlockBLSToExecChanges(b); err != nil {
|
||||
if err := s.markIncludedBlockBLSToExecChanges(blk.Block()); err != nil {
|
||||
return errors.Wrap(err, "could not process BLSToExecutionChanges")
|
||||
}
|
||||
|
||||
// Mark attester slashings as seen so we don't include same ones in future blocks.
|
||||
for _, as := range b.Body().AttesterSlashings() {
|
||||
for _, as := range blk.Block().Body().AttesterSlashings() {
|
||||
s.cfg.SlashingPool.MarkIncludedAttesterSlashing(as)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) handleBlockBLSToExecChanges(blk interfaces.ReadOnlyBeaconBlock) error {
|
||||
if blk.Version() < version.Capella {
|
||||
func (s *Service) markIncludedBlockBLSToExecChanges(headBlock interfaces.ReadOnlyBeaconBlock) error {
|
||||
if headBlock.Version() < version.Capella {
|
||||
return nil
|
||||
}
|
||||
changes, err := blk.Body().BLSToExecutionChanges()
|
||||
changes, err := headBlock.Body().BLSToExecutionChanges()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get BLSToExecutionChanges")
|
||||
}
|
||||
|
||||
@@ -357,7 +357,7 @@ func TestHandleBlockBLSToExecutionChanges(t *testing.T) {
|
||||
}
|
||||
blk, err := blocks.NewBeaconBlock(pbb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.handleBlockBLSToExecChanges(blk))
|
||||
require.NoError(t, service.markIncludedBlockBLSToExecChanges(blk))
|
||||
})
|
||||
|
||||
t.Run("Post Capella no changes", func(t *testing.T) {
|
||||
@@ -367,7 +367,7 @@ func TestHandleBlockBLSToExecutionChanges(t *testing.T) {
|
||||
}
|
||||
blk, err := blocks.NewBeaconBlock(pbb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.handleBlockBLSToExecChanges(blk))
|
||||
require.NoError(t, service.markIncludedBlockBLSToExecChanges(blk))
|
||||
})
|
||||
|
||||
t.Run("Post Capella some changes", func(t *testing.T) {
|
||||
@@ -389,7 +389,7 @@ func TestHandleBlockBLSToExecutionChanges(t *testing.T) {
|
||||
|
||||
pool.InsertBLSToExecChange(signedChange)
|
||||
require.Equal(t, true, pool.ValidatorExists(idx))
|
||||
require.NoError(t, service.handleBlockBLSToExecChanges(blk))
|
||||
require.NoError(t, service.markIncludedBlockBLSToExecChanges(blk))
|
||||
require.Equal(t, false, pool.ValidatorExists(idx))
|
||||
})
|
||||
}
|
||||
|
||||
@@ -57,6 +57,11 @@ type mockBroadcaster struct {
|
||||
broadcastCalled bool
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) BroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.SignedBlobSidecar) error {
|
||||
//TODO implement me
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
|
||||
mb.broadcastCalled = true
|
||||
return nil
|
||||
|
||||
beacon-chain/cache/depositsnapshot/BUILD.bazel (24 changed lines, vendored)
@@ -1,4 +1,4 @@
load("@prysm//tools/go:def.bzl", "go_library")
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
name = "go_default_library",
@@ -19,3 +19,25 @@ go_library(
"@com_github_pkg_errors//:go_default_library",
],
)

go_test(
name = "go_default_test",
srcs = [
"deposit_tree_snapshot_test.go",
"merkle_tree_test.go",
"spec_test.go",
],
data = [
"@eip4881_spec_tests//:test_data",
],
embed = [":go_default_library"],
deps = [
"//io/file:go_default_library",
"//proto/eth/v1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@in_gopkg_yaml_v3//:go_default_library",
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
],
)

@@ -23,8 +23,6 @@ var (
ErrInvalidIndex = errors.New("index should be greater than finalizedDeposits - 1")
// ErrNoDeposits occurs when the number of deposits is 0.
ErrNoDeposits = errors.New("number of deposits should be greater than 0")
// ErrNoFinalizedDeposits occurs when the number of finalized deposits is 0.
ErrNoFinalizedDeposits = errors.New("number of finalized deposits should be greater than 0")
// ErrTooManyDeposits occurs when the number of deposits exceeds the capacity of the tree.
ErrTooManyDeposits = errors.New("number of deposits should not be greater than the capacity of the tree")
)
@@ -62,7 +60,7 @@ func (d *DepositTree) getSnapshot() (DepositTreeSnapshot, error) {
return DepositTreeSnapshot{}, ErrEmptyExecutionBlock
}
var finalized [][32]byte
depositCount, _ := d.tree.GetFinalized(finalized)
depositCount, finalized := d.tree.GetFinalized(finalized)
return fromTreeParts(finalized, depositCount, d.finalizedExecutionBlock)
}

@@ -119,9 +117,6 @@ func (d *DepositTree) getProof(index uint64) ([32]byte, [][32]byte, error) {
return [32]byte{}, nil, ErrInvalidMixInLength
}
finalizedDeposits, _ := d.tree.GetFinalized([][32]byte{})
if finalizedDeposits == 0 {
return [32]byte{}, nil, ErrNoFinalizedDeposits
}
if finalizedDeposits != 0 {
finalizedDeposits = finalizedDeposits - 1
}

@@ -40,7 +40,7 @@ func (ds *DepositTreeSnapshot) CalculateRoot() ([32]byte, error) {
}
size >>= 1
}
return sha256.Sum256(append(root[:], bytesutil.Uint64ToBytesLittleEndian(ds.depositCount)...)), nil
return sha256.Sum256(append(root[:], bytesutil.Uint64ToBytesLittleEndian32(ds.depositCount)...)), nil
}

// fromTreeParts constructs the deposit tree from pre-existing data.
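The CalculateRoot hunk above switches the deposit-count encoding to Uint64ToBytesLittleEndian32. A minimal, self-contained sketch of that final mix-in step follows; it is illustrative only and assumes (as the name suggests) that the helper zero-pads the 8-byte little-endian count to 32 bytes before hashing.

```go
package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// mixInDepositCount reproduces the last step of DepositTreeSnapshot.CalculateRoot
// shown above: hash the Merkle root together with the deposit count encoded as a
// 32-byte little-endian value (the encoding Uint64ToBytesLittleEndian32 is assumed
// to produce).
func mixInDepositCount(root [32]byte, depositCount uint64) [32]byte {
	countBytes := make([]byte, 32) // zero-padded to 32 bytes
	binary.LittleEndian.PutUint64(countBytes, depositCount)
	return sha256.Sum256(append(root[:], countBytes...))
}

func main() {
	var root [32]byte // placeholder Merkle root
	fmt.Printf("%x\n", mixInDepositCount(root, 20))
}
```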
beacon-chain/cache/depositsnapshot/deposit_tree_snapshot_test.go (new file, 54 lines, vendored)
@@ -0,0 +1,54 @@
package depositsnapshot

import (
"fmt"
"reflect"
"testing"

"github.com/prysmaticlabs/prysm/v3/testing/require"
)

func TestDepositTreeSnapshot_CalculateRoot(t *testing.T) {
tests := []struct {
name string
finalized int
depositCount uint64
want [32]byte
}{
{
name: "empty",
finalized: 0,
depositCount: 0,
want: [32]byte{215, 10, 35, 71, 49, 40, 92, 104, 4, 194, 164, 245, 103, 17, 221, 184, 200, 44, 153, 116, 15, 32, 120, 84, 137, 16, 40, 175, 52, 226, 126, 94},
},
{
name: "1 Finalized",
finalized: 1,
depositCount: 2,
want: [32]byte{36, 118, 154, 57, 217, 109, 145, 116, 238, 1, 207, 59, 187, 28, 69, 187, 70, 55, 153, 180, 15, 150, 37, 72, 140, 36, 109, 154, 212, 202, 47, 59},
},
{
name: "many finalised",
finalized: 6,
depositCount: 20,
want: [32]byte{210, 63, 57, 119, 12, 5, 3, 25, 139, 20, 244, 59, 114, 119, 35, 88, 222, 88, 122, 106, 239, 20, 45, 140, 99, 92, 222, 166, 133, 159, 128, 72},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var finalized [][32]byte
for i := 0; i < tt.finalized; i++ {
finalized = append(finalized, hexString(t, fmt.Sprintf("%064d", i)))
}
ds := &DepositTreeSnapshot{
finalized: finalized,
depositCount: tt.depositCount,
}
root, err := ds.CalculateRoot()
require.NoError(t, err)
if got := root; !reflect.DeepEqual(got, tt.want) {
require.DeepEqual(t, tt.want, got)
}
})
}
}
beacon-chain/cache/depositsnapshot/merkle_tree_test.go (new file, 141 lines, vendored)
@@ -0,0 +1,141 @@
package depositsnapshot

import (
"encoding/hex"
"fmt"
"reflect"
"testing"

"github.com/prysmaticlabs/prysm/v3/testing/assert"
"github.com/prysmaticlabs/prysm/v3/testing/require"
)

func hexString(t *testing.T, hexStr string) [32]byte {
t.Helper()
b, err := hex.DecodeString(hexStr)
require.NoError(t, err)
if len(b) != 32 {
assert.Equal(t, 32, len(b), "bad hash length, expected 32")
}
x := (*[32]byte)(b)
return *x
}

func Test_create(t *testing.T) {
tests := []struct {
name string
leaves [][32]byte
depth uint64
want MerkleTreeNode
}{
{
name: "empty tree",
leaves: nil,
depth: 0,
want: &ZeroNode{},
},
{
name: "zero depth",
leaves: [][32]byte{hexString(t, fmt.Sprintf("%064d", 0))},
depth: 0,
want: &LeafNode{},
},
{
name: "depth of 1",
leaves: [][32]byte{hexString(t, fmt.Sprintf("%064d", 0))},
depth: 1,
want: &InnerNode{&LeafNode{}, &ZeroNode{}},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := create(tt.leaves, tt.depth); !reflect.DeepEqual(got, tt.want) {
require.DeepEqual(t, tt.want, got)
}
})
}
}

func Test_fromSnapshotParts(t *testing.T) {
tests := []struct {
name string
finalized [][32]byte
deposits uint64
level uint64
want MerkleTreeNode
}{
{
name: "empty",
finalized: nil,
deposits: 0,
level: 0,
want: &ZeroNode{},
},
{
name: "single finalized node",
finalized: [][32]byte{hexString(t, fmt.Sprintf("%064d", 0))},
deposits: 1,
level: 0,
want: &FinalizedNode{
depositCount: 1,
hash: [32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
},
},
{
name: "multiple deposits and 1 Finalized",
finalized: [][32]byte{hexString(t, fmt.Sprintf("%064d", 0))},
deposits: 2,
level: 4,
want: &InnerNode{
left: &InnerNode{&InnerNode{&FinalizedNode{depositCount: 2, hash: hexString(t, fmt.Sprintf("%064d", 0))}, &ZeroNode{1}}, &ZeroNode{2}},
right: &ZeroNode{3},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tree, err := fromSnapshotParts(tt.finalized, tt.deposits, tt.level)
require.NoError(t, err)
if got := tree; !reflect.DeepEqual(got, tt.want) {
require.DeepEqual(t, tt.want, got)
}
})
}
}

func Test_generateProof(t *testing.T) {
tests := []struct {
name string
leaves uint64
}{
{
name: "1 leaf",
leaves: 1,
},
{
name: "4 leaves",
leaves: 4,
},
{
name: "10 leaves",
leaves: 10,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
testCases, err := readTestCases()
require.NoError(t, err)
tree := New()
for _, c := range testCases[:tt.leaves] {
err = tree.pushLeaf(c.DepositDataRoot)
require.NoError(t, err)
}
for i := uint64(0); i < tt.leaves; i++ {
leaf, proof := generateProof(tree.tree, i, DepositContractDepth)
require.Equal(t, leaf, testCases[i].DepositDataRoot)
calcRoot := merkleRootFromBranch(leaf, proof, i)
require.Equal(t, tree.tree.GetRoot(), calcRoot)
}
})
}
}
beacon-chain/cache/depositsnapshot/spec_test.go (new file, 355 lines, vendored)
@@ -0,0 +1,355 @@
|
||||
package depositsnapshot
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/bazelbuild/rules_go/go/tools/bazel"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v3/io/file"
|
||||
eth "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/require"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
type testCase struct {
|
||||
DepositData depositData `yaml:"deposit_data"`
|
||||
DepositDataRoot [32]byte `yaml:"deposit_data_root"`
|
||||
Eth1Data *eth1Data `yaml:"eth1_data"`
|
||||
BlockHeight uint64 `yaml:"block_height"`
|
||||
Snapshot snapshot `yaml:"snapshot"`
|
||||
}
|
||||
|
||||
func (tc *testCase) UnmarshalYAML(value *yaml.Node) error {
|
||||
raw := struct {
|
||||
DepositData depositData `yaml:"deposit_data"`
|
||||
DepositDataRoot string `yaml:"deposit_data_root"`
|
||||
Eth1Data *eth1Data `yaml:"eth1_data"`
|
||||
BlockHeight string `yaml:"block_height"`
|
||||
Snapshot snapshot `yaml:"snapshot"`
|
||||
}{}
|
||||
err := value.Decode(&raw)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tc.DepositDataRoot, err = hexStringToByteArray(raw.DepositDataRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tc.DepositData = raw.DepositData
|
||||
tc.Eth1Data = raw.Eth1Data
|
||||
tc.BlockHeight, err = stringToUint64(raw.BlockHeight)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tc.Snapshot = raw.Snapshot
|
||||
return nil
|
||||
}
|
||||
|
||||
type depositData struct {
|
||||
Pubkey []byte `yaml:"pubkey"`
|
||||
WithdrawalCredentials []byte `yaml:"withdrawal_credentials"`
|
||||
Amount uint64 `yaml:"amount"`
|
||||
Signature []byte `yaml:"signature"`
|
||||
}
|
||||
|
||||
func (dd *depositData) UnmarshalYAML(value *yaml.Node) error {
|
||||
raw := struct {
|
||||
Pubkey string `yaml:"pubkey"`
|
||||
WithdrawalCredentials string `yaml:"withdrawal_credentials"`
|
||||
Amount string `yaml:"amount"`
|
||||
Signature string `yaml:"signature"`
|
||||
}{}
|
||||
err := value.Decode(&raw)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dd.Pubkey, err = hexStringToBytes(raw.Pubkey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dd.WithdrawalCredentials, err = hexStringToBytes(raw.WithdrawalCredentials)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dd.Amount, err = strconv.ParseUint(raw.Amount, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dd.Signature, err = hexStringToBytes(raw.Signature)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type eth1Data struct {
|
||||
DepositRoot [32]byte `yaml:"deposit_root"`
|
||||
DepositCount uint64 `yaml:"deposit_count"`
|
||||
BlockHash [32]byte `yaml:"block_hash"`
|
||||
}
|
||||
|
||||
func (ed *eth1Data) UnmarshalYAML(value *yaml.Node) error {
|
||||
raw := struct {
|
||||
DepositRoot string `yaml:"deposit_root"`
|
||||
DepositCount string `yaml:"deposit_count"`
|
||||
BlockHash string `yaml:"block_hash"`
|
||||
}{}
|
||||
err := value.Decode(&raw)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ed.DepositRoot, err = hexStringToByteArray(raw.DepositRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ed.DepositCount, err = stringToUint64(raw.DepositCount)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ed.BlockHash, err = hexStringToByteArray(raw.BlockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type snapshot struct {
|
||||
DepositTreeSnapshot
|
||||
}
|
||||
|
||||
func (sd *snapshot) UnmarshalYAML(value *yaml.Node) error {
|
||||
raw := struct {
|
||||
Finalized []string `yaml:"finalized"`
|
||||
DepositRoot string `yaml:"deposit_root"`
|
||||
DepositCount string `yaml:"deposit_count"`
|
||||
ExecutionBlockHash string `yaml:"execution_block_hash"`
|
||||
ExecutionBlockHeight string `yaml:"execution_block_height"`
|
||||
}{}
|
||||
err := value.Decode(&raw)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sd.finalized = make([][32]byte, len(raw.Finalized))
|
||||
for i, finalized := range raw.Finalized {
|
||||
sd.finalized[i], err = hexStringToByteArray(finalized)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
sd.depositRoot, err = hexStringToByteArray(raw.DepositRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sd.depositCount, err = stringToUint64(raw.DepositCount)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sd.executionBlock.Hash, err = hexStringToByteArray(raw.ExecutionBlockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sd.executionBlock.Depth, err = stringToUint64(raw.ExecutionBlockHeight)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func readTestCases() ([]testCase, error) {
|
||||
testFolders, err := bazel.ListRunfiles()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, ff := range testFolders {
|
||||
if strings.Contains(ff.ShortPath, "eip4881_spec_tests") &&
|
||||
strings.Contains(ff.ShortPath, "eip-4881/test_cases.yaml") {
|
||||
enc, err := file.ReadFileAsBytes(ff.Path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var testCases []testCase
|
||||
err = yaml.Unmarshal(enc, &testCases)
|
||||
if err != nil {
|
||||
return []testCase{}, err
|
||||
}
|
||||
return testCases, nil
|
||||
}
|
||||
}
|
||||
return nil, errors.New("spec test file not found")
|
||||
}
|
||||
|
||||
func TestRead(t *testing.T) {
|
||||
tcs, err := readTestCases()
|
||||
require.NoError(t, err)
|
||||
for _, tc := range tcs {
|
||||
t.Log(tc)
|
||||
}
|
||||
}
|
||||
|
||||
func hexStringToByteArray(s string) (b [32]byte, err error) {
|
||||
var raw []byte
|
||||
raw, err = hexStringToBytes(s)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if len(raw) != 32 {
|
||||
err = errors.New("invalid hex string length")
|
||||
return
|
||||
}
|
||||
copy(b[:], raw[:32])
|
||||
return
|
||||
}
|
||||
|
||||
func hexStringToBytes(s string) (b []byte, err error) {
|
||||
b, err = hex.DecodeString(strings.TrimPrefix(s, "0x"))
|
||||
return
|
||||
}
|
||||
|
||||
func stringToUint64(s string) (uint64, error) {
|
||||
value, err := strconv.ParseUint(s, 10, 32)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return value, nil
|
||||
}
|
||||
|
||||
func merkleRootFromBranch(leaf [32]byte, branch [][32]byte, index uint64) [32]byte {
|
||||
root := leaf
|
||||
for i, l := range branch {
|
||||
ithBit := (index >> i) & 0x1
|
||||
if ithBit == 1 {
|
||||
root = sha256.Sum256(append(l[:], root[:]...))
|
||||
} else {
|
||||
root = sha256.Sum256(append(root[:], l[:]...))
|
||||
}
|
||||
}
|
||||
return root
|
||||
}
|
||||
|
||||
func checkProof(t *testing.T, tree *DepositTree, index uint64) {
|
||||
leaf, proof, err := tree.getProof(index)
|
||||
require.NoError(t, err)
|
||||
calcRoot := merkleRootFromBranch(leaf, proof, index)
|
||||
require.Equal(t, tree.getRoot(), calcRoot)
|
||||
}
|
||||
|
||||
func compareProof(t *testing.T, tree1, tree2 *DepositTree, index uint64) {
|
||||
require.Equal(t, tree1.getRoot(), tree2.getRoot())
|
||||
checkProof(t, tree1, index)
|
||||
checkProof(t, tree2, index)
|
||||
}
|
||||
|
||||
func cloneFromSnapshot(t *testing.T, snapshot DepositTreeSnapshot, testCases []testCase) *DepositTree {
|
||||
cp, err := fromSnapshot(snapshot)
|
||||
require.NoError(t, err)
|
||||
for _, c := range testCases {
|
||||
err = cp.pushLeaf(c.DepositDataRoot)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
return &cp
|
||||
}
|
||||
|
||||
func TestDepositCases(t *testing.T) {
|
||||
tree := New()
|
||||
testCases, err := readTestCases()
|
||||
require.NoError(t, err)
|
||||
for _, c := range testCases {
|
||||
err = tree.pushLeaf(c.DepositDataRoot)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFinalization(t *testing.T) {
|
||||
tree := New()
|
||||
testCases, err := readTestCases()
|
||||
require.NoError(t, err)
|
||||
for _, c := range testCases[:128] {
|
||||
err = tree.pushLeaf(c.DepositDataRoot)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
originalRoot := tree.getRoot()
|
||||
require.DeepEqual(t, testCases[127].Eth1Data.DepositRoot, originalRoot)
|
||||
err = tree.finalize(ð.Eth1Data{
|
||||
DepositRoot: testCases[100].Eth1Data.DepositRoot[:],
|
||||
DepositCount: testCases[100].Eth1Data.DepositCount,
|
||||
BlockHash: testCases[100].Eth1Data.BlockHash[:],
|
||||
}, testCases[100].BlockHeight)
|
||||
require.NoError(t, err)
|
||||
// ensure finalization doesn't change root
|
||||
require.Equal(t, tree.getRoot(), originalRoot)
|
||||
snapshotData, err := tree.getSnapshot()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, testCases[100].Snapshot.DepositTreeSnapshot, snapshotData)
|
||||
// create a copy of the tree from a snapshot by replaying
|
||||
// the deposits after the finalized deposit
|
||||
cp := cloneFromSnapshot(t, snapshotData, testCases[101:128])
|
||||
// ensure original and copy have the same root
|
||||
require.Equal(t, tree.getRoot(), cp.getRoot())
|
||||
// finalize original again to check double finalization
|
||||
err = tree.finalize(ð.Eth1Data{
|
||||
DepositRoot: testCases[105].Eth1Data.DepositRoot[:],
|
||||
DepositCount: testCases[105].Eth1Data.DepositCount,
|
||||
BlockHash: testCases[105].Eth1Data.BlockHash[:],
|
||||
}, testCases[105].BlockHeight)
|
||||
require.NoError(t, err)
|
||||
// root should still be the same
|
||||
require.Equal(t, originalRoot, tree.getRoot())
|
||||
// create a copy of the tree by taking a snapshot again
|
||||
snapshotData, err = tree.getSnapshot()
|
||||
require.NoError(t, err)
|
||||
cp = cloneFromSnapshot(t, snapshotData, testCases[106:128])
|
||||
// create a copy of the tree by replaying ALL deposits from nothing
|
||||
fullTreeCopy := New()
|
||||
for _, c := range testCases[:128] {
|
||||
err = fullTreeCopy.pushLeaf(c.DepositDataRoot)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
for i := 106; i < 128; i++ {
|
||||
compareProof(t, tree, cp, uint64(i))
|
||||
compareProof(t, tree, fullTreeCopy, uint64(i))
|
||||
}
|
||||
}
|
||||
|
||||
func TestSnapshotCases(t *testing.T) {
|
||||
tree := New()
|
||||
testCases, err := readTestCases()
|
||||
require.NoError(t, err)
|
||||
for _, c := range testCases {
|
||||
err = tree.pushLeaf(c.DepositDataRoot)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
for _, c := range testCases {
|
||||
err = tree.finalize(ð.Eth1Data{
|
||||
DepositRoot: c.Eth1Data.DepositRoot[:],
|
||||
DepositCount: c.Eth1Data.DepositCount,
|
||||
BlockHash: c.Eth1Data.BlockHash[:],
|
||||
}, c.BlockHeight)
|
||||
require.NoError(t, err)
|
||||
s, err := tree.getSnapshot()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, c.Snapshot.DepositTreeSnapshot, s)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEmptyTreeSnapshot(t *testing.T) {
|
||||
_, err := New().getSnapshot()
|
||||
require.ErrorContains(t, "empty execution block", err)
|
||||
}
|
||||
|
||||
func TestInvalidSnapshot(t *testing.T) {
|
||||
invalidSnapshot := DepositTreeSnapshot{
|
||||
finalized: nil,
|
||||
depositRoot: Zerohashes[0],
|
||||
depositCount: 0,
|
||||
executionBlock: executionBlock{
|
||||
Hash: Zerohashes[0],
|
||||
Depth: 0,
|
||||
},
|
||||
}
|
||||
_, err := fromSnapshot(invalidSnapshot)
|
||||
require.ErrorContains(t, "snapshot root is invalid", err)
|
||||
}
|
||||
@@ -133,6 +133,7 @@ func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload interfaces.
return err
}
if !bytes.Equal(payload.ParentHash(), header.BlockHash()) {
log.Errorf("parent Hash %#x, header Hash: %#x", bytesutil.Trunc(payload.ParentHash()), bytesutil.Trunc(header.BlockHash()))
return ErrInvalidPayloadBlockHash
}
return nil

@@ -9,6 +9,110 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
)

// UpgradeToDeneb updates inputs a generic state to return the version Deneb state.
func UpgradeToDeneb(state state.BeaconState) (state.BeaconState, error) {
epoch := time.CurrentEpoch(state)

currentSyncCommittee, err := state.CurrentSyncCommittee()
if err != nil {
return nil, err
}
nextSyncCommittee, err := state.NextSyncCommittee()
if err != nil {
return nil, err
}
prevEpochParticipation, err := state.PreviousEpochParticipation()
if err != nil {
return nil, err
}
currentEpochParticipation, err := state.CurrentEpochParticipation()
if err != nil {
return nil, err
}
inactivityScores, err := state.InactivityScores()
if err != nil {
return nil, err
}
payloadHeader, err := state.LatestExecutionPayloadHeader()
if err != nil {
return nil, err
}
txRoot, err := payloadHeader.TransactionsRoot()
if err != nil {
return nil, err
}
wdRoot, err := payloadHeader.WithdrawalsRoot()
if err != nil {
return nil, err
}
wi, err := state.NextWithdrawalIndex()
if err != nil {
return nil, err
}
vi, err := state.NextWithdrawalValidatorIndex()
if err != nil {
return nil, err
}
summarires, err := state.HistoricalSummaries()
if err != nil {
return nil, err
}

s := &ethpb.BeaconStateDeneb{
GenesisTime: state.GenesisTime(),
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
Slot: state.Slot(),
Fork: &ethpb.Fork{
PreviousVersion: state.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().DenebForkVersion,
Epoch: epoch,
},
LatestBlockHeader: state.LatestBlockHeader(),
BlockRoots: state.BlockRoots(),
StateRoots: state.StateRoots(),
HistoricalRoots: [][]byte{},
Eth1Data: state.Eth1Data(),
Eth1DataVotes: state.Eth1DataVotes(),
Eth1DepositIndex: state.Eth1DepositIndex(),
Validators: state.Validators(),
Balances: state.Balances(),
RandaoMixes: state.RandaoMixes(),
Slashings: state.Slashings(),
PreviousEpochParticipation: prevEpochParticipation,
CurrentEpochParticipation: currentEpochParticipation,
JustificationBits: state.JustificationBits(),
PreviousJustifiedCheckpoint: state.PreviousJustifiedCheckpoint(),
CurrentJustifiedCheckpoint: state.CurrentJustifiedCheckpoint(),
FinalizedCheckpoint: state.FinalizedCheckpoint(),
InactivityScores: inactivityScores,
CurrentSyncCommittee: currentSyncCommittee,
NextSyncCommittee: nextSyncCommittee,
LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: payloadHeader.ParentHash(),
FeeRecipient: payloadHeader.FeeRecipient(),
StateRoot: payloadHeader.StateRoot(),
ReceiptsRoot: payloadHeader.ReceiptsRoot(),
LogsBloom: payloadHeader.LogsBloom(),
PrevRandao: payloadHeader.PrevRandao(),
BlockNumber: payloadHeader.BlockNumber(),
GasLimit: payloadHeader.GasLimit(),
GasUsed: payloadHeader.GasUsed(),
Timestamp: payloadHeader.Timestamp(),
ExtraData: payloadHeader.ExtraData(),
BaseFeePerGas: payloadHeader.BaseFeePerGas(),
BlockHash: payloadHeader.BlockHash(),
ExcessDataGas: make([]byte, 32),
TransactionsRoot: txRoot,
WithdrawalsRoot: wdRoot,
},
NextWithdrawalIndex: wi,
NextWithdrawalValidatorIndex: vi,
HistoricalSummaries: summarires,
}

return state_native.InitializeFromProtoUnsafeDeneb(s)
}

// UpgradeToCapella updates a generic state to return the version Capella state.
func UpgradeToCapella(state state.BeaconState) (state.BeaconState, error) {
epoch := time.CurrentEpoch(state)

@@ -353,7 +353,7 @@ func ProcessRandaoMixesReset(state state.BeaconState) (state.BeaconState, error)
}

// ProcessHistoricalDataUpdate processes the updates to historical data during epoch processing.
// From Capella onward, per spec,state's historical summaries are updated instead of historical roots.
// From Capella onward, per spec, state's historical summaries are updated instead of historical roots.
func ProcessHistoricalDataUpdate(state state.BeaconState) (state.BeaconState, error) {
	currentEpoch := time.CurrentEpoch(state)
	nextEpoch := currentEpoch + 1

@@ -81,6 +81,15 @@ func CanUpgradeToCapella(slot primitives.Slot) bool {
	return epochStart && capellaEpoch
}

// CanUpgradeToDeneb returns true if the input `slot` can upgrade to Deneb.
// Spec code:
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == DENEB_FORK_EPOCH
func CanUpgradeToDeneb(slot primitives.Slot) bool {
	epochStart := slots.IsEpochStart(slot)
	denebEpoch := slots.ToEpoch(slot) == params.BeaconConfig().DenebForkEpoch
	return epochStart && denebEpoch
}

// CanProcessEpoch checks the eligibility to process epoch.
// The epoch can be processed at the end of the last slot of every epoch.
//

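// A hedged illustration of how the helper above could be exercised: a minimal,
// table-driven test sketch. It assumes the package's usual test helpers
// (params.SetupTestConfigCleanup / params.OverrideBeaconConfig and the require package);
// the fork epoch value of 10 is purely illustrative.
func TestCanUpgradeToDeneb(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.DenebForkEpoch = 10
	params.OverrideBeaconConfig(cfg)

	slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
	tests := []struct {
		name string
		slot primitives.Slot
		want bool
	}{
		{name: "not an epoch start", slot: primitives.Slot(10*slotsPerEpoch + 1), want: false},
		{name: "epoch start before the fork", slot: primitives.Slot(9 * slotsPerEpoch), want: false},
		{name: "first slot of the fork epoch", slot: primitives.Slot(10 * slotsPerEpoch), want: true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			require.Equal(t, tt.want, CanUpgradeToDeneb(tt.slot))
		})
	}
}
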
@@ -23,7 +23,6 @@ go_library(
        "//beacon-chain/core/execution:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/core/transition/interop:go_default_library",
        "//beacon-chain/core/validators:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
@@ -46,6 +45,7 @@ go_library(
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@com_github_protolambda_go_kzg//eth:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
    ],

@@ -11,12 +11,12 @@ import (
)

// WriteBlockToDisk as a block ssz. Writes to temp directory. Debug!
func WriteBlockToDisk(block interfaces.ReadOnlySignedBeaconBlock, failed bool) {
func WriteBlockToDisk(prefix string, block interfaces.ReadOnlySignedBeaconBlock, failed bool) {
	if !features.Get().WriteSSZStateTransitions {
		return
	}

	filename := fmt.Sprintf("beacon_block_%d.ssz", block.Block().Slot())
	filename := fmt.Sprintf(prefix+"_beacon_block_%d.ssz", block.Block().Slot())
	if failed {
		filename = "failed_" + filename
	}

@@ -302,6 +302,14 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot primitives.
				return nil, err
			}
		}

		if time.CanUpgradeToDeneb(state.Slot()) {
			state, err = capella.UpgradeToDeneb(state)
			if err != nil {
				tracing.AnnotateError(span, err)
				return nil, err
			}
		}
	}

	if highestSlot < state.Slot() {

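// The Deneb branch added above follows the same cascade as the earlier forks in
// ProcessSlots: at each slot the state is checked against every configured fork epoch in
// order, and the matching upgrade rewrites the state before processing continues. A
// condensed, hedged sketch of that pattern (error wrapping and tracing omitted):
func maybeUpgrade(st state.BeaconState) (state.BeaconState, error) {
	var err error
	if time.CanUpgradeToCapella(st.Slot()) {
		st, err = capella.UpgradeToCapella(st)
		if err != nil {
			return nil, err
		}
	}
	if time.CanUpgradeToDeneb(st.Slot()) {
		st, err = capella.UpgradeToDeneb(st)
		if err != nil {
			return nil, err
		}
	}
	return st, nil
}
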
@@ -6,14 +6,15 @@ import (
	"fmt"

	"github.com/pkg/errors"
	"github.com/protolambda/go-kzg/eth"
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/altair"
	b "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition/interop"
	v "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/validators"
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
	"github.com/prysmaticlabs/prysm/v3/crypto/bls"
	"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
	"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
	"github.com/prysmaticlabs/prysm/v3/runtime/version"
	"go.opencensus.io/trace"
@@ -57,9 +58,6 @@ func ExecuteStateTransitionNoVerifyAnySig(
	defer span.End()
	var err error

	interop.WriteBlockToDisk(signed, false /* Has the block failed */)
	interop.WriteStateToDisk(st)

	parentRoot := signed.Block().ParentRoot()
	st, err = ProcessSlotsUsingNextSlotCache(ctx, st, parentRoot[:], signed.Block().Slot())
	if err != nil {
@@ -256,7 +254,7 @@ func ProcessOperationsNoVerifyAttsSigs(
		if err != nil {
			return nil, err
		}
	case version.Altair, version.Bellatrix, version.Capella:
	case version.Altair, version.Bellatrix, version.Capella, version.Deneb:
		state, err = altairOperations(ctx, state, signedBeaconBlock)
		if err != nil {
			return nil, err
@@ -356,9 +354,48 @@ func ProcessBlockForStateRoot(
		return nil, errors.Wrap(err, "process_sync_aggregate failed")
	}

	if signed.Block().Version() == version.Deneb {
		err := ValidateBlobKzgs(ctx, signed.Block().Body())
		if err != nil {
			return nil, errors.Wrap(err, "could not validate blob kzgs")
		}
	}

	return state, nil
}

// ValidateBlobKzgs validates the blob KZG commitments in the beacon block body against the payload transactions.
//
// Spec code:
// def process_blob_kzg_commitments(state: BeaconState, body: BeaconBlockBody):
//
//	assert verify_kzg_commitments_against_transactions(body.execution_payload.transactions, body.blob_kzg_commitments)
func ValidateBlobKzgs(ctx context.Context, body interfaces.ReadOnlyBeaconBlockBody) error {
	_, span := trace.StartSpan(ctx, "core.state.ValidateBlobKzgs")
	defer span.End()

	payload, err := body.Execution()
	if err != nil {
		return errors.Wrap(err, "could not get execution payload from block")
	}
	blkKzgs, err := body.BlobKzgCommitments()
	if err != nil {
		return errors.Wrap(err, "could not get blob kzg commitments from block")
	}
	kzgs := make(eth.KZGCommitmentSequenceImpl, len(blkKzgs))
	for i := range blkKzgs {
		kzgs[i] = bytesutil.ToBytes48(blkKzgs[i])
	}
	txs, err := payload.Transactions()
	if err != nil {
		return errors.Wrap(err, "could not get transactions from payload")
	}
	if err := eth.VerifyKZGCommitmentsAgainstTransactions(txs, kzgs); err != nil {
		return err
	}
	return nil
}

// This calls altair block operations.
func altairOperations(
	ctx context.Context,

@@ -54,6 +54,7 @@ type ReadOnlyDatabase interface {
	// Fee recipients operations.
	FeeRecipientByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (common.Address, error)
	RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)

	// origin checkpoint sync support
	OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
	BackfillBlockRoot(ctx context.Context) ([32]byte, error)

@@ -818,6 +818,16 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.ReadOnlySignedBea
		if err := rawBlock.UnmarshalSSZ(enc[len(capellaBlindKey):]); err != nil {
			return nil, errors.Wrap(err, "could not unmarshal blinded Capella block")
		}
	case hasDenebKey(enc):
		rawBlock = &ethpb.SignedBeaconBlockDeneb{}
		if err := rawBlock.UnmarshalSSZ(enc[len(denebKey):]); err != nil {
			return nil, err
		}
	case hasDenebBlindKey(enc):
		rawBlock = &ethpb.SignedBlindedBeaconBlockDeneb{}
		if err := rawBlock.UnmarshalSSZ(enc[len(denebBlindKey):]); err != nil {
			return nil, errors.Wrap(err, "could not unmarshal blinded Deneb block")
		}
	default:
		// Marshal block bytes to phase 0 beacon block.
		rawBlock = &ethpb.SignedBeaconBlock{}
@@ -854,6 +864,8 @@ func marshalBlockFull(
		return nil, err
	}
	switch blk.Version() {
	case version.Deneb:
		return snappy.Encode(nil, append(denebKey, encodedBlock...)), nil
	case version.Capella:
		return snappy.Encode(nil, append(capellaKey, encodedBlock...)), nil
	case version.Bellatrix:
@@ -888,6 +900,8 @@ func marshalBlockBlinded(
		return nil, errors.Wrap(err, "could not marshal blinded block")
	}
	switch blk.Version() {
	case version.Deneb:
		return snappy.Encode(nil, append(denebBlindKey, encodedBlock...)), nil
	case version.Capella:
		return snappy.Encode(nil, append(capellaBlindKey, encodedBlock...)), nil
	case version.Bellatrix:

@@ -37,3 +37,17 @@ func hasCapellaBlindKey(enc []byte) bool {
	}
	return bytes.Equal(enc[:len(capellaBlindKey)], capellaBlindKey)
}

func hasDenebKey(enc []byte) bool {
	if len(denebKey) >= len(enc) {
		return false
	}
	return bytes.Equal(enc[:len(denebKey)], denebKey)
}

func hasDenebBlindKey(enc []byte) bool {
	if len(denebBlindKey) >= len(enc) {
		return false
	}
	return bytes.Equal(enc[:len(denebBlindKey)], denebBlindKey)
}

@@ -129,6 +129,8 @@ var Buckets = [][]byte{

	feeRecipientBucket,
	registrationBucket,

	blobsBucket,
}

// NewKVStore initializes a new boltDB key-value store at the directory

@@ -46,6 +46,7 @@ var (
	finalizedCheckpointKey     = []byte("finalized-checkpoint")
	powchainDataKey            = []byte("powchain-data")
	lastValidatedCheckpointKey = []byte("last-validated-checkpoint")
	blobsBucket                = []byte("blobs")

	// Below keys are used to identify objects that are to be fork compatible.
	// Objects that are only compatible with specific forks should be prefixed with such keys.
@@ -55,6 +56,9 @@ var (
	capellaKey                 = []byte("capella")
	capellaBlindKey            = []byte("blind-capella")
	saveBlindedBeaconBlocksKey = []byte("save-blinded-beacon-blocks")
	denebKey                   = []byte("deneb")
	denebBlindKey              = []byte("blind-deneb")

	// block root included in the beacon state used by weak subjectivity initial sync
	originCheckpointBlockRootKey = []byte("origin-checkpoint-block-root")
	// block root tracking the progress of backfill, or pointing at genesis if backfill has not been initiated

@@ -473,6 +473,19 @@ func (s *Store) unmarshalState(_ context.Context, enc []byte, validatorEntries [
	}

	switch {
	case hasDenebKey(enc):
		protoState := &ethpb.BeaconStateDeneb{}
		if err := protoState.UnmarshalSSZ(enc[len(denebKey):]); err != nil {
			return nil, errors.Wrap(err, "failed to unmarshal encoding for Deneb")
		}
		ok, err := s.isStateValidatorMigrationOver()
		if err != nil {
			return nil, err
		}
		if ok {
			protoState.Validators = validatorEntries
		}
		return statenative.InitializeFromProtoUnsafeDeneb(protoState)
	case hasCapellaKey(enc):
		// Marshal state bytes to capella beacon state.
		protoState := &ethpb.BeaconStateCapella{}
@@ -580,6 +593,19 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
			return nil, err
		}
		return snappy.Encode(nil, append(capellaKey, rawObj...)), nil
	case *ethpb.BeaconStateDeneb:
		rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateDeneb)
		if !ok {
			return nil, errors.New("non valid inner state")
		}
		if rState == nil {
			return nil, errors.New("nil state")
		}
		rawObj, err := rState.MarshalSSZ()
		if err != nil {
			return nil, err
		}
		return snappy.Encode(nil, append(denebKey, rawObj...)), nil
	default:
		return nil, errors.New("invalid inner state")
	}

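// The marshal/unmarshal pair above relies on a simple convention: every SSZ-encoded state
// is prefixed with a per-fork key and snappy-compressed, and readers dispatch on that
// prefix. A self-contained sketch of the convention (not the store's exact call order;
// the key value here is only illustrative):
package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

var denebKey = []byte("deneb")

// encodeWithKey prefixes the raw SSZ bytes with the fork key and compresses the result.
func encodeWithKey(key, ssz []byte) []byte {
	return snappy.Encode(nil, append(key, ssz...))
}

// decodeWithKey decompresses, verifies the fork prefix, and returns the SSZ payload.
func decodeWithKey(key, enc []byte) ([]byte, error) {
	raw, err := snappy.Decode(nil, enc)
	if err != nil {
		return nil, err
	}
	if len(raw) <= len(key) || !bytes.Equal(raw[:len(key)], key) {
		return nil, fmt.Errorf("encoding does not carry prefix %q", key)
	}
	return raw[len(key):], nil
}

func main() {
	enc := encodeWithKey(denebKey, []byte{0x01, 0x02})
	ssz, err := decodeWithKey(denebKey, enc)
	fmt.Println(ssz, err) // [1 2] <nil>
}
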
@@ -74,5 +74,5 @@ func TestStateSummary_CanDelete(t *testing.T) {
	require.Equal(t, true, db.HasStateSummary(ctx, r1), "State summary should be saved")

	require.NoError(t, db.deleteStateSummary(r1))
	require.Equal(t, false, db.HasStateSummary(ctx, r1), "State summary should not be saved")
	require.Equal(t, false, db.HasStateSummary(ctx, r1), "State summary should be deleted")
}

@@ -37,6 +37,7 @@ go_library(
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",
        "//config/features:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
@@ -107,6 +108,7 @@ go_test(
        "//beacon-chain/execution/types:go_default_library",
        "//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",
        "//config/features:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",

@@ -212,38 +212,6 @@ func TestService_logTtdStatus(t *testing.T) {
	require.Equal(t, false, reached)
}

func TestService_logTtdStatus_NotSyncedClient(t *testing.T) {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		defer func() {
			require.NoError(t, r.Body.Close())
		}()

		resp := (*pb.ExecutionBlock)(nil) // Nil response when a client is not synced
		respJSON := map[string]interface{}{
			"jsonrpc": "2.0",
			"id":      1,
			"result":  resp,
		}
		require.NoError(t, json.NewEncoder(w).Encode(respJSON))
	}))
	defer srv.Close()

	rpcClient, err := rpc.DialHTTP(srv.URL)
	require.NoError(t, err)
	defer rpcClient.Close()

	service := &Service{
		cfg: &config{},
	}
	service.rpcClient = rpcClient

	ttd := new(uint256.Int)
	reached, err := service.logTtdStatus(context.Background(), ttd.SetUint64(24343))
	require.ErrorContains(t, "missing required field 'parentHash' for Header", err)
	require.Equal(t, false, reached)
}

func emptyPayload() *pb.ExecutionPayload {
	return &pb.ExecutionPayload{
		ParentHash: make([]byte, fieldparams.RootLength),

@@ -15,6 +15,7 @@ import (
	"github.com/holiman/uint256"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/execution/types"
	"github.com/prysmaticlabs/prysm/v3/config/features"
	fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v3/config/params"
	"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
@@ -34,6 +35,7 @@ const (
	NewPayloadMethod = "engine_newPayloadV1"
	// NewPayloadMethodV2 v2 request string for JSON-RPC.
	NewPayloadMethodV2 = "engine_newPayloadV2"
	// NewPayloadMethodV3 v3 request string for JSON-RPC.
	NewPayloadMethodV3 = "engine_newPayloadV3"
	// ForkchoiceUpdatedMethod v1 request string for JSON-RPC.
	ForkchoiceUpdatedMethod = "engine_forkchoiceUpdatedV1"
	// ForkchoiceUpdatedMethodV2 v2 request string for JSON-RPC.
@@ -42,12 +44,19 @@ const (
	GetPayloadMethod = "engine_getPayloadV1"
	// GetPayloadMethodV2 v2 request string for JSON-RPC.
	GetPayloadMethodV2 = "engine_getPayloadV2"
	// GetPayloadMethodV3 v3 request string for JSON-RPC.
	GetPayloadMethodV3 = "engine_getPayloadV3"
	// GetBlobsBundleMethod v1 request string for JSON-RPC.
	GetBlobsBundleMethod = "engine_getBlobsBundleV1"
	// ExchangeTransitionConfigurationMethod v1 request string for JSON-RPC.
	ExchangeTransitionConfigurationMethod = "engine_exchangeTransitionConfigurationV1"
	// ExecutionBlockByHashMethod request string for JSON-RPC.
	ExecutionBlockByHashMethod = "eth_getBlockByHash"
	// ExecutionBlockByNumberMethod request string for JSON-RPC.
	ExecutionBlockByNumberMethod = "eth_getBlockByNumber"
	// GetPayloadBodiesByHashV1 v1 request string for JSON-RPC.
	GetPayloadBodiesByHashV1 = "engine_getPayloadBodiesByHashV1"
	// GetPayloadBodiesByRangeV1 v1 request string for JSON-RPC.
	GetPayloadBodiesByRangeV1 = "engine_getPayloadBodiesByRangeV1"
	// Defines the seconds before timing out engine endpoints with non-block execution semantics.
	defaultEngineTimeout = time.Second
)
@@ -84,6 +93,7 @@ type EngineCaller interface {
	) error
	ExecutionBlockByHash(ctx context.Context, hash common.Hash, withTxs bool) (*pb.ExecutionBlock, error)
	GetTerminalBlockHash(ctx context.Context, transitionTime uint64) ([]byte, bool, error)
	GetBlobsBundle(ctx context.Context, payloadId [8]byte) (*pb.BlobsBundle, error)
}

var EmptyBlockHash = errors.New("Block hash is empty 0x0000...")
@@ -121,6 +131,15 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa
		if err != nil {
			return nil, handleRPCError(err)
		}
	case *pb.ExecutionPayloadDeneb:
		payloadPb, ok := payload.Proto().(*pb.ExecutionPayloadDeneb)
		if !ok {
			return nil, errors.New("execution data must be a Deneb execution payload")
		}
		err := s.rpcClient.CallContext(ctx, result, NewPayloadMethodV3, payloadPb)
		if err != nil {
			return nil, handleRPCError(err)
		}
	default:
		return nil, errors.New("unknown execution data type")
	}
@@ -168,7 +187,7 @@ func (s *Service) ForkchoiceUpdated(
		if err != nil {
			return nil, nil, handleRPCError(err)
		}
	case version.Capella:
	case version.Capella, version.Deneb:
		a, err := attrs.PbV2()
		if err != nil {
			return nil, nil, err
@@ -210,6 +229,15 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primit
	ctx, cancel := context.WithDeadline(ctx, d)
	defer cancel()

	if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch {
		result := &pb.ExecutionPayloadDenebWithValue{}
		err := s.rpcClient.CallContext(ctx, result, GetPayloadMethodV3, pb.PayloadIDBytes(payloadId))
		if err != nil {
			return nil, handleRPCError(err)
		}
		return blocks.WrappedExecutionPayloadDeneb(result.Payload, big.NewInt(0).SetBytes(result.Value))
	}

	if slots.ToEpoch(slot) >= params.BeaconConfig().CapellaForkEpoch {
		result := &pb.ExecutionPayloadCapellaWithValue{}
		err := s.rpcClient.CallContext(ctx, result, GetPayloadMethodV2, pb.PayloadIDBytes(payloadId))
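// GetPayload above resolves the engine method newest-fork-first: the Deneb check runs
// before the Capella one, so a slot past both fork epochs uses engine_getPayloadV3. A
// hedged sketch of the same ordering as a pure helper (the helper name is illustrative):
func payloadMethodForEpoch(e primitives.Epoch) string {
	cfg := params.BeaconConfig()
	switch {
	case e >= cfg.DenebForkEpoch:
		return GetPayloadMethodV3
	case e >= cfg.CapellaForkEpoch:
		return GetPayloadMethodV2
	default:
		return GetPayloadMethod
	}
}
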
@@ -417,6 +445,19 @@ func (s *Service) ExecutionBlocksByHashes(ctx context.Context, hashes []common.H
	return execBlks, nil
}

// GetBlobsBundle calls the engine_getBlobsBundleV1 method via JSON-RPC.
func (s *Service) GetBlobsBundle(ctx context.Context, payloadId [8]byte) (*pb.BlobsBundle, error) {
	ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobsBundle")
	defer span.End()

	d := time.Now().Add(defaultEngineTimeout)
	ctx, cancel := context.WithDeadline(ctx, d)
	defer cancel()
	result := &pb.BlobsBundle{}
	err := s.rpcClient.CallContext(ctx, result, GetBlobsBundleMethod, pb.PayloadIDBytes(payloadId))
	return result, handleRPCError(err)
}

// HeaderByHash returns the relevant header details for the provided block hash.
func (s *Service) HeaderByHash(ctx context.Context, hash common.Hash) (*types.HeaderInfo, error) {
	var hdr *types.HeaderInfo
@@ -437,6 +478,50 @@ func (s *Service) HeaderByNumber(ctx context.Context, number *big.Int) (*types.H
	return hdr, err
}

// GetPayloadBodiesByHash returns the relevant payload bodies for the provided block hashes.
func (s *Service) GetPayloadBodiesByHash(ctx context.Context, executionBlockHashes []common.Hash) ([]*pb.ExecutionPayloadBodyV1, error) {
	if !features.Get().EnableOptionalEngineMethods {
		return nil, errors.New("optional engine methods not enabled")
	}
	ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetPayloadBodiesByHashV1")
	defer span.End()

	result := make([]*pb.ExecutionPayloadBodyV1, 0)
	err := s.rpcClient.CallContext(ctx, &result, GetPayloadBodiesByHashV1, executionBlockHashes)

	for i, item := range result {
		if item == nil {
			result[i] = &pb.ExecutionPayloadBodyV1{
				Transactions: make([][]byte, 0),
				Withdrawals:  make([]*pb.Withdrawal, 0),
			}
		}
	}
	return result, handleRPCError(err)
}

// GetPayloadBodiesByRange returns the relevant payload bodies for the provided range.
func (s *Service) GetPayloadBodiesByRange(ctx context.Context, start, count uint64) ([]*pb.ExecutionPayloadBodyV1, error) {
	if !features.Get().EnableOptionalEngineMethods {
		return nil, errors.New("optional engine methods not enabled")
	}
	ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetPayloadBodiesByRangeV1")
	defer span.End()

	result := make([]*pb.ExecutionPayloadBodyV1, 0)
	err := s.rpcClient.CallContext(ctx, &result, GetPayloadBodiesByRangeV1, start, count)

	for i, item := range result {
		if item == nil {
			result[i] = &pb.ExecutionPayloadBodyV1{
				Transactions: make([][]byte, 0),
				Withdrawals:  make([]*pb.Withdrawal, 0),
			}
		}
	}
	return result, handleRPCError(err)
}

// ReconstructFullBlock takes in a blinded beacon block and reconstructs
// a beacon block with a full execution payload via the engine API.
func (s *Service) ReconstructFullBlock(
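// For illustration only: a hedged sketch of how a caller might consume the range variant
// above. It assumes the EnableOptionalEngineMethods feature flag is set and that `svc`
// is an initialized *Service; the helper name is hypothetical.
func countPayloadBodyTxs(ctx context.Context, svc *Service, start, count uint64) (int, error) {
	bodies, err := svc.GetPayloadBodiesByRange(ctx, start, count)
	if err != nil {
		return 0, err
	}
	total := 0
	for _, body := range bodies {
		// Unknown blocks are normalized to empty bodies above, so no nil check is needed.
		total += len(body.Transactions)
	}
	return total, nil
}
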
@@ -669,6 +754,9 @@ func handleRPCError(err error) error {
	case -38003:
		errInvalidPayloadAttributesCount.Inc()
		return ErrInvalidPayloadAttributes
	case -38004:
		errRequestTooLargeCount.Inc()
		return ErrRequestTooLarge
	case -32000:
		errServerErrorCount.Inc()
		// Only -32000 status codes are data errors in the RPC specification.

@@ -20,6 +20,7 @@ import (
	"github.com/holiman/uint256"
	"github.com/pkg/errors"
	mocks "github.com/prysmaticlabs/prysm/v3/beacon-chain/execution/testing"
	"github.com/prysmaticlabs/prysm/v3/config/features"
	fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v3/config/params"
	"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
@@ -1314,7 +1315,7 @@ func fixtures() map[string]interface{} {
		ReceiptsRoot:  &common.Hash{'d'},
		LogsBloom:     &hexutil.Bytes{'e'},
		PrevRandao:    &common.Hash{'f'},
		BaseFeePerGas: fmt.Sprintf("%s", "0x123"),
		BaseFeePerGas: "0x123",
		BlockHash:     &common.Hash{'g'},
		Transactions:  []hexutil.Bytes{{'h'}},
		Withdrawals:   []*pb.Withdrawal{},
@@ -1323,7 +1324,7 @@ func fixtures() map[string]interface{} {
			GasUsed:   &hexUint,
			Timestamp: &hexUint,
		},
		BlockValue: fmt.Sprintf("%s", "0x11ff"),
		BlockValue: "0x11ff",
	}
	parent := bytesutil.PadTo([]byte("parentHash"), fieldparams.RootLength)
	sha3Uncles := bytesutil.PadTo([]byte("sha3Uncles"), fieldparams.RootLength)
@@ -1817,3 +1818,505 @@ func newPayloadV2Setup(t *testing.T, status *pb.PayloadStatus, payload *pb.Execu
|
||||
service.rpcClient = rpcClient
|
||||
return service
|
||||
}
|
||||
|
||||
func TestCapella_PayloadBodiesByHash(t *testing.T) {
|
||||
resetFn := features.InitWithReset(&features.Flags{
|
||||
EnableOptionalEngineMethods: true,
|
||||
})
|
||||
defer resetFn()
|
||||
t.Run("empty response works", func(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
executionPayloadBodies := make([]*pb.ExecutionPayloadBodyV1, 0)
|
||||
resp := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": executionPayloadBodies,
|
||||
}
|
||||
err := json.NewEncoder(w).Encode(resp)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
ctx := context.Background()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
service := &Service{}
|
||||
service.rpcClient = rpcClient
|
||||
|
||||
results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(results))
|
||||
|
||||
for _, item := range results {
|
||||
require.NotNil(t, item)
|
||||
}
|
||||
})
|
||||
t.Run("single element response null works", func(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
executionPayloadBodies := make([]*pb.ExecutionPayloadBodyV1, 1)
|
||||
executionPayloadBodies[0] = nil
|
||||
|
||||
resp := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": executionPayloadBodies,
|
||||
}
|
||||
err := json.NewEncoder(w).Encode(resp)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
ctx := context.Background()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
service := &Service{}
|
||||
service.rpcClient = rpcClient
|
||||
|
||||
results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(results))
|
||||
|
||||
for _, item := range results {
|
||||
require.NotNil(t, item)
|
||||
}
|
||||
})
|
||||
t.Run("empty, null, full works", func(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
executionPayloadBodies := make([]*pb.ExecutionPayloadBodyV1, 3)
|
||||
executionPayloadBodies[0] = &pb.ExecutionPayloadBodyV1{
|
||||
Transactions: [][]byte{},
|
||||
Withdrawals: []*pb.Withdrawal{},
|
||||
}
|
||||
executionPayloadBodies[1] = nil
|
||||
executionPayloadBodies[2] = &pb.ExecutionPayloadBodyV1{
|
||||
Transactions: [][]byte{hexutil.MustDecode("0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86")},
|
||||
Withdrawals: []*pb.Withdrawal{{
|
||||
Index: 1,
|
||||
ValidatorIndex: 1,
|
||||
Address: hexutil.MustDecode("0xcf8e0d4e9587369b2301d0790347320302cc0943"),
|
||||
Amount: 1,
|
||||
}},
|
||||
}
|
||||
|
||||
resp := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": executionPayloadBodies,
|
||||
}
|
||||
err := json.NewEncoder(w).Encode(resp)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
ctx := context.Background()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
service := &Service{}
|
||||
service.rpcClient = rpcClient
|
||||
|
||||
results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 3, len(results))
|
||||
|
||||
for _, item := range results {
|
||||
require.NotNil(t, item)
|
||||
}
|
||||
})
|
||||
t.Run("full works, single item", func(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
executionPayloadBodies := make([]*pb.ExecutionPayloadBodyV1, 1)
|
||||
executionPayloadBodies[0] = &pb.ExecutionPayloadBodyV1{
|
||||
Transactions: [][]byte{hexutil.MustDecode("0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86")},
|
||||
Withdrawals: []*pb.Withdrawal{{
|
||||
Index: 1,
|
||||
ValidatorIndex: 1,
|
||||
Address: hexutil.MustDecode("0xcf8e0d4e9587369b2301d0790347320302cc0943"),
|
||||
Amount: 1,
|
||||
}},
|
||||
}
|
||||
|
||||
resp := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": executionPayloadBodies,
|
||||
}
|
||||
err := json.NewEncoder(w).Encode(resp)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
ctx := context.Background()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
service := &Service{}
|
||||
service.rpcClient = rpcClient
|
||||
|
||||
results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(results))
|
||||
|
||||
for _, item := range results {
|
||||
require.NotNil(t, item)
|
||||
}
|
||||
})
|
||||
t.Run("full works, multiple items", func(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
executionPayloadBodies := make([]*pb.ExecutionPayloadBodyV1, 2)
|
||||
executionPayloadBodies[0] = &pb.ExecutionPayloadBodyV1{
|
||||
Transactions: [][]byte{hexutil.MustDecode("0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86")},
|
||||
Withdrawals: []*pb.Withdrawal{{
|
||||
Index: 1,
|
||||
ValidatorIndex: 1,
|
||||
Address: hexutil.MustDecode("0xcf8e0d4e9587369b2301d0790347320302cc0943"),
|
||||
Amount: 1,
|
||||
}},
|
||||
}
|
||||
executionPayloadBodies[1] = &pb.ExecutionPayloadBodyV1{
|
||||
Transactions: [][]byte{hexutil.MustDecode("0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86")},
|
||||
Withdrawals: []*pb.Withdrawal{{
|
||||
Index: 2,
|
||||
ValidatorIndex: 1,
|
||||
Address: hexutil.MustDecode("0xcf8e0d4e9587369b2301d0790347320302cc0943"),
|
||||
Amount: 1,
|
||||
}},
|
||||
}
|
||||
|
||||
resp := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": executionPayloadBodies,
|
||||
}
|
||||
err := json.NewEncoder(w).Encode(resp)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
ctx := context.Background()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
service := &Service{}
|
||||
service.rpcClient = rpcClient
|
||||
|
||||
results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(results))
|
||||
|
||||
for _, item := range results {
|
||||
require.NotNil(t, item)
|
||||
}
|
||||
})
|
||||
t.Run("returning empty, null, empty should work properly", func(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
// [A, B, C] but no B in the server means
|
||||
// we get [Abody, null, Cbody].
|
||||
executionPayloadBodies := make([]*pb.ExecutionPayloadBodyV1, 3)
|
||||
executionPayloadBodies[0] = &pb.ExecutionPayloadBodyV1{
|
||||
Transactions: [][]byte{},
|
||||
Withdrawals: []*pb.Withdrawal{},
|
||||
}
|
||||
executionPayloadBodies[1] = nil
|
||||
executionPayloadBodies[2] = &pb.ExecutionPayloadBodyV1{
|
||||
Transactions: [][]byte{},
|
||||
Withdrawals: []*pb.Withdrawal{},
|
||||
}
|
||||
|
||||
resp := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": executionPayloadBodies,
|
||||
}
|
||||
err := json.NewEncoder(w).Encode(resp)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
ctx := context.Background()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
service := &Service{}
|
||||
service.rpcClient = rpcClient
|
||||
|
||||
results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 3, len(results))
|
||||
|
||||
for _, item := range results {
|
||||
require.NotNil(t, item)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestCapella_PayloadBodiesByRange(t *testing.T) {
|
||||
resetFn := features.InitWithReset(&features.Flags{
|
||||
EnableOptionalEngineMethods: true,
|
||||
})
|
||||
defer resetFn()
|
||||
t.Run("empty response works", func(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
executionPayloadBodies := make([]*pb.ExecutionPayloadBodyV1, 0)
|
||||
resp := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": executionPayloadBodies,
|
||||
}
|
||||
err := json.NewEncoder(w).Encode(resp)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
ctx := context.Background()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
service := &Service{}
|
||||
service.rpcClient = rpcClient
|
||||
|
||||
results, err := service.GetPayloadBodiesByRange(ctx, uint64(1), uint64(2))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(results))
|
||||
|
||||
for _, item := range results {
|
||||
require.NotNil(t, item)
|
||||
}
|
||||
})
|
||||
t.Run("single element response null works", func(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
executionPayloadBodies := make([]*pb.ExecutionPayloadBodyV1, 1)
|
||||
executionPayloadBodies[0] = nil
|
||||
|
||||
resp := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": executionPayloadBodies,
|
||||
}
|
||||
err := json.NewEncoder(w).Encode(resp)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
ctx := context.Background()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
service := &Service{}
|
||||
service.rpcClient = rpcClient
|
||||
|
||||
results, err := service.GetPayloadBodiesByRange(ctx, uint64(1), uint64(2))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(results))
|
||||
|
||||
for _, item := range results {
|
||||
require.NotNil(t, item)
|
||||
}
|
||||
})
|
||||
t.Run("empty, null, full works", func(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
executionPayloadBodies := make([]*pb.ExecutionPayloadBodyV1, 3)
|
||||
executionPayloadBodies[0] = &pb.ExecutionPayloadBodyV1{
|
||||
Transactions: [][]byte{},
|
||||
Withdrawals: []*pb.Withdrawal{},
|
||||
}
|
||||
executionPayloadBodies[1] = nil
|
||||
executionPayloadBodies[2] = &pb.ExecutionPayloadBodyV1{
|
||||
Transactions: [][]byte{hexutil.MustDecode("0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86")},
|
||||
Withdrawals: []*pb.Withdrawal{{
|
||||
Index: 1,
|
||||
ValidatorIndex: 1,
|
||||
Address: hexutil.MustDecode("0xcf8e0d4e9587369b2301d0790347320302cc0943"),
|
||||
Amount: 1,
|
||||
}},
|
||||
}
|
||||
|
||||
resp := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": executionPayloadBodies,
|
||||
}
|
||||
err := json.NewEncoder(w).Encode(resp)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
ctx := context.Background()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
service := &Service{}
|
||||
service.rpcClient = rpcClient
|
||||
|
||||
results, err := service.GetPayloadBodiesByRange(ctx, uint64(1), uint64(2))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 3, len(results))
|
||||
|
||||
for _, item := range results {
|
||||
require.NotNil(t, item)
|
||||
}
|
||||
})
|
||||
t.Run("full works, single item", func(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
executionPayloadBodies := make([]*pb.ExecutionPayloadBodyV1, 1)
|
||||
executionPayloadBodies[0] = &pb.ExecutionPayloadBodyV1{
|
||||
Transactions: [][]byte{hexutil.MustDecode("0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86")},
|
||||
Withdrawals: []*pb.Withdrawal{{
|
||||
Index: 1,
|
||||
ValidatorIndex: 1,
|
||||
Address: hexutil.MustDecode("0xcf8e0d4e9587369b2301d0790347320302cc0943"),
|
||||
Amount: 1,
|
||||
}},
|
||||
}
|
||||
|
||||
resp := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": executionPayloadBodies,
|
||||
}
|
||||
err := json.NewEncoder(w).Encode(resp)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
ctx := context.Background()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
service := &Service{}
|
||||
service.rpcClient = rpcClient
|
||||
|
||||
results, err := service.GetPayloadBodiesByRange(ctx, uint64(1), uint64(2))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(results))
|
||||
|
||||
for _, item := range results {
|
||||
require.NotNil(t, item)
|
||||
}
|
||||
})
|
||||
t.Run("full works, multiple items", func(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
executionPayloadBodies := make([]*pb.ExecutionPayloadBodyV1, 2)
|
||||
executionPayloadBodies[0] = &pb.ExecutionPayloadBodyV1{
|
||||
Transactions: [][]byte{hexutil.MustDecode("0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86")},
|
||||
Withdrawals: []*pb.Withdrawal{{
|
||||
Index: 1,
|
||||
ValidatorIndex: 1,
|
||||
Address: hexutil.MustDecode("0xcf8e0d4e9587369b2301d0790347320302cc0943"),
|
||||
Amount: 1,
|
||||
}},
|
||||
}
|
||||
executionPayloadBodies[1] = &pb.ExecutionPayloadBodyV1{
|
||||
Transactions: [][]byte{hexutil.MustDecode("0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86")},
|
||||
Withdrawals: []*pb.Withdrawal{{
|
||||
Index: 2,
|
||||
ValidatorIndex: 1,
|
||||
Address: hexutil.MustDecode("0xcf8e0d4e9587369b2301d0790347320302cc0943"),
|
||||
Amount: 1,
|
||||
}},
|
||||
}
|
||||
|
||||
resp := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": executionPayloadBodies,
|
||||
}
|
||||
err := json.NewEncoder(w).Encode(resp)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
ctx := context.Background()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
service := &Service{}
|
||||
service.rpcClient = rpcClient
|
||||
|
||||
results, err := service.GetPayloadBodiesByRange(ctx, uint64(1), uint64(2))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(results))
|
||||
|
||||
for _, item := range results {
|
||||
require.NotNil(t, item)
|
||||
}
|
||||
})
|
||||
t.Run("returning empty, null, empty should work properly", func(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
// [A, B, C] but no B in the server means
|
||||
// we get [Abody, null, Cbody].
|
||||
executionPayloadBodies := make([]*pb.ExecutionPayloadBodyV1, 3)
|
||||
executionPayloadBodies[0] = &pb.ExecutionPayloadBodyV1{
|
||||
Transactions: [][]byte{},
|
||||
Withdrawals: []*pb.Withdrawal{},
|
||||
}
|
||||
executionPayloadBodies[1] = nil
|
||||
executionPayloadBodies[2] = &pb.ExecutionPayloadBodyV1{
|
||||
Transactions: [][]byte{},
|
||||
Withdrawals: []*pb.Withdrawal{},
|
||||
}
|
||||
|
||||
resp := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": executionPayloadBodies,
|
||||
}
|
||||
err := json.NewEncoder(w).Encode(resp)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
ctx := context.Background()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
|
||||
service := &Service{}
|
||||
service.rpcClient = rpcClient
|
||||
|
||||
results, err := service.GetPayloadBodiesByRange(ctx, uint64(1), uint64(2))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 3, len(results))
|
||||
|
||||
for _, item := range results {
|
||||
require.NotNil(t, item)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -34,4 +34,6 @@ var (
	ErrInvalidBlockHashPayloadStatus = errors.New("payload status is INVALID_BLOCK_HASH")
	// ErrNilResponse when the response is nil.
	ErrNilResponse = errors.New("nil response")
	// ErrRequestTooLarge when the request is too large.
	ErrRequestTooLarge = errors.New("request too large")
)

@@ -479,7 +479,7 @@ func (s *Service) requestBatchedHeadersAndLogs(ctx context.Context) error {
func (s *Service) retrieveBlockHashAndTime(ctx context.Context, blkNum *big.Int) ([32]byte, uint64, error) {
	bHash, err := s.BlockHashByHeight(ctx, blkNum)
	if err != nil {
		return [32]byte{}, 0, errors.Wrap(err, "could not get eth1 block hash")
		return [32]byte{}, 0, nil
	}
	if bHash == [32]byte{} {
		return [32]byte{}, 0, errors.Wrap(err, "got empty block hash")

@@ -71,4 +71,8 @@ var (
		Name: "reconstructed_execution_payload_count",
		Help: "Count the number of execution payloads that are reconstructed using JSON-RPC from payload headers",
	})
	errRequestTooLargeCount = promauto.NewCounter(prometheus.CounterOpts{
		Name: "execution_payload_bodies_count",
		Help: "Count of payload body requests rejected by the execution client as too large",
	})
)

@@ -3,6 +3,7 @@ package execution

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strings"
	"time"
@@ -14,7 +15,6 @@ import (
	contracts "github.com/prysmaticlabs/prysm/v3/contracts/deposit"
	"github.com/prysmaticlabs/prysm/v3/io/logs"
	"github.com/prysmaticlabs/prysm/v3/network"
	"github.com/prysmaticlabs/prysm/v3/network/authorization"
)

func (s *Service) setupExecutionClientConnections(ctx context.Context, currEndpoint network.Endpoint) error {
@@ -34,7 +34,7 @@ func (s *Service) setupExecutionClientConnections(ctx context.Context, currEndpo
	}
	s.depositContractCaller = depositContractCaller

	// Ensure we have the correct chain and deposit IDs.
	//Ensure we have the correct chain and deposit IDs.
	if err := ensureCorrectExecutionChain(ctx, fetcher); err != nil {
		client.Close()
		errStr := err.Error()
@@ -113,9 +113,21 @@ func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.End
	if err != nil {
		return nil, err
	}
	headers := http.Header{}
	for _, h := range s.cfg.headers {
		if h == "" {
			continue
		}
		keyValue := strings.Split(h, "=")
		if len(keyValue) < 2 {
			log.Warnf("Incorrect HTTP header flag format. Skipping %v", keyValue[0])
			continue
		}
		headers.Set(keyValue[0], strings.Join(keyValue[1:], "="))
	}
	switch u.Scheme {
	case "http", "https":
		client, err = gethRPC.DialOptions(ctx, endpoint.Url, gethRPC.WithHTTPClient(endpoint.HttpClient()))
		client, err = gethRPC.DialOptions(ctx, endpoint.Url, gethRPC.WithHTTPClient(endpoint.HttpClient()), gethRPC.WithHeaders(headers))
		if err != nil {
			return nil, err
		}
@@ -127,13 +139,6 @@ func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.End
	default:
		return nil, fmt.Errorf("no known transport for URL scheme %q", u.Scheme)
	}
	if endpoint.Auth.Method != authorization.None {
		header, err := endpoint.Auth.ToHeaderValue()
		if err != nil {
			return nil, err
		}
		client.SetHeader("Authorization", header)
	}
	for _, h := range s.cfg.headers {
		if h != "" {
			keyValue := strings.Split(h, "=")

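// The header-parsing loop above splits each "key=value" flag on "=" and re-joins the tail,
// so values that themselves contain "=" (for example base64 padding) survive intact.
// A minimal standalone sketch of that behavior (flag values here are illustrative only):
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func parseHeaderFlags(flags []string) http.Header {
	headers := http.Header{}
	for _, h := range flags {
		if h == "" {
			continue
		}
		keyValue := strings.Split(h, "=")
		if len(keyValue) < 2 {
			fmt.Printf("skipping malformed header flag %q\n", keyValue[0])
			continue
		}
		headers.Set(keyValue[0], strings.Join(keyValue[1:], "="))
	}
	return headers
}

func main() {
	h := parseHeaderFlags([]string{"Authorization=Basic dXNlcjpwYXNz==", "X-Empty"})
	fmt.Println(h.Get("Authorization")) // Basic dXNlcjpwYXNz==
}
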
@@ -38,6 +38,7 @@ type EngineClient struct {
	TerminalBlockHash       []byte
	TerminalBlockHashExists bool
	OverrideValidHash       [32]byte
	BlobsBundle             *pb.BlobsBundle
	BlockValue              *big.Int
}

@@ -172,3 +173,8 @@ func (e *EngineClient) GetTerminalBlockHash(ctx context.Context, transitionTime
		blk = parentBlk
	}
}

// GetBlobsBundle --
func (e *EngineClient) GetBlobsBundle(ctx context.Context, payloadId [8]byte) (*pb.BlobsBundle, error) {
	return e.BlobsBundle, nil
}

@@ -16,9 +16,9 @@ import (
// consider a block to be late, and thus a candidate to being reorged.
const orphanLateBlockFirstThreshold = 4

// processAttestationsThreshold is the number of seconds after which we
// ProcessAttestationsThreshold is the number of seconds after which we
// process attestations for the current slot
const processAttestationsThreshold = 10
const ProcessAttestationsThreshold = 10

// applyWeightChanges recomputes the weight of the node passed as an argument and all of its descendants,
// using the current balance stored in each node.
@@ -148,7 +148,7 @@ func (n *Node) arrivedEarly(genesisTime uint64) (bool, error) {
// slot will have secs = 10 below.
func (n *Node) arrivedAfterOrphanCheck(genesisTime uint64) (bool, error) {
	secs, err := slots.SecondsSinceSlotStart(n.slot, genesisTime, n.timestamp)
	return secs >= processAttestationsThreshold, err
	return secs >= ProcessAttestationsThreshold, err
}

// nodeTreeDump appends to the given list all the nodes descending from this one

@@ -294,7 +294,7 @@ func TestNode_TimeStampsChecks(t *testing.T) {
	require.Equal(t, false, late)

	// very late block
	driftGenesisTime(f, 3, processAttestationsThreshold+1)
	driftGenesisTime(f, 3, ProcessAttestationsThreshold+1)
	root = [32]byte{'c'}
	state, blkRoot, err = prepareForkchoiceState(ctx, 3, root, [32]byte{'b'}, [32]byte{'C'}, 0, 0)
	require.NoError(t, err)

@@ -3,6 +3,7 @@ package doublylinkedtree

import (
	"time"

	"github.com/prysmaticlabs/prysm/v3/config/features"
	"github.com/prysmaticlabs/prysm/v3/config/params"
	"github.com/prysmaticlabs/prysm/v3/time/slots"
)
@@ -41,9 +42,11 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
	if head == nil {
		return
	}

	if head.slot != slots.CurrentSlot(f.store.genesisTime) {
		return
	}

	// Do not reorg on epoch boundaries
	if (head.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
		return
@@ -90,6 +93,9 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
// This function needs to be called only when proposing a block and all
// attestation processing has already happened.
func (f *ForkChoice) GetProposerHead() [32]byte {
	if features.Get().DisableReorgLateBlocks {
		return f.CachedHeadRoot()
	}
	head := f.store.headNode
	if head == nil {
		return [32]byte{}

@@ -143,18 +143,19 @@ func TestConfigureNetwork_ConfigFile(t *testing.T) {
		"node2")), 0666))

	require.NoError(t, set.Parse([]string{"test-command", "--" + cmd.ConfigFileFlag.Name, "flags_test.yaml"}))
	comFlags := cmd.WrapFlags([]cli.Flag{
		&cli.StringFlag{
			Name: cmd.ConfigFileFlag.Name,
		},
		&cli.StringSliceFlag{
			Name: cmd.BootstrapNode.Name,
		},
	})
	command := &cli.Command{
		Name: "test-command",
		Flags: cmd.WrapFlags([]cli.Flag{
			&cli.StringFlag{
				Name: cmd.ConfigFileFlag.Name,
			},
			&cli.StringSliceFlag{
				Name: cmd.BootstrapNode.Name,
			},
		}),
		Name:  "test-command",
		Flags: comFlags,
		Before: func(cliCtx *cli.Context) error {
			return cmd.LoadFlagsFromConfig(cliCtx, cliCtx.Command.Flags)
			return cmd.LoadFlagsFromConfig(cliCtx, comFlags)
		},
		Action: func(cliCtx *cli.Context) error {
			//TODO: https://github.com/urfave/cli/issues/1197 right now does not set flag
@@ -165,7 +166,7 @@ func TestConfigureNetwork_ConfigFile(t *testing.T) {
			return nil
		},
	}
	require.NoError(t, command.Run(context))
	require.NoError(t, command.Run(context, context.Args().Slice()...))
	require.NoError(t, os.Remove("flags_test.yaml"))
}

@@ -86,7 +86,6 @@ go_library(
        "@com_github_libp2p_go_libp2p//core/peer:go_default_library",
        "@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
        "@com_github_libp2p_go_libp2p//p2p/muxer/mplex:go_default_library",
        "@com_github_libp2p_go_libp2p//p2p/protocol/identify:go_default_library",
        "@com_github_libp2p_go_libp2p//p2p/security/noise:go_default_library",
        "@com_github_libp2p_go_libp2p//p2p/transport/tcp:go_default_library",
        "@com_github_libp2p_go_libp2p_pubsub//:go_default_library",

@@ -141,6 +141,57 @@ func (s *Service) broadcastAttestation(ctx context.Context, subnet uint64, att *
|
||||
}
|
||||
}
|
||||
|
||||
// BroadcastBlob broadcasts a blob to the p2p network, the message is assumed to be
|
||||
// broadcasted to the current fork and to the input subnet.
|
||||
func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.SignedBlobSidecar) error {
|
||||
ctx, span := trace.StartSpan(ctx, "p2p.BroadcastBlob")
|
||||
defer span.End()
|
||||
forkDigest, err := s.currentForkDigest()
|
||||
if err != nil {
|
||||
err := errors.Wrap(err, "could not retrieve fork digest")
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Non-blocking broadcast, with attempts to discover a subnet peer if none available.
|
||||
go s.broadcastBlob(ctx, subnet, blob, forkDigest)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) broadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.SignedBlobSidecar, forkDigest [4]byte) {
|
||||
ctx, span := trace.StartSpan(ctx, "p2p.broadcastBlob")
|
||||
defer span.End()
|
||||
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
|
||||
|
||||
// Ensure we have peers with this subnet.
|
||||
s.subnetLocker(subnet).RLock()
|
||||
hasPeer := s.hasPeerWithSubnet(blobSubnetToTopic(subnet, forkDigest))
|
||||
s.subnetLocker(subnet).RUnlock()
|
||||
|
||||
if !hasPeer {
|
||||
if err := func() error {
|
||||
s.subnetLocker(subnet).Lock()
|
||||
defer s.subnetLocker(subnet).Unlock()
|
||||
ok, err := s.FindPeersWithSubnet(ctx, blobSubnetToTopic(subnet, forkDigest), subnet, 1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ok {
|
||||
return nil
|
||||
}
|
||||
return errors.New("failed to find peers for subnet")
|
||||
}(); err != nil {
|
||||
log.WithError(err).Error("Failed to find peers")
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.broadcastObject(ctx, blobSidecar, blobSubnetToTopic(subnet, forkDigest)); err != nil {
|
||||
log.WithError(err).Error("Failed to broadcast blob sidecar")
|
||||
tracing.AnnotateError(span, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [4]byte) {
|
||||
ctx, span := trace.StartSpan(ctx, "p2p.broadcastSyncCommittee")
|
||||
defer span.End()
|
||||
@@ -232,3 +283,7 @@ func attestationToTopic(subnet uint64, forkDigest [4]byte) string {
|
||||
func syncCommitteeToTopic(subnet uint64, forkDigest [4]byte) string {
|
||||
return fmt.Sprintf(SyncCommitteeSubnetTopicFormat, forkDigest, subnet)
|
||||
}
|
||||
|
||||
func blobSubnetToTopic(subnet uint64, forkDigest [4]byte) string {
|
||||
return fmt.Sprintf(BlobSubnetTopicFormat, forkDigest, subnet)
|
||||
}
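A minimal sketch (not part of this change) of how a caller holding the extended p2p Broadcaster interface might publish a signed sidecar; deriving the subnet directly from the sidecar index is an assumption made only for illustration.

// Hypothetical caller; BroadcastBlob and SignedBlobSidecar come from this change,
// the index-to-subnet mapping is assumed.
func publishBlobSidecar(ctx context.Context, b p2p.Broadcaster, sc *ethpb.SignedBlobSidecar) error {
	subnet := sc.Message.Index // assumption: one gossip subnet per blob index
	return b.BroadcastBlob(ctx, subnet, sc)
}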
@@ -17,7 +17,8 @@ func (s *Service) forkWatcher() {
|
||||
currEpoch := slots.ToEpoch(currSlot)
|
||||
if currEpoch == params.BeaconConfig().AltairForkEpoch ||
|
||||
currEpoch == params.BeaconConfig().BellatrixForkEpoch ||
|
||||
currEpoch == params.BeaconConfig().CapellaForkEpoch {
|
||||
currEpoch == params.BeaconConfig().CapellaForkEpoch ||
|
||||
currEpoch == params.BeaconConfig().DenebForkEpoch {
|
||||
// If we are in the fork epoch, we update our enr with
|
||||
// the updated fork digest. This repeats throughout the
// epoch, which might be slightly wasteful
|
||||
|
||||
@@ -119,6 +119,9 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
|
||||
return defaultProposerSlashingTopicParams(), nil
|
||||
case strings.Contains(topic, GossipAttesterSlashingMessage):
|
||||
return defaultAttesterSlashingTopicParams(), nil
|
||||
case strings.Contains(topic, GossipBlobSidecarMessage):
|
||||
// TODO(Deneb): Using the default block scoring. But this should be updated.
|
||||
return defaultBlockTopicParams(), nil
|
||||
case strings.Contains(topic, GossipBlsToExecutionChangeMessage):
|
||||
return defaultBlsToExecutionChangeTopicParams(), nil
|
||||
default:
@@ -21,12 +21,16 @@ var gossipTopicMappings = map[string]proto.Message{
|
||||
SyncContributionAndProofSubnetTopicFormat: ðpb.SignedContributionAndProof{},
|
||||
SyncCommitteeSubnetTopicFormat: ðpb.SyncCommitteeMessage{},
|
||||
BlsToExecutionChangeSubnetTopicFormat: ðpb.SignedBLSToExecutionChange{},
|
||||
BlobSubnetTopicFormat: ðpb.SignedBlobSidecar{},
|
||||
}
|
||||
|
||||
// GossipTopicMappings is a function to return the assigned data type
|
||||
// versioned by epoch.
|
||||
func GossipTopicMappings(topic string, epoch primitives.Epoch) proto.Message {
|
||||
if topic == BlockSubnetTopicFormat {
|
||||
if epoch >= params.BeaconConfig().DenebForkEpoch {
|
||||
return ðpb.SignedBeaconBlockDeneb{}
|
||||
}
|
||||
if epoch >= params.BeaconConfig().CapellaForkEpoch {
|
||||
return ðpb.SignedBeaconBlockCapella{}
|
||||
}
@@ -35,6 +35,7 @@ type Broadcaster interface {
|
||||
Broadcast(context.Context, proto.Message) error
|
||||
BroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation) error
|
||||
BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage) error
|
||||
BroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.SignedBlobSidecar) error
|
||||
}
// SetStreamHandler configures p2p to handle streams of a certain topic ID.
@@ -57,12 +57,17 @@ func (s *Service) CanSubscribe(topic string) bool {
|
||||
log.WithError(err).Error("Could not determine Capella fork digest")
|
||||
return false
|
||||
}
|
||||
|
||||
denebForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, s.genesisValidatorsRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not determine Capella fork digest")
|
||||
return false
|
||||
}
|
||||
switch parts[2] {
|
||||
case fmt.Sprintf("%x", phase0ForkDigest):
|
||||
case fmt.Sprintf("%x", altairForkDigest):
|
||||
case fmt.Sprintf("%x", bellatrixForkDigest):
|
||||
case fmt.Sprintf("%x", capellaForkDigest):
|
||||
case fmt.Sprintf("%x", denebForkDigest):
|
||||
default:
|
||||
return false
|
||||
}
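As a quick illustration (not part of the diff), the new Deneb case is meant to accept a blob topic built from the Deneb fork digest; the encoding suffix that normally trails the topic is omitted here for brevity.

digest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, s.genesisValidatorsRoot)
if err != nil {
	return false
}
// e.g. "/eth2/<deneb digest>/blob_sidecar_0" plus the usual encoder suffix
topic := fmt.Sprintf(BlobSubnetTopicFormat, digest, 0 /* subnet */)
ok := s.CanSubscribe(topic)
_ = ok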
@@ -92,7 +92,7 @@ func TestService_CanSubscribe(t *testing.T) {
|
||||
formatting := []interface{}{digest}
|
||||
|
||||
// Special case for attestation subnets which have a second formatting placeholder.
|
||||
if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat {
|
||||
if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat || topic == BlobSubnetTopicFormat {
|
||||
formatting = append(formatting, 0 /* some subnet ID */)
|
||||
}
@@ -37,6 +37,9 @@ const PingMessageName = "/ping"
|
||||
// MetadataMessageName specifies the name for the metadata message topic.
|
||||
const MetadataMessageName = "/metadata"
const BlobSidecarsByRootName = "/blob_sidecars_by_root"
|
||||
const BlobSidecarsByRangeName = "/blob_sidecars_by_range"
const (
|
||||
// V1 RPC Topics
|
||||
// RPCStatusTopicV1 defines the v1 topic for the status rpc method.
|
||||
@@ -52,6 +55,15 @@ const (
|
||||
// RPCMetaDataTopicV1 defines the v1 topic for the metadata rpc method.
|
||||
RPCMetaDataTopicV1 = protocolPrefix + MetadataMessageName + SchemaVersionV1
// RPCBlobSidecarsByRootTopicV1 is a topic for requesting blob sidecars by their block root. New in deneb.
|
||||
// /eth2/beacon_chain/req/blob_sidecars_by_root/1/
|
||||
RPCBlobSidecarsByRootTopicV1 = protocolPrefix + BlobSidecarsByRootName + SchemaVersionV1
|
||||
|
||||
// RPCBlobSidecarsByRangeTopicV1 is a topic for requesting blob sidecars
|
||||
// in the slot range [start_slot, start_slot + count), leading up to the current head block as selected by fork choice.
|
||||
// Protocol ID: /eth2/beacon_chain/req/blob_sidecars_by_range/1/ - New in deneb.
|
||||
RPCBlobSidecarsByRangeTopicV1 = protocolPrefix + BlobSidecarsByRangeName + SchemaVersionV1
|
||||
|
||||
// V2 RPC Topics
|
||||
// RPCBlocksByRangeTopicV2 defines v2 the topic for the blocks by range rpc method.
|
||||
RPCBlocksByRangeTopicV2 = protocolPrefix + BeaconBlocksByRangeMessageName + SchemaVersionV2
|
||||
@@ -83,6 +95,10 @@ var RPCTopicMappings = map[string]interface{}{
|
||||
// RPC Metadata Message
|
||||
RPCMetaDataTopicV1: new(interface{}),
|
||||
RPCMetaDataTopicV2: new(interface{}),
|
||||
// BlobSidecarsByRange v1 Message
|
||||
RPCBlobSidecarsByRangeTopicV1: new(pb.BlobSidecarsByRangeRequest),
|
||||
// BlobSidecarsByRoot v1 Message
|
||||
RPCBlobSidecarsByRootTopicV1: new(p2ptypes.BlobSidecarsByRootReq),
|
||||
}
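A short sketch (assumed usage, not from this diff) of how the new entries let a caller resolve the expected request type for a blob RPC topic before decoding a stream.

base, ok := RPCTopicMappings[RPCBlobSidecarsByRangeTopicV1]
if !ok {
	return errors.New("unknown topic")
}
req, ok := base.(*pb.BlobSidecarsByRangeRequest)
if !ok {
	return errors.New("unexpected request type")
}
_ = req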
// Maps all registered protocol prefixes.
|
||||
@@ -99,6 +115,8 @@ var messageMapping = map[string]bool{
|
||||
BeaconBlocksByRootsMessageName: true,
|
||||
PingMessageName: true,
|
||||
MetadataMessageName: true,
|
||||
BlobSidecarsByRangeName: true,
|
||||
BlobSidecarsByRootName: true,
|
||||
}
|
||||
|
||||
// Maps all the RPC messages which are to be updated in Altair.
@@ -113,6 +131,15 @@ var versionMapping = map[string]bool{
|
||||
SchemaVersionV2: true,
|
||||
}
|
||||
|
||||
var PreAltairV1SchemaMapping = map[string]bool{
|
||||
StatusMessageName: true,
|
||||
GoodbyeMessageName: true,
|
||||
BeaconBlocksByRangeMessageName: true,
|
||||
BeaconBlocksByRootsMessageName: true,
|
||||
PingMessageName: true,
|
||||
MetadataMessageName: true,
|
||||
}
|
||||
|
||||
// VerifyTopicMapping verifies that the topic and its accompanying
|
||||
// message type is correct.
|
||||
func VerifyTopicMapping(topic string, msg interface{}) error {
|
||||
|
||||
@@ -17,7 +17,6 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/libp2p/go-libp2p/p2p/protocol/identify"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v3/async"
|
||||
@@ -133,7 +132,6 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
}
|
||||
|
||||
s.host = h
|
||||
s.host.RemoveStreamHandler(identify.IDDelta)
|
||||
// Gossipsub registration is done before we add in any new peers
|
||||
// due to libp2p's gossipsub implementation not taking into
|
||||
// account previously added peers when creating the gossipsub
|
||||
|
||||
@@ -138,6 +138,11 @@ func (_ *FakeP2P) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Att
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastBlob -- fake.
|
||||
func (p *FakeP2P) BroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.SignedBlobSidecar) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastSyncCommitteeMessage -- fake.
|
||||
func (_ *FakeP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
|
||||
return nil
|
||||
|
||||
@@ -33,3 +33,9 @@ func (m *MockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ uin
|
||||
m.BroadcastCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastBlob records that a broadcast occurred.
func (m *MockBroadcaster) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.SignedBlobSidecar) error {
|
||||
m.BroadcastCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -51,7 +51,8 @@ func (_ *MockHost) Connect(_ context.Context, _ peer.AddrInfo) error {
|
||||
func (_ *MockHost) SetStreamHandler(_ protocol.ID, _ network.StreamHandler) {}
|
||||
|
||||
// SetStreamHandlerMatch --
|
||||
func (_ *MockHost) SetStreamHandlerMatch(protocol.ID, func(string) bool, network.StreamHandler) {}
|
||||
func (_ *MockHost) SetStreamHandlerMatch(protocol.ID, func(id protocol.ID) bool, network.StreamHandler) {
|
||||
}
|
||||
|
||||
// RemoveStreamHandler --
|
||||
func (_ *MockHost) RemoveStreamHandler(_ protocol.ID) {}
|
||||
|
||||
@@ -170,6 +170,11 @@ func (p *TestP2P) BroadcastAttestation(_ context.Context, _ uint64, _ *ethpb.Att
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TestP2P) BroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.SignedBlobSidecar) error {
|
||||
p.BroadcastCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastSyncCommitteeMessage broadcasts a sync committee message.
|
||||
func (p *TestP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
|
||||
p.BroadcastCalled = true
|
||||
|
||||
@@ -28,6 +28,8 @@ const (
|
||||
GossipContributionAndProofMessage = "sync_committee_contribution_and_proof"
|
||||
// GossipBlsToExecutionChangeMessage is the name for the bls to execution change message type.
|
||||
GossipBlsToExecutionChangeMessage = "bls_to_execution_change"
|
||||
// GossipBlobSidecarMessage is the name for the blob sidecar message type.
|
||||
GossipBlobSidecarMessage = "blob_sidecar"
|
||||
|
||||
// Topic Formats
|
||||
//
|
||||
@@ -49,4 +51,6 @@ const (
|
||||
SyncContributionAndProofSubnetTopicFormat = GossipProtocolAndDigest + GossipContributionAndProofMessage
|
||||
// BlsToExecutionChangeSubnetTopicFormat is the topic format for the bls to execution change subnet.
|
||||
BlsToExecutionChangeSubnetTopicFormat = GossipProtocolAndDigest + GossipBlsToExecutionChangeMessage
|
||||
// BlobSubnetTopicFormat is the topic format for the blob subnet.
|
||||
BlobSubnetTopicFormat = GossipProtocolAndDigest + GossipBlobSidecarMessage + "_%d"
|
||||
)
|
||||
|
||||
@@ -53,6 +53,11 @@ func InitializeDataMaps() {
|
||||
ðpb.SignedBeaconBlockCapella{Block: ðpb.BeaconBlockCapella{Body: ðpb.BeaconBlockBodyCapella{}}},
|
||||
)
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() (interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||
return blocks.NewSignedBeaconBlock(
|
||||
ðpb.SignedBeaconBlockDeneb{Block: ðpb.BeaconBlockDeneb{Body: ðpb.BeaconBlockBodyDeneb{}}},
|
||||
)
|
||||
},
|
||||
}
|
||||
|
||||
// Reset our metadata map.
|
||||
@@ -69,5 +74,8 @@ func InitializeDataMaps() {
|
||||
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() metadata.Metadata {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{})
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() metadata.Metadata {
|
||||
return wrapper.WrappedMetadataV1(ðpb.MetaDataV1{})
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,4 +12,5 @@ var (
|
||||
ErrRateLimited = errors.New("rate limited")
|
||||
ErrIODeadline = errors.New("i/o deadline exceeded")
|
||||
ErrInvalidRequest = errors.New("invalid range, step or count")
|
||||
ErrBlobLTMinRequest = errors.New("blob slot < minimum_request_epoch")
|
||||
)
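A minimal sketch, assuming a blob_sidecars_by_range handler and a precomputed minimum request slot (both hypothetical names), of where the new sentinel would be returned.

// Hypothetical guard inside a blob_sidecars_by_range handler.
if req.StartSlot < minRequestSlot {
	return p2ptypes.ErrBlobLTMinRequest
}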
@@ -7,6 +7,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
eth "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
const rootLength = 32
|
||||
@@ -31,6 +32,7 @@ func (b *SSZBytes) HashTreeRootWith(hh *ssz.Hasher) error {
|
||||
|
||||
// BeaconBlockByRootsReq specifies the block by roots request type.
|
||||
type BeaconBlockByRootsReq [][rootLength]byte
|
||||
type BlobSidecarsByRootReq []*eth.BlobIdentifier
|
||||
|
||||
// MarshalSSZTo marshals the block by roots request with the provided byte slice.
|
||||
func (r *BeaconBlockByRootsReq) MarshalSSZTo(dst []byte) ([]byte, error) {
|
||||
|
||||
@@ -46,6 +46,7 @@ pkg_deb(
|
||||
package = "prysm-beacon-chain",
|
||||
postinst = "postinst.sh",
|
||||
preinst = "preinst.sh",
|
||||
tags = ["no-remote"],
|
||||
version_file = "//runtime:version_file",
|
||||
visibility = ["//beacon-chain:__pkg__"],
|
||||
)
|
||||
|
||||
@@ -211,6 +211,14 @@ func handlePostSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.E
|
||||
return true
|
||||
}
|
||||
|
||||
func handleGetBlobsSidecarSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
|
||||
config := sszConfig{
|
||||
fileName: "blobs_sidecar.ssz",
|
||||
responseJson: &SszResponseJson{},
|
||||
}
|
||||
return handleGetSSZ(m, endpoint, w, req, config)
|
||||
}
|
||||
|
||||
func sszRequested(req *http.Request) (bool, error) {
|
||||
accept := req.Header.Values("Accept")
|
||||
if len(accept) == 0 {
|
||||
|
||||
@@ -503,6 +503,13 @@ type capellaBlockResponseJson struct {
|
||||
Finalized bool `json:"finalized"`
|
||||
}
|
||||
|
||||
type denebBlockResponseJson struct {
|
||||
Version string `json:"version"`
|
||||
Data *SignedBeaconBlockDenebContainerJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
}
|
||||
|
||||
type bellatrixBlindedBlockResponseJson struct {
|
||||
Version string `json:"version" enum:"true"`
|
||||
Data *SignedBlindedBeaconBlockBellatrixContainerJson `json:"data"`
|
||||
@@ -517,6 +524,12 @@ type capellaBlindedBlockResponseJson struct {
|
||||
Finalized bool `json:"finalized"`
|
||||
}
|
||||
|
||||
type denebBlindedBlockResponseJson struct {
|
||||
Version string `json:"version"`
|
||||
Data *SignedBlindedBeaconBlockDenebContainerJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
}
|
||||
|
||||
func serializeV2Block(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
|
||||
respContainer, ok := response.(*BlockV2ResponseJson)
|
||||
if !ok {
|
||||
@@ -565,6 +578,16 @@ func serializeV2Block(response interface{}) (apimiddleware.RunDefault, []byte, a
|
||||
ExecutionOptimistic: respContainer.ExecutionOptimistic,
|
||||
Finalized: respContainer.Finalized,
|
||||
}
|
||||
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_Deneb.String())):
|
||||
actualRespContainer = &denebBlockResponseJson{
|
||||
Version: respContainer.Version,
|
||||
Data: &SignedBeaconBlockDenebContainerJson{
|
||||
Message: respContainer.Data.DenebBlock,
|
||||
Signature: respContainer.Data.Signature,
|
||||
},
|
||||
ExecutionOptimistic: respContainer.ExecutionOptimistic,
|
||||
Finalized: respContainer.Finalized,
|
||||
}
|
||||
default:
|
||||
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
|
||||
}
|
||||
@@ -624,6 +647,15 @@ func serializeBlindedBlock(response interface{}) (apimiddleware.RunDefault, []by
|
||||
ExecutionOptimistic: respContainer.ExecutionOptimistic,
|
||||
Finalized: respContainer.Finalized,
|
||||
}
|
||||
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_Deneb.String())):
|
||||
actualRespContainer = &denebBlindedBlockResponseJson{
|
||||
Version: respContainer.Version,
|
||||
Data: &SignedBlindedBeaconBlockDenebContainerJson{
|
||||
Message: respContainer.Data.DenebBlock,
|
||||
Signature: respContainer.Data.Signature,
|
||||
},
|
||||
ExecutionOptimistic: respContainer.ExecutionOptimistic,
|
||||
}
|
||||
default:
|
||||
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
|
||||
}
|
||||
@@ -655,6 +687,11 @@ type capellaStateResponseJson struct {
|
||||
Data *BeaconStateCapellaJson `json:"data"`
|
||||
}
|
||||
|
||||
type denebStateResponseJson struct {
|
||||
Version string `json:"version"`
|
||||
Data *BeaconStateDenebJson `json:"data"`
|
||||
}
|
||||
|
||||
func serializeV2State(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
|
||||
respContainer, ok := response.(*BeaconStateV2ResponseJson)
|
||||
if !ok {
|
||||
@@ -683,6 +720,11 @@ func serializeV2State(response interface{}) (apimiddleware.RunDefault, []byte, a
|
||||
Version: respContainer.Version,
|
||||
Data: respContainer.Data.CapellaState,
|
||||
}
|
||||
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_Deneb.String())):
|
||||
actualRespContainer = &denebStateResponseJson{
|
||||
Version: respContainer.Version,
|
||||
Data: respContainer.Data.DenebState,
|
||||
}
|
||||
default:
|
||||
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported state version '%s'", respContainer.Version))
|
||||
}
|
||||
@@ -719,6 +761,40 @@ type bellatrixProduceBlindedBlockResponseJson struct {
|
||||
Data *BlindedBeaconBlockBellatrixJson `json:"data"`
|
||||
}
|
||||
|
||||
type tempBlobJson struct {
|
||||
Data string `json:"data"`
|
||||
}
|
||||
|
||||
type tempBlobsSidecarJson struct {
|
||||
BeaconBlockRoot string `json:"beacon_block_root"`
|
||||
BeaconBlockSlot string `json:"beacon_block_slot"`
|
||||
Blobs []tempBlobJson `json:"blobs"`
|
||||
AggregatedProof string `json:"kzg_aggregated_proof"`
|
||||
}
|
||||
|
||||
// This takes the blobs list and exposes the data field of each blob as the blob content itself in the JSON response.
func prepareBlobsResponse(body []byte, responseContainer interface{}) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
|
||||
tempContainer := &tempBlobsSidecarJson{}
|
||||
if err := json.Unmarshal(body, tempContainer); err != nil {
|
||||
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not unmarshal response into temp container")
|
||||
}
|
||||
container, ok := responseContainer.(*blobsSidecarResponseJson)
|
||||
if !ok {
|
||||
return false, apimiddleware.InternalServerError(errors.New("container is not of the correct type"))
|
||||
}
|
||||
|
||||
container.Data = &blobsSidecarJson{
|
||||
BeaconBlockRoot: tempContainer.BeaconBlockRoot,
|
||||
BeaconBlockSlot: tempContainer.BeaconBlockSlot,
|
||||
Blobs: make([]string, len(tempContainer.Blobs)),
|
||||
AggregatedProof: tempContainer.AggregatedProof,
|
||||
}
|
||||
for i, blob := range tempContainer.Blobs {
|
||||
container.Data.Blobs[i] = blob.Data
|
||||
}
|
||||
return false, nil
|
||||
}
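A small sketch of the reshaping this hook performs, with made-up values: the blob objects coming back from the gRPC gateway are flattened into plain hex strings before the response container is deserialized.

in := []byte(`{"beacon_block_root":"0x01","beacon_block_slot":"2","blobs":[{"data":"0xff"}],"kzg_aggregated_proof":"0xaa"}`)
container := &blobsSidecarResponseJson{}
if _, errJSON := prepareBlobsResponse(in, container); errJSON != nil {
	// handle error
}
// container.Data.Blobs is now []string{"0xff"}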
type capellaProduceBlindedBlockResponseJson struct {
|
||||
Version string `json:"version" enum:"true"`
|
||||
Data *BlindedBeaconBlockCapellaJson `json:"data"`
|
||||
|
||||
@@ -75,6 +75,7 @@ func (_ *BeaconEndpointFactory) Paths() []string {
|
||||
"/eth/v1/validator/prepare_beacon_proposer",
|
||||
"/eth/v1/validator/register_validator",
|
||||
"/eth/v1/validator/liveness/{epoch}",
|
||||
"/eth/v1/beacon/blobs_sidecars/{block_id}",
|
||||
}
|
||||
}
|
||||
|
||||
@@ -136,6 +137,12 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
|
||||
OnPreSerializeMiddlewareResponseIntoJson: serializeV2Block,
|
||||
}
|
||||
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleGetBeaconBlockSSZV2}
|
||||
case "/eth/v1/beacon/blobs_sidecars/{block_id}":
|
||||
endpoint.GetResponse = &blobsSidecarResponseJson{}
|
||||
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleGetBlobsSidecarSSZ}
|
||||
endpoint.Hooks = apimiddleware.HookCollection{
|
||||
OnPreDeserializeGrpcResponseBodyIntoContainer: prepareBlobsResponse,
|
||||
}
|
||||
case "/eth/v1/beacon/blocks/{block_id}/root":
|
||||
endpoint.GetResponse = &BlockRootResponseJson{}
|
||||
case "/eth/v1/beacon/blocks/{block_id}/attestations":
|
||||
|
||||
@@ -395,6 +395,7 @@ type SignedBeaconBlockContainerV2Json struct {
|
||||
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
|
||||
BellatrixBlock *BeaconBlockBellatrixJson `json:"bellatrix_block"`
|
||||
CapellaBlock *BeaconBlockCapellaJson `json:"capella_block"`
|
||||
DenebBlock *BeaconBlockDenebJson `json:"deneb_block"`
|
||||
Signature string `json:"signature" hex:"true"`
|
||||
}
|
||||
|
||||
@@ -403,6 +404,7 @@ type SignedBlindedBeaconBlockContainerJson struct {
|
||||
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
|
||||
BellatrixBlock *BlindedBeaconBlockBellatrixJson `json:"bellatrix_block"`
|
||||
CapellaBlock *BlindedBeaconBlockCapellaJson `json:"capella_block"`
|
||||
DenebBlock *BlindedBeaconBlockDenebJson `json:"deneb_block"`
|
||||
Signature string `json:"signature" hex:"true"`
|
||||
}
|
||||
|
||||
@@ -411,6 +413,7 @@ type BeaconBlockContainerV2Json struct {
|
||||
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
|
||||
BellatrixBlock *BeaconBlockBellatrixJson `json:"bellatrix_block"`
|
||||
CapellaBlock *BeaconBlockCapellaJson `json:"capella_block"`
|
||||
DenebBlock *BeaconBlockDenebJson `json:"deneb_block"`
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockContainerJson struct {
|
||||
@@ -418,6 +421,7 @@ type BlindedBeaconBlockContainerJson struct {
|
||||
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
|
||||
BellatrixBlock *BlindedBeaconBlockBellatrixJson `json:"bellatrix_block"`
|
||||
CapellaBlock *BlindedBeaconBlockCapellaJson `json:"capella_block"`
|
||||
DenebBlock *BlindedBeaconBlockDenebJson `json:"deneb_block"`
|
||||
}
|
||||
|
||||
type SignedBeaconBlockAltairContainerJson struct {
|
||||
@@ -435,6 +439,11 @@ type SignedBeaconBlockCapellaContainerJson struct {
|
||||
Signature string `json:"signature" hex:"true"`
|
||||
}
|
||||
|
||||
type SignedBeaconBlockDenebContainerJson struct {
|
||||
Message *BeaconBlockDenebJson `json:"message"`
|
||||
Signature string `json:"signature" hex:"true"`
|
||||
}
|
||||
|
||||
type SignedBlindedBeaconBlockBellatrixContainerJson struct {
|
||||
Message *BlindedBeaconBlockBellatrixJson `json:"message"`
|
||||
Signature string `json:"signature" hex:"true"`
|
||||
@@ -445,6 +454,11 @@ type SignedBlindedBeaconBlockCapellaContainerJson struct {
|
||||
Signature string `json:"signature" hex:"true"`
|
||||
}
|
||||
|
||||
type SignedBlindedBeaconBlockDenebContainerJson struct {
|
||||
Message *BlindedBeaconBlockDenebJson `json:"message"`
|
||||
Signature string `json:"signature" hex:"true"`
|
||||
}
|
||||
|
||||
type BeaconBlockAltairJson struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
@@ -469,6 +483,14 @@ type BeaconBlockCapellaJson struct {
|
||||
Body *BeaconBlockBodyCapellaJson `json:"body"`
|
||||
}
|
||||
|
||||
type BeaconBlockDenebJson struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
ParentRoot string `json:"parent_root" hex:"true"`
|
||||
StateRoot string `json:"state_root" hex:"true"`
|
||||
Body *BeaconBlockBodyDenebJson `json:"body"`
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockBellatrixJson struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
@@ -485,6 +507,14 @@ type BlindedBeaconBlockCapellaJson struct {
|
||||
Body *BlindedBeaconBlockBodyCapellaJson `json:"body"`
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockDenebJson struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
ParentRoot string `json:"parent_root" hex:"true"`
|
||||
StateRoot string `json:"state_root" hex:"true"`
|
||||
Body *BlindedBeaconBlockBodyDenebJson `json:"body"`
|
||||
}
|
||||
|
||||
type BeaconBlockBodyAltairJson struct {
|
||||
RandaoReveal string `json:"randao_reveal" hex:"true"`
|
||||
Eth1Data *Eth1DataJson `json:"eth1_data"`
|
||||
@@ -524,6 +554,21 @@ type BeaconBlockBodyCapellaJson struct {
|
||||
BLSToExecutionChanges []*SignedBLSToExecutionChangeJson `json:"bls_to_execution_changes"`
|
||||
}
|
||||
|
||||
type BeaconBlockBodyDenebJson struct {
|
||||
RandaoReveal string `json:"randao_reveal" hex:"true"`
|
||||
Eth1Data *Eth1DataJson `json:"eth1_data"`
|
||||
Graffiti string `json:"graffiti" hex:"true"`
|
||||
ProposerSlashings []*ProposerSlashingJson `json:"proposer_slashings"`
|
||||
AttesterSlashings []*AttesterSlashingJson `json:"attester_slashings"`
|
||||
Attestations []*AttestationJson `json:"attestations"`
|
||||
Deposits []*DepositJson `json:"deposits"`
|
||||
VoluntaryExits []*SignedVoluntaryExitJson `json:"voluntary_exits"`
|
||||
SyncAggregate *SyncAggregateJson `json:"sync_aggregate"`
|
||||
ExecutionPayload *ExecutionPayloadDenebJson `json:"execution_payload"`
|
||||
BLSToExecutionChanges []*SignedBLSToExecutionChangeJson `json:"bls_to_execution_changes"`
|
||||
BlobKzgCommitments []string `json:"blob_kzg_commitments" hex:"true"`
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockBodyBellatrixJson struct {
|
||||
RandaoReveal string `json:"randao_reveal" hex:"true"`
|
||||
Eth1Data *Eth1DataJson `json:"eth1_data"`
|
||||
@@ -551,6 +596,21 @@ type BlindedBeaconBlockBodyCapellaJson struct {
|
||||
BLSToExecutionChanges []*SignedBLSToExecutionChangeJson `json:"bls_to_execution_changes"`
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockBodyDenebJson struct {
|
||||
RandaoReveal string `json:"randao_reveal" hex:"true"`
|
||||
Eth1Data *Eth1DataJson `json:"eth1_data"`
|
||||
Graffiti string `json:"graffiti" hex:"true"`
|
||||
ProposerSlashings []*ProposerSlashingJson `json:"proposer_slashings"`
|
||||
AttesterSlashings []*AttesterSlashingJson `json:"attester_slashings"`
|
||||
Attestations []*AttestationJson `json:"attestations"`
|
||||
Deposits []*DepositJson `json:"deposits"`
|
||||
VoluntaryExits []*SignedVoluntaryExitJson `json:"voluntary_exits"`
|
||||
SyncAggregate *SyncAggregateJson `json:"sync_aggregate"`
|
||||
ExecutionPayloadHeader *ExecutionPayloadHeaderDenebJson `json:"execution_payload_header"`
|
||||
BLSToExecutionChanges []*SignedBLSToExecutionChangeJson `json:"bls_to_execution_changes"`
|
||||
BlobKzgCommitments []string `json:"blob_kzg_commitments" hex:"true"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadJson struct {
|
||||
ParentHash string `json:"parent_hash" hex:"true"`
|
||||
FeeRecipient string `json:"fee_recipient" hex:"true"`
|
||||
@@ -586,6 +646,25 @@ type ExecutionPayloadCapellaJson struct {
|
||||
Withdrawals []*WithdrawalJson `json:"withdrawals"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadDenebJson struct {
|
||||
ParentHash string `json:"parent_hash" hex:"true"`
|
||||
FeeRecipient string `json:"fee_recipient" hex:"true"`
|
||||
StateRoot string `json:"state_root" hex:"true"`
|
||||
ReceiptsRoot string `json:"receipts_root" hex:"true"`
|
||||
LogsBloom string `json:"logs_bloom" hex:"true"`
|
||||
PrevRandao string `json:"prev_randao" hex:"true"`
|
||||
BlockNumber string `json:"block_number"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
GasUsed string `json:"gas_used"`
|
||||
TimeStamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extra_data" hex:"true"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas" uint256:"true"`
|
||||
ExcessDataGas string `json:"excess_data_gas" uint256:"true"`
|
||||
BlockHash string `json:"block_hash" hex:"true"`
|
||||
Transactions []string `json:"transactions" hex:"true"`
|
||||
Withdrawals []*WithdrawalJson `json:"withdrawals"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadHeaderJson struct {
|
||||
ParentHash string `json:"parent_hash" hex:"true"`
|
||||
FeeRecipient string `json:"fee_recipient" hex:"true"`
|
||||
@@ -621,6 +700,25 @@ type ExecutionPayloadHeaderCapellaJson struct {
|
||||
WithdrawalsRoot string `json:"withdrawals_root" hex:"true"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadHeaderDenebJson struct {
|
||||
ParentHash string `json:"parent_hash" hex:"true"`
|
||||
FeeRecipient string `json:"fee_recipient" hex:"true"`
|
||||
StateRoot string `json:"state_root" hex:"true"`
|
||||
ReceiptsRoot string `json:"receipts_root" hex:"true"`
|
||||
LogsBloom string `json:"logs_bloom" hex:"true"`
|
||||
PrevRandao string `json:"prev_randao" hex:"true"`
|
||||
BlockNumber string `json:"block_number"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
GasUsed string `json:"gas_used"`
|
||||
TimeStamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extra_data" hex:"true"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas" uint256:"true"`
|
||||
ExcessDataGas string `json:"excess_data_gas" uint256:"true"`
|
||||
BlockHash string `json:"block_hash" hex:"true"`
|
||||
TransactionsRoot string `json:"transactions_root" hex:"true"`
|
||||
WithdrawalsRoot string `json:"withdrawals_root" hex:"true"`
|
||||
}
|
||||
|
||||
type SyncAggregateJson struct {
|
||||
SyncCommitteeBits string `json:"sync_committee_bits" hex:"true"`
|
||||
SyncCommitteeSignature string `json:"sync_committee_signature" hex:"true"`
|
||||
@@ -877,11 +975,42 @@ type BeaconStateCapellaJson struct {
|
||||
HistoricalSummaries []*HistoricalSummaryJson `json:"historical_summaries"`
|
||||
}
|
||||
|
||||
type BeaconStateDenebJson struct {
|
||||
GenesisTime string `json:"genesis_time"`
|
||||
GenesisValidatorsRoot string `json:"genesis_validators_root" hex:"true"`
|
||||
Slot string `json:"slot"`
|
||||
Fork *ForkJson `json:"fork"`
|
||||
LatestBlockHeader *BeaconBlockHeaderJson `json:"latest_block_header"`
|
||||
BlockRoots []string `json:"block_roots" hex:"true"`
|
||||
StateRoots []string `json:"state_roots" hex:"true"`
|
||||
HistoricalRoots []string `json:"historical_roots" hex:"true"`
|
||||
Eth1Data *Eth1DataJson `json:"eth1_data"`
|
||||
Eth1DataVotes []*Eth1DataJson `json:"eth1_data_votes"`
|
||||
Eth1DepositIndex string `json:"eth1_deposit_index"`
|
||||
Validators []*ValidatorJson `json:"validators"`
|
||||
Balances []string `json:"balances"`
|
||||
RandaoMixes []string `json:"randao_mixes" hex:"true"`
|
||||
Slashings []string `json:"slashings"`
|
||||
PreviousEpochParticipation EpochParticipation `json:"previous_epoch_participation"`
|
||||
CurrentEpochParticipation EpochParticipation `json:"current_epoch_participation"`
|
||||
JustificationBits string `json:"justification_bits" hex:"true"`
|
||||
PreviousJustifiedCheckpoint *CheckpointJson `json:"previous_justified_checkpoint"`
|
||||
CurrentJustifiedCheckpoint *CheckpointJson `json:"current_justified_checkpoint"`
|
||||
FinalizedCheckpoint *CheckpointJson `json:"finalized_checkpoint"`
|
||||
InactivityScores []string `json:"inactivity_scores"`
|
||||
CurrentSyncCommittee *SyncCommitteeJson `json:"current_sync_committee"`
|
||||
NextSyncCommittee *SyncCommitteeJson `json:"next_sync_committee"`
|
||||
LatestExecutionPayloadHeader *ExecutionPayloadHeaderDenebJson `json:"latest_execution_payload_header"`
|
||||
NextWithdrawalIndex string `json:"next_withdrawal_index"`
|
||||
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
|
||||
}
|
||||
|
||||
type BeaconStateContainerV2Json struct {
|
||||
Phase0State *BeaconStateJson `json:"phase0_state"`
|
||||
AltairState *BeaconStateAltairJson `json:"altair_state"`
|
||||
BellatrixState *BeaconStateBellatrixJson `json:"bellatrix_state"`
|
||||
CapellaState *BeaconStateCapellaJson `json:"capella_state"`
|
||||
DenebState *BeaconStateDenebJson `json:"deneb_state"`
|
||||
}
|
||||
|
||||
type ForkJson struct {
|
||||
@@ -1185,3 +1314,14 @@ type EventErrorJson struct {
|
||||
StatusCode int `json:"status_code"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
type blobsSidecarJson struct {
|
||||
BeaconBlockRoot string `json:"beacon_block_root" hex:"true"`
|
||||
BeaconBlockSlot string `json:"beacon_block_slot"`
|
||||
Blobs []string `json:"blobs" hex:"true"`
|
||||
AggregatedProof string `json:"kzg_aggregated_proof" hex:"true"`
|
||||
}
|
||||
|
||||
type blobsSidecarResponseJson struct {
|
||||
Data *blobsSidecarJson `json:"data"`
|
||||
}
@@ -4,6 +4,7 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"blinded_blocks.go",
|
||||
"blobs.go",
|
||||
"blocks.go",
|
||||
"config.go",
|
||||
"log.go",
beacon-chain/rpc/eth/beacon/blobs.go (new file, 13 lines)
@@ -0,0 +1,13 @@
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
|
||||
)
|
||||
|
||||
func (bs *Server) GetBlobsSidecar(ctx context.Context, req *ethpbv1.BlobsRequest) (*ethpbv1.BlobsResponse, error) {
|
||||
// TODO: implement this with new blob type request
|
||||
return nil, errors.New("not implemented")
|
||||
}
|
||||
@@ -382,6 +382,14 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
|
||||
if !errors.Is(err, blocks.ErrUnsupportedGetter) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
result, err = bs.getBlockDeneb(ctx, blk)
|
||||
if result != nil {
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedGetter means that we have another block type
|
||||
if !errors.Is(err, blocks.ErrUnsupportedGetter) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
|
||||
}
|
||||
|
||||
@@ -436,6 +444,14 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
|
||||
if !errors.Is(err, blocks.ErrUnsupportedGetter) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
result, err = bs.getSSZBlockDeneb(ctx, blk)
|
||||
if result != nil {
|
||||
return result, nil
|
||||
}
|
||||
// ErrUnsupportedGetter means that we have another block type
|
||||
if !errors.Is(err, blocks.ErrUnsupportedGetter) {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
|
||||
}
|
||||
|
||||
return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
|
||||
}
|
||||
@@ -817,6 +833,76 @@ func (bs *Server) getBlockCapella(ctx context.Context, blk interfaces.ReadOnlySi
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (bs *Server) getBlockDeneb(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlockResponseV2, error) {
|
||||
denebBlk, err := blk.PbDenebBlock()
|
||||
if err != nil {
|
||||
// ErrUnsupportedGetter means that we have another block type
|
||||
if errors.Is(err, blocks.ErrUnsupportedGetter) {
|
||||
if blindedDenebBlk, err := blk.PbBlindedDenebBlock(); err == nil {
|
||||
if blindedDenebBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
signedFullBlock, err := bs.ExecutionPayloadReconstructor.ReconstructFullBlock(ctx, blk)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not reconstruct full execution payload to create signed beacon block")
|
||||
}
|
||||
denebBlk, err = signedFullBlock.PbDenebBlock()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get signed beacon block")
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockDenebToV2(denebBlk.Block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not convert beacon block")
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
return ðpbv2.BlockResponseV2{
|
||||
Version: ethpbv2.Version_Deneb,
|
||||
Data: ðpbv2.SignedBeaconBlockContainer{
|
||||
Message: ðpbv2.SignedBeaconBlockContainer_DenebBlock{DenebBlock: v2Blk},
|
||||
Signature: sig[:],
|
||||
},
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
}, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if denebBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockDenebToV2(denebBlk.Block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not convert beacon block")
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
return ðpbv2.BlockResponseV2{
|
||||
Version: ethpbv2.Version_Deneb,
|
||||
Data: ðpbv2.SignedBeaconBlockContainer{
|
||||
Message: ðpbv2.SignedBeaconBlockContainer_DenebBlock{DenebBlock: v2Blk},
|
||||
Signature: sig[:],
|
||||
},
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func getSSZBlockPhase0(blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
|
||||
phase0Blk, err := blk.PbPhase0Block()
|
||||
if err != nil {
|
||||
@@ -1012,6 +1098,82 @@ func (bs *Server) getSSZBlockCapella(ctx context.Context, blk interfaces.ReadOnl
|
||||
return ðpbv2.SSZContainer{Version: ethpbv2.Version_CAPELLA, ExecutionOptimistic: isOptimistic, Data: sszData}, nil
|
||||
}
|
||||
|
||||
func (bs *Server) getSSZBlockDeneb(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
|
||||
denebBlk, err := blk.PbDenebBlock()
|
||||
if err != nil {
|
||||
// ErrUnsupportedGetter means that we have another block type
|
||||
if errors.Is(err, blocks.ErrUnsupportedGetter) {
|
||||
if blindedDenebBlk, err := blk.PbBlindedDenebBlock(); err == nil {
|
||||
if blindedDenebBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
signedFullBlock, err := bs.ExecutionPayloadReconstructor.ReconstructFullBlock(ctx, blk)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not reconstruct full execution payload to create signed beacon block")
|
||||
}
|
||||
denebBlk, err = signedFullBlock.PbDenebBlock()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get signed beacon block")
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockDenebToV2(denebBlk.Block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not convert signed beacon block")
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
data := ðpbv2.SignedBeaconBlockDeneb{
|
||||
Message: v2Blk,
|
||||
Signature: sig[:],
|
||||
}
|
||||
sszData, err := data.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not marshal block into SSZ")
|
||||
}
|
||||
return ðpbv2.SSZContainer{
|
||||
Version: ethpbv2.Version_Deneb,
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
Data: sszData,
|
||||
}, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if denebBlk == nil {
|
||||
return nil, errNilBlock
|
||||
}
|
||||
v2Blk, err := migration.V1Alpha1BeaconBlockDenebToV2(denebBlk.Block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not convert signed beacon block")
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root")
|
||||
}
|
||||
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not check if block is optimistic")
|
||||
}
|
||||
sig := blk.Signature()
|
||||
data := ðpbv2.SignedBeaconBlockDeneb{
|
||||
Message: v2Blk,
|
||||
Signature: sig[:],
|
||||
}
|
||||
sszData, err := data.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not marshal block into SSZ")
|
||||
}
|
||||
return ðpbv2.SSZContainer{Version: ethpbv2.Version_Deneb, ExecutionOptimistic: isOptimistic, Data: sszData}, nil
|
||||
}
|
||||
|
||||
func (bs *Server) submitPhase0Block(ctx context.Context, phase0Blk *ethpbv1.BeaconBlock, sig []byte) error {
|
||||
v1alpha1Blk, err := migration.V1ToV1Alpha1SignedBlock(ðpbv1.SignedBeaconBlock{Block: phase0Blk, Signature: sig})
|
||||
if err != nil {
|
||||
|
||||
@@ -105,6 +105,8 @@ func TestGetSpec(t *testing.T) {
|
||||
config.MaxWithdrawalsPerPayload = 74
|
||||
config.MaxBlsToExecutionChanges = 75
|
||||
config.MaxValidatorsPerWithdrawalsSweep = 76
|
||||
config.DenebForkEpoch = 77
|
||||
config.DenebForkVersion = []byte("DenebForkVersion")
|
||||
|
||||
var dbp [4]byte
|
||||
copy(dbp[:], []byte{'0', '0', '0', '1'})
|
||||
@@ -130,6 +132,9 @@ func TestGetSpec(t *testing.T) {
|
||||
var dam [4]byte
|
||||
copy(dam[:], []byte{'1', '0', '0', '0'})
|
||||
config.DomainApplicationMask = dam
|
||||
var dbs [4]byte
|
||||
copy(dbs[:], []byte{'0', '0', '0', '8'})
|
||||
config.DomainBlobSidecar = dbs
|
||||
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
@@ -137,7 +142,7 @@ func TestGetSpec(t *testing.T) {
|
||||
resp, err := server.GetSpec(context.Background(), &emptypb.Empty{})
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 105, len(resp.Data))
|
||||
assert.Equal(t, 108, len(resp.Data))
|
||||
for k, v := range resp.Data {
|
||||
switch k {
|
||||
case "CONFIG_NAME":
|
||||
@@ -363,8 +368,14 @@ func TestGetSpec(t *testing.T) {
|
||||
case "REORG_WEIGHT_THRESHOLD":
|
||||
assert.Equal(t, "20", v)
|
||||
case "SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY":
|
||||
case "DENEB_FORK_EPOCH":
|
||||
assert.Equal(t, "77", v)
|
||||
case "DENEB_FORK_VERSION":
|
||||
assert.Equal(t, "0x"+hex.EncodeToString([]byte("DenebForkVersion")), v)
|
||||
case "DOMAIN_BLOB_SIDECAR":
|
||||
assert.Equal(t, "0x30303038", v)
|
||||
default:
|
||||
t.Errorf("Incorrect key: %s", k)
|
||||
t.Errorf("Unknown key: %s", k)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -104,6 +104,19 @@ func (ds *Server) GetBeaconStateV2(ctx context.Context, req *ethpbv2.BeaconState
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
Finalized: isFinalized,
|
||||
}, nil
|
||||
case version.Deneb:
|
||||
protoState, err := migration.BeaconStateDenebToProto(beaconSt)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not convert state to proto: %v", err)
|
||||
}
|
||||
return ðpbv2.BeaconStateResponseV2{
|
||||
Version: ethpbv2.Version_Deneb,
|
||||
Data: ðpbv2.BeaconStateContainer{
|
||||
State: ðpbv2.BeaconStateContainer_DenebState{DenebState: protoState},
|
||||
},
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
}, nil
|
||||
|
||||
default:
|
||||
return nil, status.Error(codes.Internal, "Unsupported state version")
|
||||
}
|
||||
@@ -133,6 +146,8 @@ func (ds *Server) GetBeaconStateSSZV2(ctx context.Context, req *ethpbv2.BeaconSt
|
||||
ver = ethpbv2.Version_BELLATRIX
|
||||
case version.Capella:
|
||||
ver = ethpbv2.Version_CAPELLA
|
||||
case version.Deneb:
|
||||
ver = ethpbv2.Version_Deneb
|
||||
default:
|
||||
return nil, status.Error(codes.Internal, "Unsupported state version")
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@ go_library(
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/db/kv:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/blstoexec:go_default_library",
|
||||
"//beacon-chain/operations/synccommittee:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/rpc/eth/helpers:go_default_library",
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/blstoexec"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/synccommittee"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
|
||||
v1alpha1validator "github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/prysm/v1alpha1/validator"
|
||||
@@ -24,6 +25,7 @@ type Server struct {
|
||||
StateFetcher statefetcher.Fetcher
|
||||
OptimisticModeFetcher blockchain.OptimisticModeFetcher
|
||||
SyncCommitteePool synccommittee.Pool
|
||||
BLSChangesPool blstoexec.PoolManager
|
||||
V1Alpha1Server *v1alpha1validator.Server
|
||||
ProposerSlotIndexCache *cache.ProposerPayloadIDsCache
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ go_library(
|
||||
"assignments.go",
|
||||
"attestations.go",
|
||||
"blocks.go",
|
||||
"blstoexec.go",
|
||||
"committees.go",
|
||||
"config.go",
|
||||
"log.go",
|
||||
@@ -36,6 +37,7 @@ go_library(
|
||||
"//beacon-chain/db/filters:go_default_library",
|
||||
"//beacon-chain/execution:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/blstoexec:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
|
||||
@@ -122,7 +122,7 @@ func convertToBlockContainer(blk interfaces.ReadOnlySignedBeaconBlock, root [32]
|
||||
}
|
||||
ctr.Block = ðpb.BeaconBlockContainer_BellatrixBlock{BellatrixBlock: rBlk}
|
||||
}
|
||||
case version.Capella:
|
||||
case version.Capella, version.Deneb:
|
||||
if blk.IsBlinded() {
|
||||
rBlk, err := blk.PbBlindedCapellaBlock()
|
||||
if err != nil {
|
||||
@@ -266,6 +266,7 @@ func (bs *Server) listBlocksForGenesis(ctx context.Context, _ *ethpb.ListBlocksR
|
||||
//
|
||||
// This includes the head block slot and root as well as information about
|
||||
// the most recent finalized and justified slots.
|
||||
// DEPRECATED: This endpoint is superseded by the /eth/v1/beacon API endpoint
|
||||
func (bs *Server) GetChainHead(ctx context.Context, _ *emptypb.Empty) (*ethpb.ChainHead, error) {
|
||||
return bs.chainHeadRetrieval(ctx)
|
||||
}
beacon-chain/rpc/prysm/v1alpha1/beacon/blstoexec.go (new file, 24 lines)
@@ -0,0 +1,24 @@
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// SubmitBLSToExecutionChange receives a withdrawal credential change object via
|
||||
// RPC and injects it into the beacon node's operations pool.
|
||||
// Submission into this pool does not guarantee inclusion into a beacon block. If the object passes validation,
// the node MUST broadcast it.
|
||||
func (bs *Server) SubmitBLSToExecutionChange(
|
||||
ctx context.Context,
|
||||
req *ethpb.SignedBLSToExecutionChange,
|
||||
) (*ethpb.BLSToExecutionChangeResponse, error) {
|
||||
bs.BLSChangesPool.InsertBLSToExecChange(req)
|
||||
if err := bs.Broadcaster.Broadcast(ctx, req); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not broadcast SigledBLSToExecutionChange object: %v", err)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/execution"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/blstoexec"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/slashings"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
|
||||
@@ -40,6 +41,7 @@ type Server struct {
|
||||
Broadcaster p2p.Broadcaster
|
||||
AttestationsPool attestations.Pool
|
||||
SlashingsPool slashings.PoolManager
|
||||
BLSChangesPool blstoexec.PoolManager
|
||||
ChainStartChan chan time.Time
|
||||
ReceivedAttestationsBuffer chan *ethpb.Attestation
|
||||
CollectedAttestationsBuffer chan []*ethpb.Attestation
|
||||
|
||||
@@ -27,6 +27,7 @@ go_library(
|
||||
"@com_github_ipfs_go_log_v2//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
|
||||
"@org_golang_google_grpc//codes:go_default_library",
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"github.com/golang/protobuf/ptypes/empty"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"google.golang.org/grpc/codes"
|
||||
@@ -92,7 +93,7 @@ func (ds *Server) getPeer(pid peer.ID) (*ethpb.DebugPeerResponse, error) {
|
||||
aVersion = ""
|
||||
}
|
||||
peerInfo := ðpb.DebugPeerResponse_PeerInfo{
|
||||
Protocols: protocols,
|
||||
Protocols: protocol.ConvertToStrings(protocols),
|
||||
FaultCount: uint64(resp),
|
||||
ProtocolVersion: pVersion,
|
||||
AgentVersion: aVersion,
|
||||
|
||||
@@ -45,13 +45,14 @@ go_library(
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/core/transition/interop:go_default_library",
|
||||
"//beacon-chain/core/validators:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/kv:go_default_library",
|
||||
"//beacon-chain/execution:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/blstoexec:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/blstoexec:go_default_library",
|
||||
"//beacon-chain/operations/synccommittee:go_default_library",
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
@@ -62,6 +63,7 @@ go_library(
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/blobs:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/payload-attribute:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
@@ -85,6 +87,7 @@ go_library(
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_protolambda_go_kzg//eth:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
|
||||
@@ -100,6 +100,17 @@ func sendVerifiedBlocks(stream ethpb.BeaconNodeValidator_StreamBlocksAltairServe
|
||||
return nil
|
||||
}
|
||||
b.Block = ðpb.StreamBlocksResponse_CapellaBlock{CapellaBlock: phBlk}
|
||||
case version.Deneb:
|
||||
pb, err := data.SignedBlock.Proto()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get protobuf block")
|
||||
}
|
||||
phBlk, ok := pb.(*ethpb.SignedBeaconBlockDeneb)
|
||||
if !ok {
|
||||
log.Warn("Mismatch between version and block type, was expecting SignedBeaconBlockDeneb")
|
||||
return nil
|
||||
}
|
||||
b.Block = ðpb.StreamBlocksResponse_DenebBlock{DenebBlock: phBlk}
|
||||
}
|
||||
|
||||
if err := stream.Send(b); err != nil {
|
||||
@@ -149,6 +160,8 @@ func (vs *Server) sendBlocks(stream ethpb.BeaconNodeValidator_StreamBlocksAltair
|
||||
b.Block = ðpb.StreamBlocksResponse_BellatrixBlock{BellatrixBlock: p}
|
||||
case *ethpb.SignedBeaconBlockCapella:
|
||||
b.Block = ðpb.StreamBlocksResponse_CapellaBlock{CapellaBlock: p}
|
||||
case *ethpb.SignedBeaconBlockDeneb:
|
||||
b.Block = ðpb.StreamBlocksResponse_DenebBlock{DenebBlock: p}
|
||||
default:
|
||||
log.Errorf("Unknown block type %T", p)
|
||||
}
|
||||
|
||||
@@ -11,11 +11,13 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
emptypb "github.com/golang/protobuf/ptypes/empty"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/builder"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed"
blockfeed "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed/block"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition/interop"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db/kv"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
@@ -23,6 +25,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@@ -50,7 +53,11 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
// process attestations and update head in forkchoice
vs.ForkFetcher.ForkChoicer().Lock()
vs.HeadUpdater.UpdateHead(ctx, vs.ForkFetcher.CurrentSlot())
headRoot := vs.ForkFetcher.ForkChoicer().CachedHeadRoot()
parentRoot := vs.ForkFetcher.ForkChoicer().GetProposerHead()
if parentRoot != headRoot {
blockchain.LateBlockAttemptedReorgCount.Inc()
}
vs.ForkFetcher.ForkChoicer().Unlock()

// An optimistic validator MUST NOT produce a block (i.e., sign across the DOMAIN_BEACON_PROPOSER domain).
@@ -117,7 +124,8 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
vs.setSyncAggregate(ctx, sBlk)

// Set execution data. New in Bellatrix.
if err := vs.setExecutionData(ctx, sBlk, head); err != nil {
blobs, err := vs.setExecutionData(ctx, sBlk, head)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not set execution data: %v", err)
}

@@ -126,6 +134,7 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (

sr, err := vs.computeStateRoot(ctx, sBlk)
if err != nil {
interop.WriteBlockToDisk("proposer", sBlk, true /*failed*/)
return nil, status.Errorf(codes.Internal, "Could not compute state root: %v", err)
}
sBlk.SetStateRoot(sr)
@@ -134,6 +143,38 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not convert block to proto: %v", err)
}
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().DenebForkEpoch {
blk, ok := pb.(*ethpb.BeaconBlockDeneb)
if !ok {
return nil, status.Errorf(codes.Internal, "Could not cast block to BeaconBlockDeneb")
}
br, err := blk.HashTreeRoot()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get block root: %v", err)
}
kzgs, err := sBlk.Block().Body().BlobKzgCommitments()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get kzg commitments: %v", err)
}
validatorBlobs := make([]*ethpb.BlobSidecar, len(blk.Body.BlobKzgCommitments))
for i, b := range blobs {
validatorBlobs[i] = &ethpb.BlobSidecar{
BlockRoot: br[:],
Index: uint64(i),
Slot: blk.Slot,
BlockParentRoot: blk.ParentRoot,
ProposerIndex: blk.ProposerIndex,
Blob: b,
KzgCommitment: kzgs[i],
KzgProof: []byte{}, // TODO(TT): add proof
}
}
blkAndBlobs := &ethpb.BeaconBlockDenebAndBlobs{
Block: blk,
Blobs: validatorBlobs,
}
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Deneb{Deneb: blkAndBlobs}}, nil
}
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().CapellaForkEpoch {
if sBlk.IsBlinded() {
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_BlindedCapella{BlindedCapella: pb.(*ethpb.BlindedBeaconBlockCapella)}}, nil
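The Deneb branch above pairs each blob returned by setExecutionData with its KZG commitment to build the sidecars handed back to the validator. Below is a minimal, self-contained sketch of that pairing; the struct and helper are illustrative stand-ins rather than prysm's actual ethpb definitions, and the explicit length check is an added safety assumption not present in the hunk.

package main

import (
	"errors"
	"fmt"
)

// BlobSidecar is an illustrative stand-in for the Deneb sidecar type used above.
type BlobSidecar struct {
	BlockRoot       []byte
	Index           uint64
	Slot            uint64
	BlockParentRoot []byte
	ProposerIndex   uint64
	Blob            []byte
	KzgCommitment   []byte
	KzgProof        []byte
}

// buildSidecars pairs the i-th blob with the i-th KZG commitment, mirroring the
// loop in GetBeaconBlock. It fails fast when the two lists disagree in length.
func buildSidecars(blockRoot, parentRoot []byte, slot, proposerIdx uint64, blobs, kzgs [][]byte) ([]*BlobSidecar, error) {
	if len(blobs) != len(kzgs) {
		return nil, errors.New("blob and commitment counts differ")
	}
	sidecars := make([]*BlobSidecar, len(blobs))
	for i, b := range blobs {
		sidecars[i] = &BlobSidecar{
			BlockRoot:       blockRoot,
			Index:           uint64(i),
			Slot:            slot,
			BlockParentRoot: parentRoot,
			ProposerIndex:   proposerIdx,
			Blob:            b,
			KzgCommitment:   kzgs[i],
			KzgProof:        []byte{}, // proof generation is still a TODO in the hunk above
		}
	}
	return sidecars, nil
}

func main() {
	scs, err := buildSidecars([]byte{0x01}, []byte{0x02}, 10, 7,
		[][]byte{{0xaa}, {0xbb}}, [][]byte{{0x01}, {0x02}})
	if err != nil {
		panic(err)
	}
	fmt.Println(len(scs), scs[1].Index) // 2 1
}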
@@ -157,11 +198,7 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.ProposeBeaconBlock")
defer span.End()
blk, err := blocks.NewSignedBeaconBlock(req.Block)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "Could not decode block: %v", err)
}
return vs.proposeGenericBeaconBlock(ctx, blk)
return vs.proposeGenericBeaconBlock(ctx, req)
}

// PrepareBeaconProposer caches and updates the fee recipient for the given proposer.
@@ -244,9 +281,15 @@ func (vs *Server) GetFeeRecipientByPubKey(ctx context.Context, request *ethpb.Fe
}, nil
}

func (vs *Server) proposeGenericBeaconBlock(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpb.ProposeResponse, error) {
func (vs *Server) proposeGenericBeaconBlock(ctx context.Context, req *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.proposeGenericBeaconBlock")
defer span.End()

blk, err := blocks.NewSignedBeaconBlock(req.Block)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "Could not decode block: %v", err)
}

root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, fmt.Errorf("could not tree hash block: %v", err)
@@ -264,16 +307,6 @@ func (vs *Server) proposeGenericBeaconBlock(ctx context.Context, blk interfaces.
}
}

// Do not block proposal critical path with debug logging or block feed updates.
defer func() {
log.WithField("blockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debugf(
"Block proposal received via RPC")
vs.BlockNotifier.BlockFeed().Send(&feed.Event{
Type: blockfeed.ReceivedBlock,
Data: &blockfeed.ReceivedBlockData{SignedBlock: blk},
})
}()

// Broadcast the new block to the network.
blkPb, err := blk.Proto()
if err != nil {
@@ -290,6 +323,28 @@ func (vs *Server) proposeGenericBeaconBlock(ctx context.Context, blk interfaces.
return nil, fmt.Errorf("could not process beacon block: %v", err)
}

if blk.Version() >= version.Deneb {
b, ok := req.GetBlock().(*ethpb.GenericSignedBeaconBlock_Deneb)
if !ok {
return nil, status.Error(codes.Internal, "Could not cast block to Deneb")
}
for _, sidecar := range b.Deneb.Blobs {
if err := vs.P2P.BroadcastBlob(ctx, sidecar.Message.Index, sidecar); err != nil {
return nil, errors.Wrap(err, "could not broadcast blob sidecar")
}
}
}

// Do not block proposal critical path with debug logging or block feed updates.
defer func() {
log.WithField("blockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debugf(
"Block proposal received via RPC")
vs.BlockNotifier.BlockFeed().Send(&feed.Event{
Type: blockfeed.ReceivedBlock,
Data: &blockfeed.ReceivedBlockData{SignedBlock: blk},
})
}()

return &ethpb.ProposeResponse{
BlockRoot: root[:],
}, nil
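After the signed block is processed, the new Deneb branch above gossips every accompanying blob sidecar and fails the proposal on the first broadcast error. The following self-contained sketch shows that loop in isolation; the broadcaster interface is a hypothetical stand-in for vs.P2P, not prysm's actual p2p API.

package main

import (
	"context"
	"fmt"
)

// blobBroadcaster is a hypothetical stand-in for the p2p method the hunk above
// relies on; only its shape matters for this sketch.
type blobBroadcaster interface {
	BroadcastBlob(ctx context.Context, index uint64, blob []byte) error
}

type printBroadcaster struct{}

func (printBroadcaster) BroadcastBlob(_ context.Context, index uint64, blob []byte) error {
	fmt.Printf("broadcasting blob %d (%d bytes)\n", index, len(blob))
	return nil
}

// broadcastBlobs mirrors the Deneb branch: every sidecar is gossiped in order,
// and the whole proposal aborts on the first failure.
func broadcastBlobs(ctx context.Context, p2p blobBroadcaster, blobs [][]byte) error {
	for i, b := range blobs {
		if err := p2p.BroadcastBlob(ctx, uint64(i), b); err != nil {
			return fmt.Errorf("could not broadcast blob sidecar %d: %w", i, err)
		}
	}
	return nil
}

func main() {
	_ = broadcastBlobs(context.Background(), printBroadcaster{}, [][]byte{{0x01}, {0x02}})
}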
@@ -80,7 +80,7 @@ func (vs *Server) packAttestations(ctx context.Context, latestState state.Beacon
// filter separates attestation list into two groups: valid and invalid attestations.
// The first group passes all the required checks for an attestation to be considered for proposing.
// Attestations from the second group should be deleted.
func (a proposerAtts) filter(ctx context.Context, st state.BeaconState) (proposerAtts, proposerAtts) {
func (a proposerAtts) filter(ctx context.Context, st state.BeaconState) (proposerAtts, proposerAtts, error) {
validAtts := make([]*ethpb.Attestation, 0, len(a))
invalidAtts := make([]*ethpb.Attestation, 0, len(a))
var attestationProcessor func(context.Context, state.BeaconState, *ethpb.Attestation) (state.BeaconState, error)
@@ -98,7 +98,7 @@ func (a proposerAtts) filter(ctx context.Context, st state.BeaconState) (propose
}
} else {
// Exit early if there is an unknown state type.
return validAtts, invalidAtts
return validAtts, invalidAtts, errors.Errorf("unknown state type: %v", st.Version())
}

for _, att := range a {
@@ -108,7 +108,7 @@ func (a proposerAtts) filter(ctx context.Context, st state.BeaconState) (propose
}
invalidAtts = append(invalidAtts, att)
}
return validAtts, invalidAtts
return validAtts, invalidAtts, nil
}

// sortByProfitability orders attestations by highest slot and by highest aggregation bit count.
@@ -247,7 +247,10 @@ func (vs *Server) validateAndDeleteAttsInPool(ctx context.Context, st state.Beac
ctx, span := trace.StartSpan(ctx, "ProposerServer.validateAndDeleteAttsInPool")
defer span.End()

validAtts, invalidAtts := proposerAtts(atts).filter(ctx, st)
validAtts, invalidAtts, err := proposerAtts(atts).filter(ctx, st)
if err != nil {
return nil, err
}
if err := vs.deleteAttsInPool(ctx, invalidAtts); err != nil {
return nil, err
}

@@ -38,11 +38,11 @@ var builderGetPayloadMissCount = promauto.NewCounter(prometheus.CounterOpts{
const blockBuilderTimeout = 1 * time.Second

// Sets the execution data for the block. Execution data can come from the local EL client or a remote builder, depending on validator registration and circuit breaker conditions.
func (vs *Server) setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, headState state.BeaconState) error {
func (vs *Server) setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, headState state.BeaconState) ([]*enginev1.Blob, error) {
idx := blk.Block().ProposerIndex()
slot := blk.Block().Slot()
if slots.ToEpoch(slot) < params.BeaconConfig().BellatrixForkEpoch {
return nil
return nil, nil
}

canUseBuilder, err := vs.canUseBuilder(ctx, slot, idx)
@@ -56,14 +56,14 @@ func (vs *Server) setExecutionData(ctx context.Context, blk interfaces.SignedBea
} else {
switch {
case blk.Version() >= version.Capella:
localPayload, err := vs.getExecutionPayload(ctx, slot, idx, blk.Block().ParentRoot(), headState)
localPayload, _, err := vs.getExecutionPayload(ctx, slot, idx, blk.Block().ParentRoot(), headState)
if err != nil {
return errors.Wrap(err, "failed to get execution payload")
return nil, errors.Wrap(err, "failed to get execution payload")
}
// Compare payload values between local and builder. Default to the local value if it is higher.
localValue, err := localPayload.Value()
if err != nil {
return errors.Wrap(err, "failed to get local payload value")
return nil, errors.Wrap(err, "failed to get local payload value")
}
builderValue, err := builderPayload.Value()
if err != nil {
@@ -72,7 +72,7 @@ func (vs *Server) setExecutionData(ctx context.Context, blk interfaces.SignedBea

withdrawalsMatched, err := matchingWithdrawalsRoot(localPayload, builderPayload)
if err != nil {
return errors.Wrap(err, "failed to match withdrawals root")
return nil, errors.Wrap(err, "failed to match withdrawals root")
}
// If we can't get the builder value, just use local block.
if builderValue.Cmp(localValue) > 0 && withdrawalsMatched { // Builder value is higher and withdrawals match.
@@ -80,31 +80,38 @@ func (vs *Server) setExecutionData(ctx context.Context, blk interfaces.SignedBea
if err := blk.SetExecution(builderPayload); err != nil {
log.WithError(err).Warn("Proposer: failed to set builder payload")
} else {
return nil
return nil, nil
}
}
log.WithFields(logrus.Fields{
"localValue": localValue,
"builderValue": builderValue,
}).Warn("Proposer: using local execution payload because higher value")
return blk.SetExecution(localPayload)
return nil, blk.SetExecution(localPayload)
default: // Bellatrix case.
blk.SetBlinded(true)
if err := blk.SetExecution(builderPayload); err != nil {
log.WithError(err).Warn("Proposer: failed to set builder payload")
} else {
return nil
return nil, nil
}
}
}

}

executionData, err := vs.getExecutionPayload(ctx, slot, idx, blk.Block().ParentRoot(), headState)
executionData, blobsBundle, err := vs.getExecutionPayload(ctx, slot, idx, blk.Block().ParentRoot(), headState)
if err != nil {
return errors.Wrap(err, "failed to get execution payload")
return nil, errors.Wrap(err, "failed to get execution payload")
}
return blk.SetExecution(executionData)
if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch && len(blobsBundle.KzgCommitments) > 0 {
// TODO: check block hash matches blob bundle hash
if err := blk.SetBlobKzgCommitments(blobsBundle.KzgCommitments); err != nil {
return nil, errors.Wrap(err, "could not set blob kzg commitments")
}
return blobsBundle.Blobs, nil
}

return nil, blk.SetExecution(executionData)
}
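The Capella branch of setExecutionData above prefers the builder bid only when it is strictly more valuable than the locally built payload and its withdrawals root matches the local one; otherwise the local payload is kept. A minimal sketch of that decision rule follows, with illustrative names and big.Int values standing in for the payload value comparison.

package main

import (
	"fmt"
	"math/big"
)

// chooseBuilderPayload captures the comparison from the hunk above: the builder
// payload wins only when its value is strictly higher and the withdrawals match.
func chooseBuilderPayload(localValue, builderValue *big.Int, withdrawalsMatched bool) bool {
	return builderValue.Cmp(localValue) > 0 && withdrawalsMatched
}

func main() {
	local := big.NewInt(100)
	builder := big.NewInt(150)
	fmt.Println(chooseBuilderPayload(local, builder, true))  // true: builder payload used
	fmt.Println(chooseBuilderPayload(local, builder, false)) // false: withdrawals mismatch, keep local
	fmt.Println(chooseBuilderPayload(builder, local, true))  // false: local value is higher, keep local
}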
// This function retrieves the payload header given the slot number and the validator index.
@@ -188,17 +195,17 @@ func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot primitiv
"builderPubKey": fmt.Sprintf("%#x", bid.Pubkey()),
"blockHash": fmt.Sprintf("%#x", header.BlockHash()),
}).Info("Received header with bid")

return header, nil
}

// This function retrieves the full payload block using the input blind block. This input must be versioned as
// a Bellatrix blinded block. The output block will contain the full payload. The original header block
// will be returned if the block builder is not configured.
func (vs *Server) unblindBuilderBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ReadOnlySignedBeaconBlock, error) {
func (vs *Server) unblindBuilderBlock(ctx context.Context, b interfaces.SignedBeaconBlock) (interfaces.SignedBeaconBlock, error) {
if err := consensusblocks.BeaconBlockIsNil(b); err != nil {
return nil, err
}

// No-op if the input block is not a blinded Bellatrix block.
if b.Version() != version.Bellatrix || !b.IsBlinded() {
return b, nil

@@ -71,7 +71,8 @@ func TestServer_setExecutionData(t *testing.T) {
t.Run("No builder configured. Use local block", func(t *testing.T) {
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockCapella())
require.NoError(t, err)
require.NoError(t, vs.setExecutionData(context.Background(), blk, capellaTransitionState))
_, err = vs.setExecutionData(context.Background(), blk, capellaTransitionState)
require.NoError(t, err)
e, err := blk.Block().Body().Execution()
require.NoError(t, err)
require.Equal(t, uint64(1), e.BlockNumber()) // Local block
@@ -122,7 +123,8 @@ func TestServer_setExecutionData(t *testing.T) {
vs.ForkFetcher.ForkChoicer().SetGenesisTime(uint64(time.Now().Unix()))
vs.TimeFetcher = chain
vs.HeadFetcher = chain
require.NoError(t, vs.setExecutionData(context.Background(), blk, capellaTransitionState))
_, err = vs.setExecutionData(context.Background(), blk, capellaTransitionState)
require.NoError(t, err)
e, err := blk.Block().Body().Execution()
require.NoError(t, err)
require.Equal(t, uint64(1), e.BlockNumber()) // Local block because incorrect withdrawals
@@ -175,7 +177,8 @@ func TestServer_setExecutionData(t *testing.T) {
vs.ForkFetcher.ForkChoicer().SetGenesisTime(uint64(time.Now().Unix()))
vs.TimeFetcher = chain
vs.HeadFetcher = chain
require.NoError(t, vs.setExecutionData(context.Background(), blk, capellaTransitionState))
_, err = vs.setExecutionData(context.Background(), blk, capellaTransitionState)
require.NoError(t, err)
e, err := blk.Block().Body().Execution()
require.NoError(t, err)
require.Equal(t, uint64(2), e.BlockNumber()) // Builder block
@@ -184,7 +187,8 @@ func TestServer_setExecutionData(t *testing.T) {
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockCapella())
require.NoError(t, err)
vs.ExecutionEngineCaller = &powtesting.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &v1.ExecutionPayloadCapella{BlockNumber: 3}, BlockValue: big.NewInt(3)}
require.NoError(t, vs.setExecutionData(context.Background(), blk, capellaTransitionState))
_, err = vs.setExecutionData(context.Background(), blk, capellaTransitionState)
require.NoError(t, err)
e, err := blk.Block().Body().Execution()
require.NoError(t, err)
require.Equal(t, uint64(3), e.BlockNumber()) // Local block
@@ -196,7 +200,8 @@ func TestServer_setExecutionData(t *testing.T) {
ErrGetHeader: errors.New("fault"),
}
vs.ExecutionEngineCaller = &powtesting.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &v1.ExecutionPayloadCapella{BlockNumber: 4}, BlockValue: big.NewInt(0)}
require.NoError(t, vs.setExecutionData(context.Background(), blk, capellaTransitionState))
_, err = vs.setExecutionData(context.Background(), blk, capellaTransitionState)
require.NoError(t, err)
e, err := blk.Block().Body().Execution()
require.NoError(t, err)
require.Equal(t, uint64(4), e.BlockNumber()) // Local block
@@ -367,10 +372,10 @@ func TestServer_getBuilderBlock(t *testing.T) {

tests := []struct {
name string
blk interfaces.ReadOnlySignedBeaconBlock
blk interfaces.SignedBeaconBlock
mock *builderTest.MockBuilderService
err string
returnedBlk interfaces.ReadOnlySignedBeaconBlock
returnedBlk interfaces.SignedBeaconBlock
}{
{
name: "nil block",
@@ -379,12 +384,12 @@ func TestServer_getBuilderBlock(t *testing.T) {
},
{
name: "old block version",
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk: func() interfaces.SignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
return wb
}(),
returnedBlk: func() interfaces.ReadOnlySignedBeaconBlock {
returnedBlk: func() interfaces.SignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
return wb
@@ -392,7 +397,7 @@ func TestServer_getBuilderBlock(t *testing.T) {
},
{
name: "not configured",
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk: func() interfaces.SignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBlindedBeaconBlockBellatrix())
require.NoError(t, err)
return wb
@@ -400,7 +405,7 @@ func TestServer_getBuilderBlock(t *testing.T) {
mock: &builderTest.MockBuilderService{
HasConfigured: false,
},
returnedBlk: func() interfaces.ReadOnlySignedBeaconBlock {
returnedBlk: func() interfaces.SignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBlindedBeaconBlockBellatrix())
require.NoError(t, err)
return wb
@@ -408,7 +413,7 @@ func TestServer_getBuilderBlock(t *testing.T) {
},
{
name: "submit blind block error",
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk: func() interfaces.SignedBeaconBlock {
b := util.NewBlindedBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
@@ -425,7 +430,7 @@ func TestServer_getBuilderBlock(t *testing.T) {
},
{
name: "head and payload root mismatch",
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk: func() interfaces.SignedBeaconBlock {
b := util.NewBlindedBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
@@ -437,7 +442,7 @@ func TestServer_getBuilderBlock(t *testing.T) {
HasConfigured: true,
Payload: p,
},
returnedBlk: func() interfaces.ReadOnlySignedBeaconBlock {
returnedBlk: func() interfaces.SignedBeaconBlock {
b := util.NewBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
@@ -450,7 +455,7 @@ func TestServer_getBuilderBlock(t *testing.T) {
},
{
name: "can get payload",
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk: func() interfaces.SignedBeaconBlock {
b := util.NewBlindedBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
@@ -476,7 +481,7 @@ func TestServer_getBuilderBlock(t *testing.T) {
HasConfigured: true,
Payload: p,
},
returnedBlk: func() interfaces.ReadOnlySignedBeaconBlock {
returnedBlk: func() interfaces.SignedBeaconBlock {
b := util.NewBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2

@@ -35,7 +35,7 @@ func (vs *Server) setBlsToExecData(blk interfaces.SignedBeaconBlock, headState s
}
}

func (vs *Server) unblindBuilderBlockCapella(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ReadOnlySignedBeaconBlock, error) {
func (vs *Server) unblindBuilderBlockCapella(ctx context.Context, b interfaces.SignedBeaconBlock) (interfaces.SignedBeaconBlock, error) {
if err := consensusblocks.BeaconBlockIsNil(b); err != nil {
return nil, errors.Wrap(err, "block is nil")
}

@@ -22,10 +22,10 @@ func TestServer_unblindBuilderCapellaBlock(t *testing.T) {

tests := []struct {
name string
blk interfaces.ReadOnlySignedBeaconBlock
blk interfaces.SignedBeaconBlock
mock *builderTest.MockBuilderService
err string
returnedBlk interfaces.ReadOnlySignedBeaconBlock
returnedBlk interfaces.SignedBeaconBlock
}{
{
name: "nil block",
@@ -34,12 +34,12 @@ func TestServer_unblindBuilderCapellaBlock(t *testing.T) {
},
{
name: "old block version",
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk: func() interfaces.SignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
return wb
}(),
returnedBlk: func() interfaces.ReadOnlySignedBeaconBlock {
returnedBlk: func() interfaces.SignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
return wb
@@ -47,7 +47,7 @@ func TestServer_unblindBuilderCapellaBlock(t *testing.T) {
},
{
name: "not configured",
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk: func() interfaces.SignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBlindedBeaconBlockBellatrix())
require.NoError(t, err)
return wb
@@ -55,7 +55,7 @@ func TestServer_unblindBuilderCapellaBlock(t *testing.T) {
mock: &builderTest.MockBuilderService{
HasConfigured: false,
},
returnedBlk: func() interfaces.ReadOnlySignedBeaconBlock {
returnedBlk: func() interfaces.SignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBlindedBeaconBlockBellatrix())
require.NoError(t, err)
return wb
@@ -63,7 +63,7 @@ func TestServer_unblindBuilderCapellaBlock(t *testing.T) {
},
{
name: "submit blind block error",
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk: func() interfaces.SignedBeaconBlock {
b := util.NewBlindedBeaconBlockCapella()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
@@ -80,7 +80,7 @@ func TestServer_unblindBuilderCapellaBlock(t *testing.T) {
},
{
name: "can get payload",
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk: func() interfaces.SignedBeaconBlock {
b := util.NewBlindedBeaconBlockCapella()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
@@ -109,7 +109,7 @@ func TestServer_unblindBuilderCapellaBlock(t *testing.T) {
HasConfigured: true,
PayloadCapella: p,
},
returnedBlk: func() interfaces.ReadOnlySignedBeaconBlock {
returnedBlk: func() interfaces.SignedBeaconBlock {
b := util.NewBeaconBlockCapella()
b.Block.Slot = 1
b.Block.ProposerIndex = 2

@@ -30,11 +30,16 @@ func getEmptyBlock(slot primitives.Slot) (interfaces.SignedBeaconBlock, error) {
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not initialize block for proposal: %v", err)
}
default:
case slots.ToEpoch(slot) < params.BeaconConfig().DenebForkEpoch:
sBlk, err = blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockCapella{Block: &ethpb.BeaconBlockCapella{Body: &ethpb.BeaconBlockBodyCapella{}}})
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not initialize block for proposal: %v", err)
}
default:
sBlk, err = blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockDeneb{Block: &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{}}})
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not initialize block for proposal: %v", err)
}
}
return sBlk, err
}
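The getEmptyBlock hunk above picks the empty block type by comparing the slot's epoch against each fork epoch, with Deneb now taking the default arm. A tiny sketch of that selection logic follows; the fork epoch values and the fork names are illustrative assumptions, since the real numbers come from params.BeaconConfig().

package main

import "fmt"

const slotsPerEpoch = 32

// Hypothetical fork epochs for illustration only.
const (
	capellaForkEpoch = 10
	denebForkEpoch   = 20
)

// emptyBlockForkFor mirrors the switch in getEmptyBlock: the newest fork the
// slot's epoch has reached decides which (empty) block type is instantiated.
func emptyBlockForkFor(slot uint64) string {
	epoch := slot / slotsPerEpoch
	switch {
	case epoch < capellaForkEpoch:
		return "pre-Capella"
	case epoch < denebForkEpoch:
		return "Capella"
	default:
		return "Deneb"
	}
}

func main() {
	fmt.Println(emptyBlockForkFor(5 * slotsPerEpoch))  // pre-Capella
	fmt.Println(emptyBlockForkFor(15 * slotsPerEpoch)) // Capella
	fmt.Println(emptyBlockForkFor(25 * slotsPerEpoch)) // Deneb
}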
@@ -40,9 +40,10 @@ var (
})
)

// This returns the execution payload of a given slot. The function has full awareness of pre and post merge.
// This returns the execution payload of a given slot.
// The function has full awareness of pre and post merge.
// The payload is computed given the respected time of merge.
func (vs *Server) getExecutionPayload(ctx context.Context, slot primitives.Slot, vIdx primitives.ValidatorIndex, headRoot [32]byte, st state.BeaconState) (interfaces.ExecutionData, error) {
func (vs *Server) getExecutionPayload(ctx context.Context, slot primitives.Slot, vIdx primitives.ValidatorIndex, headRoot [32]byte, st state.BeaconState) (interfaces.ExecutionData, *enginev1.BlobsBundle, error) {
proposerID, payloadId, ok := vs.ProposerSlotIndexCache.GetProposerPayloadIDs(slot, headRoot)
feeRecipient := params.BeaconConfig().DefaultFeeRecipient
recipient, err := vs.BeaconDB.FeeRecipientByValidatorID(ctx, vIdx)
@@ -62,7 +63,7 @@ func (vs *Server) getExecutionPayload(ctx context.Context, slot primitives.Slot,
"Please refer to our documentation for instructions")
}
default:
return nil, errors.Wrap(err, "could not get fee recipient in db")
return nil, nil, errors.Wrap(err, "could not get fee recipient in db")
}

if ok && proposerID == vIdx && payloadId != [8]byte{} { // Payload ID is cache hit. Return the cached payload ID.
@@ -73,10 +74,17 @@ func (vs *Server) getExecutionPayload(ctx context.Context, slot primitives.Slot,
switch {
case err == nil:
warnIfFeeRecipientDiffers(payload, feeRecipient)
return payload, nil
if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch {
sc, err := vs.ExecutionEngineCaller.GetBlobsBundle(ctx, pid)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get blobs bundle from execution client")
}
return payload, sc, nil
}
return payload, nil, nil
case errors.Is(err, context.DeadlineExceeded):
default:
return nil, errors.Wrap(err, "could not get cached payload from execution client")
return nil, nil, errors.Wrap(err, "could not get cached payload from execution client")
}
}

@@ -84,53 +92,61 @@ func (vs *Server) getExecutionPayload(ctx context.Context, slot primitives.Slot,
var hasTerminalBlock bool
mergeComplete, err := blocks.IsMergeTransitionComplete(st)
if err != nil {
return nil, err
return nil, nil, err
}

t, err := slots.ToTime(st.GenesisTime(), slot)
if err != nil {
return nil, err
return nil, nil, err
}
if mergeComplete {
header, err := st.LatestExecutionPayloadHeader()
if err != nil {
return nil, err
return nil, nil, err
}
parentHash = header.BlockHash()
} else {
if activationEpochNotReached(slot) {
return consensusblocks.WrappedExecutionPayload(emptyPayload())
p, err := consensusblocks.WrappedExecutionPayload(emptyPayload())
if err != nil {
return nil, nil, err
}
return p, nil, nil
}
parentHash, hasTerminalBlock, err = vs.getTerminalBlockHashIfExists(ctx, uint64(t.Unix()))
if err != nil {
return nil, err
return nil, nil, err
}
if !hasTerminalBlock {
return consensusblocks.WrappedExecutionPayload(emptyPayload())
p, err := consensusblocks.WrappedExecutionPayload(emptyPayload())
if err != nil {
return nil, nil, err
}
return p, nil, nil
}
}
payloadIDCacheMiss.Inc()

random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
if err != nil {
return nil, err
return nil, nil, err
}
finalizedBlockHash := params.BeaconConfig().ZeroHash[:]
finalizedRoot := bytesutil.ToBytes32(st.FinalizedCheckpoint().Root)
if finalizedRoot != [32]byte{} { // finalized root could be zeros before the first finalized block.
finalizedBlock, err := vs.BeaconDB.Block(ctx, bytesutil.ToBytes32(st.FinalizedCheckpoint().Root))
if err != nil {
return nil, err
return nil, nil, err
}
if err := consensusblocks.BeaconBlockIsNil(finalizedBlock); err != nil {
return nil, err
return nil, nil, err
}
switch finalizedBlock.Version() {
case version.Phase0, version.Altair: // Blocks before Bellatrix don't have execution payloads. Use zeros as the hash.
default:
finalizedPayload, err := finalizedBlock.Block().Body().Execution()
if err != nil {
return nil, err
return nil, nil, err
}
finalizedBlockHash = finalizedPayload.BlockHash()
}
@@ -143,10 +159,10 @@ func (vs *Server) getExecutionPayload(ctx context.Context, slot primitives.Slot,
}
var attr payloadattribute.Attributer
switch st.Version() {
case version.Capella:
case version.Capella, version.Deneb:
withdrawals, err := st.ExpectedWithdrawals()
if err != nil {
return nil, err
return nil, nil, err
}
attr, err = payloadattribute.New(&enginev1.PayloadAttributesV2{
Timestamp: uint64(t.Unix()),
@@ -155,7 +171,7 @@ func (vs *Server) getExecutionPayload(ctx context.Context, slot primitives.Slot,
Withdrawals: withdrawals,
})
if err != nil {
return nil, err
return nil, nil, err
}
case version.Bellatrix:
attr, err = payloadattribute.New(&enginev1.PayloadAttributes{
@@ -164,25 +180,32 @@ func (vs *Server) getExecutionPayload(ctx context.Context, slot primitives.Slot,
SuggestedFeeRecipient: feeRecipient.Bytes(),
})
if err != nil {
return nil, err
return nil, nil, err
}
default:
return nil, errors.New("unknown beacon state version")
return nil, nil, errors.New("unknown beacon state version")
}

payloadID, _, err := vs.ExecutionEngineCaller.ForkchoiceUpdated(ctx, f, attr)
if err != nil {
return nil, errors.Wrap(err, "could not prepare payload")
return nil, nil, errors.Wrap(err, "could not prepare payload")
}
if payloadID == nil {
return nil, fmt.Errorf("nil payload with block hash: %#x", parentHash)
return nil, nil, fmt.Errorf("nil payload with block hash: %#x", parentHash)
}
payload, err := vs.ExecutionEngineCaller.GetPayload(ctx, *payloadID, slot)
if err != nil {
return nil, err
return nil, nil, err
}
warnIfFeeRecipientDiffers(payload, feeRecipient)
return payload, nil
if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch {
sc, err := vs.ExecutionEngineCaller.GetBlobsBundle(ctx, *payloadID)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get blobs bundle from execution client")
}
return payload, sc, nil
}
return payload, nil, nil
}
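getExecutionPayload now returns a blobs bundle alongside the payload: the payload is always fetched from the engine, and the bundle is requested only once the slot's epoch has reached Deneb. The self-contained sketch below mirrors that return shape; the engine interface, byte-slice payload type, and fork epoch parameter are simplified assumptions, not prysm's actual EngineCaller API.

package main

import (
	"context"
	"fmt"
)

// engineAPI is a hypothetical stand-in for the subset of the execution engine
// interface the hunk above uses.
type engineAPI interface {
	GetPayload(ctx context.Context, payloadID [8]byte) ([]byte, error)
	GetBlobsBundle(ctx context.Context, payloadID [8]byte) ([][]byte, error)
}

// getPayloadAndBlobs always fetches the payload and, only at or after the
// (assumed) Deneb fork epoch, also fetches the blobs bundle for the same payload ID.
func getPayloadAndBlobs(ctx context.Context, eng engineAPI, payloadID [8]byte, epoch, denebForkEpoch uint64) ([]byte, [][]byte, error) {
	payload, err := eng.GetPayload(ctx, payloadID)
	if err != nil {
		return nil, nil, err
	}
	if epoch < denebForkEpoch {
		return payload, nil, nil // pre-Deneb: no blobs to return
	}
	blobs, err := eng.GetBlobsBundle(ctx, payloadID)
	if err != nil {
		return nil, nil, fmt.Errorf("could not get blobs bundle: %w", err)
	}
	return payload, blobs, nil
}

type fakeEngine struct{}

func (fakeEngine) GetPayload(context.Context, [8]byte) ([]byte, error) { return []byte{0x01}, nil }
func (fakeEngine) GetBlobsBundle(context.Context, [8]byte) ([][]byte, error) {
	return [][]byte{{0xaa}}, nil
}

func main() {
	p, b, err := getPayloadAndBlobs(context.Background(), fakeEngine{}, [8]byte{1}, 25, 20)
	fmt.Println(len(p), len(b), err) // 1 1 <nil>
}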
// warnIfFeeRecipientDiffers logs a warning if the fee recipient in the included payload does not

@@ -143,7 +143,7 @@ func TestServer_getExecutionPayload(t *testing.T) {
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}
vs.ProposerSlotIndexCache.SetProposerAndPayloadIDs(tt.st.Slot(), 100, [8]byte{100}, [32]byte{'a'})
_, err := vs.getExecutionPayload(context.Background(), tt.st.Slot(), tt.validatorIndx, [32]byte{'a'}, tt.st)
_, _, err := vs.getExecutionPayload(context.Background(), tt.st.Slot(), tt.validatorIndx, [32]byte{'a'}, tt.st)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
} else {
@@ -179,7 +179,7 @@ func TestServer_getExecutionPayloadContextTimeout(t *testing.T) {
}
vs.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nonTransitionSt.Slot(), 100, [8]byte{100}, [32]byte{'a'})

_, err = vs.getExecutionPayload(context.Background(), nonTransitionSt.Slot(), 100, [32]byte{'a'}, nonTransitionSt)
_, _, err = vs.getExecutionPayload(context.Background(), nonTransitionSt.Slot(), 100, [32]byte{'a'}, nonTransitionSt)
require.NoError(t, err)
}

@@ -224,7 +224,7 @@ func TestServer_getExecutionPayload_UnexpectedFeeRecipient(t *testing.T) {
BeaconDB: beaconDB,
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
}
gotPayload, err := vs.getExecutionPayload(context.Background(), transitionSt.Slot(), 0, [32]byte{}, transitionSt)
gotPayload, _, err := vs.getExecutionPayload(context.Background(), transitionSt.Slot(), 0, [32]byte{}, transitionSt)
require.NoError(t, err)
require.NotNil(t, gotPayload)

@@ -236,7 +236,7 @@ func TestServer_getExecutionPayload_UnexpectedFeeRecipient(t *testing.T) {
payload.FeeRecipient = evilRecipientAddress[:]
vs.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()

gotPayload, err = vs.getExecutionPayload(context.Background(), transitionSt.Slot(), 0, [32]byte{}, transitionSt)
gotPayload, _, err = vs.getExecutionPayload(context.Background(), transitionSt.Slot(), 0, [32]byte{}, transitionSt)
require.NoError(t, err)
require.NotNil(t, gotPayload)

@@ -63,6 +63,7 @@ type Server struct {
SlashingsPool slashings.PoolManager
ExitPool voluntaryexits.PoolManager
SyncCommitteePool synccommittee.Pool
BLSChangesPool blstoexec.PoolManager
BlockReceiver blockchain.BlockReceiver
MockEth1Votes bool
Eth1BlockFetcher execution.POWBlockFetcher
@@ -73,7 +74,6 @@ type Server struct {
BeaconDB db.HeadAccessDatabase
ExecutionEngineCaller execution.EngineCaller
BlockBuilder builder.BlockBuilder
BLSChangesPool blstoexec.PoolManager
}

// WaitForActivation checks if a validator public key exists in the active validator registry of the current

@@ -98,6 +98,7 @@ type Config struct {
AttestationsPool attestations.Pool
ExitPool voluntaryexits.PoolManager
SlashingsPool slashings.PoolManager
BLSToExecPool blstoexec.PoolManager
SlashingChecker slasherservice.SlashingChecker
SyncCommitteeObjectPool synccommittee.Pool
BLSChangesPool blstoexec.PoolManager
@@ -218,12 +219,12 @@ func (s *Service) Start() {
SlashingsPool: s.cfg.SlashingsPool,
StateGen: s.cfg.StateGen,
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
BLSChangesPool: s.cfg.BLSChangesPool,
ReplayerBuilder: ch,
ExecutionEngineCaller: s.cfg.ExecutionEngineCaller,
BeaconDB: s.cfg.BeaconDB,
ProposerSlotIndexCache: s.cfg.ProposerIdsCache,
BlockBuilder: s.cfg.BlockBuilder,
BLSChangesPool: s.cfg.BLSChangesPool,
}
validatorServerV1 := &validator.Server{
HeadFetcher: s.cfg.HeadFetcher,
@@ -243,6 +244,7 @@ func (s *Service) Start() {
ReplayerBuilder: ch,
},
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
BLSChangesPool: s.cfg.BLSChangesPool,
ProposerSlotIndexCache: s.cfg.ProposerIdsCache,
}

@@ -296,6 +298,7 @@ func (s *Service) Start() {
ReceivedAttestationsBuffer: make(chan *ethpbv1alpha1.Attestation, attestationBufferSize),
CollectedAttestationsBuffer: make(chan []*ethpbv1alpha1.Attestation, attestationBufferSize),
ReplayerBuilder: ch,
BLSChangesPool: s.cfg.BLSChangesPool,
}
beaconChainServerV1 := &beacon.Server{
CanonicalHistory: ch,